diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..829031f423eb2783837c74d0c1641108fa165ef5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h @@ -0,0 +1,176 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +struct IValue; +using Stack = std::vector; + +class OperatorHandle; +class KernelFunction; + +// This kernel implements the behavior of falling through to the next available +// registered dispatch key. The implementation of this function is FAST; it is +// no overhead to fallthrough to the next key. See cpp file for some more +// implementation notes; notably, this does NOT actually go through the +// boxing/unboxing codepath. +TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); + +// Note [Ambiguity in AutogradOther kernel] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// This error-reporting kernel is registered to the AutogradOther entry in the +// dispatch table when there is both a CompositeImplicitAutograd kernel and a +// backend kernel for ANY backend that maps to AutogradOther. To see why +// this is necessary in the AutogradOther case, it's helpful to first see +// why everything works out fine for a backend that has a reserved Autograd +// entry (see rule 2.2 in [Note] DispatchTable computation): +// +// CPU AutogradCPU +// reg? registers with... +// ------------------------------------------------- +// y Autograd registration takes precedence +// over CompositeImplicitAutograd. +// This is good, because the CPU specific backend +// implementation is more specialized and typically better; +// if we used the composite, we would bypass it. +// (NB: the Autograd key is guaranteed to exist because +// the autograd codegen requires it!) +// +// n CompositeImplicitAutograd takes precedence. +// This is also good, because the Autograd +// registration (if it exists) would try to redispatch +// to the (non-existent) CPU implementation; by +// using the composite, we ensure the operator +// actually works. +// +// As you can see, when we have a specific Autograd key (AutogradCPU), we can +// decide whether or not to use the CompositeImplicitAutograd kernel or the +// Autograd kernel based on whether or not the backend kernel exists. +// +// However, for AutogradOther (which is the catchall autograd kernel for +// everything that doesn't have a specific Autograd key), we can't do this +// trick because there isn't any unique backend to peek at to disambiguate; +// if there are some backends that have implementations they prefer Autograd, +// but unimplemented backends would prefer CompositeImplicitAutograd. Rather +// than arbitrarily pick one or the other, we just register a kernel that raises +// an error and let the user decide how to proceed. +TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); + +// Note [named_not_supported_kernel] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// This kernel implements reporting an error message saying that named tensor is +// not supported. This kernel doesn't rely on the Stack, and so it is special +// cased in the dispatcher to be triggered before we attempt boxing (so we can +// give a good error message in cases when boxing is not supported). 
When +// boxing is universally supported this can be removed. +[[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); + +/** + * BoxedKernel is similar to a std::function storing a boxed kernel. + */ +class TORCH_API BoxedKernel final { +public: + // This is how boxed kernels are actually stored + // + // Note [Plumbing Keys Through The Dispatcher] + // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS) + // upon every dispatch call into order to compute which kernel to dispatch to. + // + // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores + // to have a first argument of type DispatchKeySet. + // + // What are the invariants of the DispatchKeySet when it gets passed to a kernel? + // - All keys to the left of the current dispatch key have been masked out. + // (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer) + // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments + // are still in the set. + // + // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches: + // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will + // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher + // upon redispatching. + // + // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature + // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples. + // + // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h. + // See Note [Plumbing Keys Through The Dispatcher 2] for details. + using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); + // This is the public API for how boxed kernels are defined + using BoxedKernelFunction = void(const OperatorHandle&, Stack*); + using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*); + + BoxedKernel(); + + // Fast path for dispatch to allow not touching the boxed kernel in + // the common case where unboxed is available. + bool isValid() const; + bool isFallthrough() const; + + /** + * Call the function with boxed arguments. + */ + void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const; + + /** + * Create a KernelFunction from a boxed function. + * + * Example: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > BoxedFunction func = BoxedKernel::makeFromFunction<&boxed_func>(); + */ + template + static BoxedKernel makeFromFunction(); + + /** + * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) + * See Note [Plumbing Keys Through The Dispatcher] for details. + */ + template + static BoxedKernel makeFromFunction(); + + /** + * Create a KernelFunction from a boxed functor. 
+ * + * Example: + * + * > class MyFunctor final : public c10::OperatorKernel { + * > public: + * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...} + * > }; + * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique()); + */ + template + static BoxedKernel makeFromFunctor(std::unique_ptr kernelFunctor); + + + static BoxedKernel makeFallthrough(); + static BoxedKernel makeAmbiguousAutogradOther(); + static BoxedKernel makeNamedNotSupported(); + +private: + + friend class KernelFunction; + + template + static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack); + + template + static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack); + + explicit BoxedKernel(std::unique_ptr functor, InternalBoxedKernelFunction* boxed_kernel_func); + + OperatorKernel* getFunctor() const; + InternalBoxedKernelFunction* getFnPtr() const; + + c10::intrusive_ptr functor_; + InternalBoxedKernelFunction* boxed_kernel_func_; +}; + +} // namespace c10 + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..421b85cca3ec5a38e2cb9e27b1ea7f28bd3eb71b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel_impl.h @@ -0,0 +1,99 @@ +#pragma once + +namespace c10 { + +inline BoxedKernel::BoxedKernel() + : functor_() +, boxed_kernel_func_(nullptr) +{} + +inline BoxedKernel::BoxedKernel(std::unique_ptr functor, InternalBoxedKernelFunction* boxed_kernel_func) +: functor_(std::move(functor)) +, boxed_kernel_func_(boxed_kernel_func) +{} + +template +inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) { + // Note that we're dropping the DispatchKeySet argument. + // See Note [Plumbing Keys Through The Dispatcher 2] for details. + func(opHandle, stack); +} + +template +inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) { + // See Note [Plumbing Keys Through The Dispatcher 2] for details. + func(opHandle, ks, stack); +} + +inline bool BoxedKernel::isValid() const { + return boxed_kernel_func_ != nullptr; +} + +inline bool BoxedKernel::isFallthrough() const { + return boxed_kernel_func_ == &fallthrough_kernel; +} + +inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + boxed_kernel_func_ != nullptr, + "Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel." 
+ ); + (*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack); +} + +template +inline BoxedKernel BoxedKernel::makeFromFunction() { + return BoxedKernel( + nullptr, // no functor_ object + &make_boxed_function + ); +} + +template +inline BoxedKernel BoxedKernel::makeFromFunction() { + return BoxedKernel( + nullptr, // no functor_ object + &make_boxed_function + ); +} + +inline BoxedKernel BoxedKernel::makeFallthrough() { + return BoxedKernel( + nullptr, // no functor_ object + &fallthrough_kernel + ); +} + +inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() { + return BoxedKernel( + nullptr, // no functor_ object + &ambiguous_autogradother_kernel + ); +} + +inline BoxedKernel BoxedKernel::makeNamedNotSupported() { + return BoxedKernel( + nullptr, // no functor_ object + &named_not_supported_kernel + ); +} + +template +inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr kernelFunctor) { + static_assert(std::is_base_of::value, "Tried to call BoxedKernel::makeFromFunctor, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + return BoxedKernel( + std::move(kernelFunctor), + [](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) { + (*static_cast(kernel))(op, ks, stack); + } + ); +} + +inline OperatorKernel* BoxedKernel::getFunctor() const { + return functor_.get(); +} +inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const { + return boxed_kernel_func_; +} + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h new file mode 100644 index 0000000000000000000000000000000000000000..c950f4c80ffc717d59e4faeabc0552aef25b799d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h @@ -0,0 +1,260 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace. + +class OperatorHandle; +struct OperatorKernel; +class KernelFunction; + +template +using has_symint = + std::disjunction< + std::is_same, + std::is_same, + std::is_same, + std::is_same, T> + >; + +template +struct remove_symint { + using type = T; +}; + +template <> +struct remove_symint { + using type = int64_t; +}; + +template <> +struct remove_symint { + using type = OptionalIntArrayRef; +}; + +template <> +struct remove_symint { + using type = c10::IntArrayRef; +}; + +template <> +struct remove_symint> { + using type = c10::optional; +}; + + +template +struct maybe_keep_symint final {}; + +template +struct maybe_keep_symint { using type = T; }; + +template +struct maybe_keep_symint { using type = typename remove_symint::type; }; + +template +using fn_has_symint = typename guts::typelist::true_for_any_type< + has_symint, + typename guts::infer_function_traits::type::parameter_types +>; + +template +struct fn_remove_symint; + +template +struct fn_remove_symint { + using type = Ret(typename remove_symint::type...); +}; + +/** + * KernelFunction is similar to std::function but stores a kernel function. + * You can create a KernelFunction from a boxed or unboxed function/functor/lambda + * and call it in a boxed or unboxed way. If the way it was created doesn't + * match the way it was called, it will do boxing or unboxing as necessary. 
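+ *
+ * An illustrative end-to-end sketch (not part of the original header), assuming
+ * an already-looked-up OperatorHandle `opHandle` and a DispatchKeySet `ks`:
+ *
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
+ * >     [] (Tensor a, bool b) -> Tensor { return b ? a : a.clone(); });
+ * > // unboxed call; the return type and argument types are given explicitly:
+ * > Tensor r1 = func.call<Tensor, Tensor, bool>(opHandle, ks, tensor, true);
+ * > // boxed call; arguments travel through a Stack of IValues:
+ * > torch::jit::Stack stack{tensor, true};
+ * > func.callBoxed(opHandle, ks, &stack);
+ * > Tensor r2 = stack.back().toTensor();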
+ */ +class TORCH_API KernelFunction final { +public: + using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction; + using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction; + using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys; + + KernelFunction(); + + // Fast path for dispatch to allow not touching the boxed kernel in + // the common case where unboxed is available. + bool isValidUnboxed() const; + bool isValidSymUnboxed() const; + bool isValid() const; + bool isFallthrough() const; + + /** + * Call the function in a boxed way. + * If the kernel function was created with an unboxed function, + * this will call an unboxing wrapper which then calls into that + * unboxed function. + * + * Example: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func); + * > Tensor result = func.callBoxed(stack); + * + * Or, with an unboxed implementation: + * + * > KernelFunction func = KernelFunction::makeFromUnboxedLambda( + * > [] (Tensor a, bool b) -> Tensor {...}); + * > Tensor result = func.callBoxed(stack); + */ + void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const; + + /** + * Call the function in an unboxed way. + * If the kernel function was created with a boxed function, + * this will box all inputs and then call into that boxed function. + * + * Note that this doesn't work for all types yet. + * + * Example: + * + * > KernelFunction func = KernelFunction::makeFromUnboxedLambda( + * > [] (Tensor a, bool b) -> Tensor {...}); + * > Tensor result = func.call(tensor1, true); + * + * Or, with a boxed implementation: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func); + * > Tensor result = func.call(tensor1, true); + */ + template + Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const; + + /** + * Create a KernelFunction from a BoxedKernel. + */ + static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn); + + /** + * Create a KernelFunction from a boxed function. + * + * Example: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>(); + */ + template + static KernelFunction makeFromBoxedFunction(); + + /** + * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) + * See Note [Plumbing Keys Through The Dispatcher] for details. + */ + template + static KernelFunction makeFromBoxedFunction(); + + /** + * Create a KernelFunction from an unboxed functor. + * + * Example: + * + * > class MyFunctor final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor(std::make_unique()); + */ + template + static KernelFunction makeFromUnboxedFunctor(std::unique_ptr kernelFunctor); + + /** + * Create a KernelFunction from a boxed functor. 
+ * + * Example: + * + * > class MyFunctor final : public c10::OperatorKernel { + * > public: + * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...} + * > }; + * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique()); + */ + template + static KernelFunction makeFromBoxedFunctor(std::unique_ptr kernelFunctor); + + /** + * Create a KernelFunction from an unboxed function. + * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction + * because knowing the function pointer as a template argument (i.e. at + * compile time) allows the compiler to inline the function into its + * unboxing wrapper and yields better performance when calling the function. + * + * Example: + * + * > Tensor unboxed_func(Tensor a, Tensor b) {...} + * > KernelFunction func = KernelFunction::makeFromUnboxedFunction(); + */ + template + static KernelFunction makeFromUnboxedFunction(FuncPtr); + + /** + * Create a KernelFunction from an unboxed function. + * KernelFunction::makeFromUnboxedFunction is usually a better choice than + * this if you know the function pointer at compile time, see doc comment + * there for an explanation. + * + * Example: + * + * > Tensor unboxed_func(Tensor a, Tensor b) {...} + * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func); + */ + template + static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func); + + static KernelFunction makeFallthrough(); + static KernelFunction makeAmbiguousAutogradOther(); + static KernelFunction makeNamedNotSupported(); + + /** + * Create a KernelFunction from an unboxed lambda. + * + * Example: + * + * > KernelFunction func = KernelFunction::makeFromUnboxedLambda( + * > [] (Tensor a, bool b) -> Tensor {...}); + */ + template + static std::enable_if_t>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda); + template + static std::enable_if_t>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda); + + std::string dumpState() const; + // For testing internal invariants only + bool _equalsBoxedAndUnboxed(const KernelFunction&) const; + +private: + + explicit KernelFunction( + std::unique_ptr functor, + InternalBoxedKernelFunction* boxed_kernel_func, + void* unboxed_kernel_func, + void* sym_unboxed_kernel_func); + explicit KernelFunction( + BoxedKernel boxed_fn, + void* unboxed_kernel_func, + void* sym_unboxed_kernel_func); + + BoxedKernel boxed_kernel_func_; + void* unboxed_kernel_func_; + void* sym_unboxed_kernel_func_; +}; + +} + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..0d6149c8090a9a4ba914485fba0fdccca5fc21de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h @@ -0,0 +1,229 @@ +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +inline KernelFunction::KernelFunction() + : boxed_kernel_func_() + , unboxed_kernel_func_(nullptr) + , sym_unboxed_kernel_func_(nullptr) +{} + +inline KernelFunction::KernelFunction(std::unique_ptr functor, InternalBoxedKernelFunction* boxed_kernel_func, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr) + : boxed_kernel_func_(std::move(functor), boxed_kernel_func) + , unboxed_kernel_func_(unboxed_kernel_func) + , sym_unboxed_kernel_func_(sym_unboxed_kernel_func) +{} 
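+
+// Illustrative note (not part of the original file): when a KernelFunction is
+// built via makeFromUnboxedFunctor below, exactly one of the two raw function
+// pointers is populated, depending on whether the kernel signature mentions
+// SymInt. E.g., for hypothetical kernels
+//   Tensor f(const Tensor&, int64_t);     // stored in unboxed_kernel_func_
+//   Tensor g(const Tensor&, c10::SymInt); // stored in sym_unboxed_kernel_func_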
+ +inline KernelFunction::KernelFunction(BoxedKernel boxed_fn, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr) + : boxed_kernel_func_(std::move(boxed_fn)) + , unboxed_kernel_func_(unboxed_kernel_func) + , sym_unboxed_kernel_func_(sym_unboxed_kernel_func) +{} + +inline bool KernelFunction::isValidUnboxed() const { + return unboxed_kernel_func_ != nullptr; +} + +inline bool KernelFunction::isValidSymUnboxed() const { + return sym_unboxed_kernel_func_ != nullptr; +} + +inline bool KernelFunction::isValid() const { + return boxed_kernel_func_.isValid(); +} + +inline bool KernelFunction::isFallthrough() const { + return boxed_kernel_func_.isFallthrough(); +} + +inline void KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const { + boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack); +} + +template +inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) { + using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...); + ActualSignature* func = reinterpret_cast(unboxed_kernel_func); + return (*func)(functor, dispatchKeySet, std::forward(args)...); +} + +// This template requires you to explicitly specify the argument you want to +// forward; it doesn't work if you try to deduce it +// NB: keep this in sync with cloneWithRealTypes in function_schema.cpp + +template +inline typename remove_symint::type unpackSymInt(T x) { return x; } + +template <> +inline typename remove_symint::type unpackSymInt(c10::SymInt x) { + return x.guard_int(__FILE__, __LINE__); +} + +template <> +inline typename remove_symint::type unpackSymInt(c10::SymIntArrayRef x) { + return C10_AS_INTARRAYREF_SLOW(x); +} + +template <> +inline typename remove_symint>::type unpackSymInt(c10::optional x) { + return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt; +} + +template <> +inline typename remove_symint::type unpackSymInt(at::OptionalSymIntArrayRef x) { + return x.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*x)) : c10::nullopt; +} + +template +C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const { + // note: Args above is intentionally not Args&&. We don't want perfect + // forwarding, which would require Args to be deduced, but instead we + // want callers to explicitly specify the Args. + + if constexpr (std::disjunction_v...>) { + if (sym_unboxed_kernel_func_ != nullptr) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction( + sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward(args)...); + } + + if (unboxed_kernel_func_ != nullptr) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction::type...>( + unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt(args)...); + } + } else { + if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction( + unboxed_kernel_func_, functor, dispatchKeySet, std::forward(args)...); + } + } + + return impl::BoxedKernelWrapper::call( + boxed_kernel_func_, + opHandle, + dispatchKeySet, + std::forward(args)... 
+ ); +} + +inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) { + return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunction() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunction()); +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunction() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunction()); +} + +inline KernelFunction KernelFunction::makeFallthrough() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFallthrough()); +} + +inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeAmbiguousAutogradOther()); +} + +inline KernelFunction KernelFunction::makeNamedNotSupported() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeNamedNotSupported()); +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr kernelFunctor) { +#ifndef NDEBUG + // This assertion is costly for build time so it's debug-gated. + static_assert(guts::is_functor::value, "Tried to call KernelFunction::makeFromUnboxedFunctor but the argument is not a functor."); +#endif + static_assert(std::is_base_of::value, "Tried to call KernelFunction::makeFromUnboxedFunctor, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + + auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed::call; + void* void_unboxed_fn = reinterpret_cast(unboxed_fn); + bool is_symint = fn_has_symint::value; + return KernelFunction( + std::move(kernelFunctor), + &impl::make_boxed_from_unboxed_functor::call, + is_symint ? nullptr : void_unboxed_fn, + is_symint ? void_unboxed_fn : nullptr + ); +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr kernelFunctor) { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunctor(std::move(kernelFunctor))); +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) { + static_assert(is_compile_time_function_pointer::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN."); + static_assert(!std::is_same::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead."); + static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr"); + +#if !defined(C10_MOBILE) + (void)func_ptr; // Suppress unused variable warning + return makeFromUnboxedFunctor::type>( + guts::make_unique_base::type>() + ); +#else + // On mobile, we rather want to optimize for binary size than for performance, + // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction + // instead. + return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr()); +#endif +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) { + static_assert(guts::is_function_type::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type."); + static_assert(!std::is_same::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. 
Please use KernelFunction::makeFromBoxedFunction instead."); + TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr"); + + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(func) + ); +} + +template +inline std::enable_if_t>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) { + static_assert(guts::is_functor>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type."); + +#if !defined(C10_MOBILE) + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(std::forward(lambda)) + ); +#else + // On mobile, we rather want to optimize for binary size than for performance, + // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction + // instead. + using FuncType = typename guts::infer_function_traits_t>::func_type; + return makeFromUnboxedRuntimeFunction(lambda); +#endif +} + +template +inline std::enable_if_t>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) { + static_assert(guts::is_functor>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type."); + + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(std::forward(lambda)) + ); +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..82c68935540e2a066733083f19f7765ad14045d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/OperatorKernel.h @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace c10 { + +/** + * Inherit from OperatorKernel to implement a c10 kernel. + * + * Example: + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * + * The kernel class is allowed to have members but these are equivalent + * to global variables. The kernel implementation is responsible for + * preventing race conditions on them. + * + * See below for how to register this kernel with PyTorch. + */ +struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target { + ~OperatorKernel() override = default; +}; + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..c8d7687cde3f74348ad1f73deacc2af1d03c8da9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace c10 { +namespace impl { + namespace detail { + template class WrapFunctionIntoFunctor_ {}; + template + class WrapFunctionIntoFunctor_> final : public c10::OperatorKernel { + public: + C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) { + return (*FuncPtr::func_ptr())(std::forward(args)...); + } + }; + } + + // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor. + // Since it is a compile time function pointer, many compilers can inline it + // into the wrapper and you don't get any performance overhead for wrapping. 
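+  //
+  // An illustrative sketch (not part of the original file), assuming a
+  // hypothetical kernel `add_one` and the TORCH_FN_TYPE macro:
+  //   Tensor add_one(const Tensor& t);
+  //   using Wrapped = WrapFunctionIntoFunctor<TORCH_FN_TYPE(add_one)>::type;
+  //   // Wrapped is an OperatorKernel functor whose operator() forwards to add_one.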
+ template + struct WrapFunctionIntoFunctor final { + static_assert(c10::is_compile_time_function_pointer::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN."); + using type = detail::WrapFunctionIntoFunctor_< + FuncPtr, + typename guts::function_traits::return_type, + typename guts::function_traits::parameter_types + >; + }; +} + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h new file mode 100644 index 0000000000000000000000000000000000000000..5308499edd439f3b82eab18a6963ab147271cf28 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h @@ -0,0 +1,600 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace c10 { + +using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace. +class OperatorHandle; + +/* + * [Note: Argument forwarding in the dispatcher] + * + * The dispatcher uses a somewhat unusual way to forward arguments through several layers of + * wrapper functions. This can be confusing because an experienced C++ programmer would look at this + * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.". + * It is not a bug. The common way in C++ to forward arguments is to use universal references: + * + * > template void func(T&& arg) { func2(std::forward(arg)); } + * + * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument. + * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a + * different reference type than was used in the kernel function. The correct reference type + * is dictated by the kernel signature and must be identical since we cast function pointers + * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines + * the reference type to use by looking at the explicitly supplied operator signature, not by looking at + * the argument we're calling it with. + * + * What does std::forward do, exactly? + * ------------------------------------ + * std::forward(t) is a way to cast t to the reference type supplied in T. + * Let's assume decay_t == U and T is either U or some reference of U. + * - std::forward(t) will return U&, no matter what kind of reference t is. + * - std::forward(t) will return U&&, no matter what kind of reference t is. + * - std::forward(t) will return U&& (not U!), no matter what kind of reference t is. + * + * For universal references, that means that in the following function + * > template void func(T&& arg) { func2(std::forward(arg)); } + * + * - when called with arg being a rvalue reference or non-reference value, T gets inferred to be + * a non-reference U, and std::forward(t) will return U&&, correctly moving the argument. + * - when called with arg behind a lvalue reference, T gets inferred to be U& because that's the only + * way to match the signature (in C++, a type that is (T&)&& will collapse to T&). + * That means std::forward(t) will return U& and the value will not be moved but passed on as + * a lvalue reference. + * + * How do we use that? 
+ * ------------------------------------ + * But std::forward can also be used outside of the common "universal forwarding" pattern to change + * reference types. So instead of following the common C++ pattern, we notice what + * std::forward() actually does, and that is it takes a value and changes its reference to the + * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this + * to forward based on an explicitly specified reference type instead of the inferred argument type. + * + * This is why many of the dispatcher functions look like + * > template func(T t) { func2(std::forward(t)); } + * instead of the common + * > template func(T&& t) { func2(std::forward(t)); } + * + * and are expected to be called by explicitly specifying the template parameters in a way that matches + * the expected operator signature at each call site. + */ + +namespace impl { + // supported_primitive_arg_types defines which primitive types we allow in + // kernel functions as arguments or returns. + // Additionally, we support lists, dicts and optionals containing these types. + using supported_primitive_arg_types = guts::typelist::typelist< + int64_t, + double, + bool, + c10::string_view, + at::Tensor, + at::Scalar, + c10::QScheme, + c10::ScalarType, + c10::Device, + c10::DeviceIndex, + c10::Layout, + c10::MemoryFormat, + at::Dimname + >; + + // We have an unboxed functor in hand that takes C++ arguments, and + // we're building a boxed functor wrapper for it that takes IValues. + // So "outside" is boxed and "inside" is unboxed. + // + // So a valid input type is one that our boxed functor wrapper can + // unbox from an IValue into a C++ value. + // + // Whereas a valid output type is one that our wrapper can recieve + // as a C++ value from the unboxed functor, and box into an IValue. + + // + // assert_is_valid_input_type + // checks that T can be unboxed from an IValue into a C++ value. + // + + template + struct assert_is_valid_input_type { + assert_is_valid_input_type() { + if constexpr (guts::typelist::contains::value) { + /* everything is ok, this is a primitive type */ + } else { + /* otherwise this must be an instance of a valid custom class, since it can only + have been created via IValue(x), which ensures this. */ + } + } + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type {}; + + template + struct TypeCheckHelper; + + template + struct TypeCheckHelper {}; + + template + struct TypeCheckHelper + : TypeCheckHelper { + assert_is_valid_input_type check; + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : TypeCheckHelper {}; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported input type: Dict where Key is invalid. We only support int64_t, double, bool, and string."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(AllowDeprecatedTypes, + "You tried to register a kernel with an unsupported input type: std::unordered_map. Please use Dict instead."); + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported input type: std::unordered_map where Key is invalid. 
We only support int64_t, double, bool, and string."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: List. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: ArrayRef. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: OptionalArrayRef. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: std::array. Please use std::array instead."); + }; + + template + struct assert_is_valid_input_type::value>> { + // There is no reason to support float when we have double. Keep the API lean. + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: float. Please use double instead."); + }; + template + struct assert_is_valid_input_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead."); + }; + template + struct assert_is_valid_input_type, T>::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: vector. Please use List instead."); + }; + template + struct assert_is_valid_input_type::value && !guts::typelist::contains::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead."); + }; + template + struct assert_is_valid_input_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead."); + }; + + // TODO: it probably would be good to tighten this up quite a bit more with + // an explicit list for everything + + // + // assert_is_valid_output_type + // + + template + struct assert_is_valid_output_type { + assert_is_valid_output_type() { + if constexpr(guts::typelist::contains::value) { + /* everything is ok, this is a primitive type */ + } else { + /* otherwise T is verified to be a registered custom class in the IValue + constructor, so no benefit in double-checking here */ + } + } + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type {}; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type {}; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported output type: Dict where Key is invalid. We only support int64_t, double, bool, and string."); + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: Dict. 
Please use Dict or Dict."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(AllowDeprecatedTypes, + "You tried to register a kernel with an unsupported output type: std::unordered_map. Please use Dict instead."); + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported output type: std::unordered_map where Key is invalid. We only support int64_t, double, bool, and string."); + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::unordered_map. Please use Dict or Dict."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: List. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::vector. Please use List, List or Tensor instead."); + // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector. Please use List instead."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::array. Please use std::array instead."); + }; + + // The following specialisations of assert_is_valid_output_type are technically not + // necessary since we would hit the base case and show an error message + // there if they didn't exist, but we can show a better error message + // in some common error scenarios. + template + struct assert_is_valid_output_type::value>> { + // There is no reason to support float when we have double. Keep the API lean. + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: float. Please use double instead."); + }; + template + struct assert_is_valid_output_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead."); + }; + template + struct assert_is_valid_output_type, T>::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: vector. Please use List instead."); + }; + template + struct assert_is_valid_output_type::value && !guts::typelist::contains::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead."); + }; + + // ivalue_to_arg + + template + struct decay_if_not_tensor final { + using type = std::decay_t; + }; + + template<> + struct decay_if_not_tensor final { + using type = at::Tensor&; + }; + + template<> + struct decay_if_not_tensor final { + using type = const at::Tensor&; + }; + + template + struct ivalue_to_arg final { + static decltype(auto) call(IValue& v) { + assert_is_valid_input_type(); + return std::move(v).to(); + } + }; + + // The following two specializations take advantage of specialized + // `toTensor()` overloads on IValue to avoid copying. 
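+  // (Illustrative note, not part of the original file: for a kernel parameter
+  // declared as `const at::Tensor&`, decay_if_not_tensor keeps the reference
+  // type, so the specializations below can hand out a reference into the
+  // IValue on the stack instead of materializing a fresh Tensor handle.)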
+ template + struct ivalue_to_arg final { + // We cannot use the default implementation if they asked for a + // `at::Tensor&` because it moves from the IValue, so it can't get + // an lvalue reference. + static at::Tensor& call(IValue& v) { + // Tensor& is valid, don't bother asserting + return v.toTensor(); + } + }; + + template + struct ivalue_to_arg final { + // We should not use the default implementation if they asked for + // a `const at::Tensor&` because it moves from the IValue and they + // didn't ask for that. + static const at::Tensor& call(IValue& v) { + // const Tensor& is valid, don't bother asserting + return v.toTensor(); + } + }; + + template + struct ivalue_to_arg final { + static List call(IValue& v) { + return v.toTensorList(); + } + }; + + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + // If an argument is ArrayRef, convert the IValue to a std::vector and pass that + // to the operator. std::vector is implicitly convertible to ArrayRef. + static std::vector call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + template + struct ivalue_to_arg final { + static std::vector call(IValue& v) { + if (v.isIntList()) { + std::vector r; + auto src = v.toIntList(); + std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); }); + return r; + } else { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + } + }; + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + static OptionalArray call(IValue& v) { + if (v.isIntList()) { + std::vector r; + auto src = v.toIntList(); + std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); }); + return OptionalArray(std::move(r)); + } else { + return std::move(v).to>(); + } + } + }; + template + struct ivalue_to_arg>, AllowDeprecatedTypes> final { + // If an argument is optional>, convert the IValue to an optional> and pass that + // to the operator. OptionalArray is basically a optional> but implicitly convertible + // to optional>. + static OptionalArray call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + // If an argument is OptionalArrayRef, convert the IValue to an + // optional> and pass that to the operator. OptionalArray + // is basically a optional> but implicitly convertible to + // OptionalArrayRef + static OptionalArray call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + + // return_to_ivalue + template + struct return_to_ivalue final {}; + + template + struct return_to_ivalue::value>> final { + static IValue call(T&& v) { + assert_is_valid_output_type(); + return c10::ivalue::from(std::move(v)); + } + static IValue copy(const T& v) { + assert_is_valid_output_type(); + return IValue(v); + } + }; + + // Special case to allow kernels to return `Tensor&`. + // TODO Delete this once kernels don't do that anymore + template + struct return_to_ivalue final { + static IValue call(at::Tensor& v) { + return c10::ivalue::from(v); + } + static IValue copy(at::Tensor& v) { + return IValue(v); + } + }; + + // wrap_kernel_functor_unboxed_ + + template + struct wrap_kernel_functor_unboxed_ final {}; + + // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet + // This includes kernels with 0 arguments. 
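+  //
+  // Illustrative example (not part of the original file): for a functor whose
+  // operator() has the signature
+  //   Tensor(const Tensor&, int64_t)
+  // the generated wrapper function has the signature
+  //   Tensor(OperatorKernel*, DispatchKeySet, const Tensor&, int64_t)
+  // and simply drops the DispatchKeySet before calling the functor.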
+ template + struct wrap_kernel_functor_unboxed_ final { + static_assert(std::is_same::return_type>::value, + "Return type mismatch"); + static_assert(std::is_same, typename guts::infer_function_traits_t::parameter_types>::value, + "Parameter types mismatch"); + + // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use && + static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) { + KernelFunctor* functor_ = static_cast(functor); + // Note [Plumbing Keys Through The Dispatcher 2] + // See Note [Plumbing Keys Through The Dispatcher] for the background. + // This functor explicitly takes in a dispatchKeySet and drops it on the floor- it does not forward it to the registered kernel. + // + // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type + // DispatchKeySet. + // This is not the case for pretty much all manually written kernels, however- this functor serves to separate the calling convention + // of the dispatcher from the calling convention of manually written kernels. + return (*functor_)(std::forward(args)...); + } + }; + + // This specialization is for kernels with a first argument of type DispatchKeySet + template + struct wrap_kernel_functor_unboxed_ final { + static_assert(std::is_same::return_type>::value, + "Return type mismatch"); + static_assert(std::is_same, typename guts::infer_function_traits_t::parameter_types>::value, + "Parameter types mismatch"); + + // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use && + static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) { + KernelFunctor* functor_ = static_cast(functor); + // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel. + // See Note [Plumbing Keys Through The Dispatcher 2] for details. + return (*functor_)(dispatchKeySet, std::forward(args)...); + } + }; + + template + using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_::func_type>; + + // call_functor_with_args_from_stack + + template + std::decay_t::return_type> + call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence, guts::typelist::typelist*) { + (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning. + + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + return wrap_kernel_functor_unboxed::call(functor, dispatchKeySet, + ivalue_to_arg::type, AllowDeprecatedTypes>::call( + torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices)) + )...); + } + + template + std::decay_t::return_type> + call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) { + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. 
+ // See Note [Plumbing Keys Through The Dispatcher] for the background. + using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func::parameter_types; + constexpr size_t num_ivalue_args = guts::typelist::size::value; + return call_functor_with_args_from_stack_(functor, dispatchKeySet, stack, std::make_index_sequence(), static_cast(nullptr)); + } + + // push_outputs + + template + struct push_outputs final { + // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here + // to avoid one extra call to the move constructor in this case. This is still not a + // universal reference though because OutputType is an explicitly specified class + // template parameter. + static void call(OutputType&& output, Stack* stack) { + torch::jit::push(*stack, return_to_ivalue::call(std::forward(output))); + } + static void copy(const OutputType& output, Stack* stack) { + torch::jit::push(*stack, return_to_ivalue::copy(output)); + } + }; + template + struct push_outputs, AllowDeprecatedTypes> final { + static void call(std::tuple&& output, Stack* stack) { + call_(std::move(output), stack, std::make_index_sequence()); + } + static void copy(const std::tuple& output, Stack* stack) { + copy_(output, stack, std::make_index_sequence()); + } + + private: + template + static void call_(std::tuple&& output, Stack* stack, std::index_sequence) { + torch::jit::push(*stack, return_to_ivalue::call(std::forward(std::get(output)))...); + } + template + static void copy_(const std::tuple& output, Stack* stack, std::index_sequence) { + torch::jit::push(*stack, return_to_ivalue::copy(std::get(output))...); + } + }; + template + struct push_outputs final { + static void call(int /*dummy*/, Stack* /*stack*/) { + } + static void copy(int /*dummy*/, Stack* /*stack*/) { + } + }; + + // make_boxed_from_unboxed_functor + + template + struct make_boxed_from_unboxed_functor final { + static_assert(std::is_base_of::value, + "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + + static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) { + using ReturnType = typename guts::infer_function_traits_t::return_type; + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func::parameter_types; + constexpr bool has_outputs = !std::is_same::value; + constexpr size_t num_inputs = guts::typelist::size::value; + if constexpr (has_outputs) { + // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value + // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`. 
+ // [Note: VC++ and 'std': ambiguous symbol] + using ReturnType_ = ::std::decay_t; + ReturnType_ output = call_functor_with_args_from_stack(functor, dispatchKeySet, stack); + torch::jit::drop(*stack, num_inputs); + // See note [ VC++ and 'std': ambiguous symbol] + push_outputs::call(::std::move(output), stack); + } else { + call_functor_with_args_from_stack(functor, dispatchKeySet, stack); + torch::jit::drop(*stack, num_inputs); + } + } + }; +} // namespace impl + +} // namespace c10 + +namespace torch { + using OperatorKernel = c10::OperatorKernel; +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..93b11dc853f00f2ac06ebfd361b6ee02986cfd1f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +template +inline std::vector makeStack(Inputs&&... inputs) { + return {std::forward(inputs)...}; +} + +inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) { + auto* allocator = c10::GetCPUAllocator(); + int64_t nelements = 1; + auto dtype = caffe2::TypeMeta::Make(); + int64_t size_bytes = nelements * dtype.itemsize(); + auto storage_impl = c10::make_intrusive( + c10::StorageImpl::use_byte_size_t(), + size_bytes, + allocator->allocate(size_bytes), + allocator, + /*resizable=*/true); + at::Tensor t = at::detail::make_tensor(storage_impl, ks, dtype); + // TODO: We add this to simulate the ideal case where we only have Autograd backend keys + // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl + // constructor by default. + if (!requires_grad) { + t.unsafeGetTensorImpl()->remove_autograd_key(); + } + return t; +} + +inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) { + return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad); +} + +template +inline std::vector callOp(const c10::OperatorHandle& op, Args... args) { + auto stack = makeStack(std::forward(args)...); + op.callBoxed(&stack); + return stack; +} + +template +inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) { + return op.typed().call(std::forward(args)...); +} + +template +inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) { + return op.typed().callWithDispatchKey(dispatchKey, std::forward(args)...); +} + +template +inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... 
args) { + return op.typed().redispatch(ks, std::forward(args)...); +} + +inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_ANY_THROW( + callOp(*op, dummyTensor(dispatch_key), 5); + ); +} + +inline void expectDoesntFindOperator(const char* op_name) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_FALSE(op.has_value()); +} + +template +inline void expectThrows(Functor&& functor, const char* expectMessageContains) { + try { + std::forward(functor)(); + } catch (const Exception& e) { + EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains)); + return; + } + ADD_FAILURE() << "Expected to throw exception containing \"" + << expectMessageContains << "\" but didn't throw"; +} + +template +void expectListEquals(c10::ArrayRef expected, std::array actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::ArrayRef actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::List actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual.get(i)); + } +} + +template +void expectListEquals(c10::ArrayRef expected, std::vector actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +// NB: This is not really sound, but all of the type sets constructed here +// are singletons so it's fine +static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) { + return legacyExtractDispatchKey(t.key_set()); +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h new file mode 100644 index 0000000000000000000000000000000000000000..57409442950f275406ce6ab6d02755277082b466 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h @@ -0,0 +1,160 @@ +#pragma once + +/** + * This file contains functionality to take a C++ function and infer its + * c10::FunctionSchema. + */ + +#include +#include + +namespace c10 { +namespace detail { + +namespace infer_schema { + +/// The templated inference code creates `ArgumentDef` instead of `Argument`, +/// because that can be constructed at compile time and has a much smaller +/// binary size than having calls to `Argument` constructors in the template. +/// Creating `Argument` objects from `ArgumentDef` can then be done at +/// runtime in a non-templated way. +struct ArgumentDef final { + using GetTypeFn = TypePtr(); + GetTypeFn* getTypeFn; + GetTypeFn* getFakeTypeFn; + constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {} + explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {} +}; + +template +struct bool_t {}; +template<> struct bool_t : std::true_type {}; +template<> struct bool_t : std::false_type {}; + +/// Checks the static C++ types `Types` for correctness to catch common error cases. 
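+/// For example (illustrative, not part of the original file), a kernel declared
+/// as `int32_t f(float)` fails both static_asserts below: use `int64_t` and
+/// `double` instead.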
+template +constexpr int checkStaticTypes() { + // Give nice error messages for some of the common error cases. + // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT + static_assert(std::conjunction< + bool_t::value || std::is_same::value || std::is_same::value || std::is_same::value>... + >::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type"); + static_assert(std::conjunction< + bool_t::value>... + >::value, "INVALID TYPE: float is not supported as an argument type, use double instead"); + return 0; +} + +template +constexpr std::array createArgumentVectorFromTypes(std::index_sequence) { + return ( + // Check types for common errors + checkStaticTypes(), + + // Create the return value + std::array{ + ArgumentDef(&getTypePtrCopy>, &getFakeTypePtrCopy>)...} + ); +} + +/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified +/// as template arguments. +template struct createArguments final {}; +template +struct createArguments> final { + static constexpr std::array call() { + return createArgumentVectorFromTypes( + std::make_index_sequence() + ); + } +}; + +/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified +/// as a tuple (i.e. in the way c10 kernels return values). +/// It can be a tuple if there's three output arguments with types A, B, C. +/// It can be an empty tuple<>, or void for kernels that don't return anything. +/// It can be a single type A (i.e. no tuple) for the case where a kernel just +/// returns one value. +template struct createReturns final {}; + +template +struct createReturns, void> final { + static constexpr std::array call() { + return createArgumentVectorFromTypes( + std::make_index_sequence() + ); + } +}; + +template +struct createReturns::value && !guts::is_instantiation_of::value>> final { + static constexpr std::array call() { + return createReturns>::call(); + } +}; + +template<> +struct createReturns final { + static constexpr std::array call() { + return createReturns>::call(); + } +}; + +template +struct createSingleReturn { + static constexpr std::array call() { + return createArgumentVectorFromTypes(std::make_index_sequence<1>()); + } +}; + +TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef arguments, c10::ArrayRef returns); +TORCH_API FunctionSchema make_function_schema(c10::ArrayRef arguments, c10::ArrayRef returns); + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. Flattens std::tuple returns into multiple return types +template +FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createReturns::call(); + + return make_function_schema(arguments, returns); +} + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. 
Preserves std::tuple returns as a Tuple return type +template +FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createSingleReturn::call(); + + return make_function_schema(std::move(name), std::move(overload_name), arguments, returns); +} + +} +} + +template +FunctionSchema inferFunctionSchemaFlattenedReturns() { + return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns>(); +} + +template +FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) { + return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn>(std::move(name), std::move(overload_name)); +} + +TORCH_API c10::optional findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h new file mode 100644 index 0000000000000000000000000000000000000000..0b083dc6b6759654d264cfc822302d5c3c648143 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h @@ -0,0 +1,596 @@ +#pragma once + +/** + * Include this file if you want to register operators. It includes all + * functionality needed to do so for you. + */ + +#include +#include +#include +#include +#include +#include +#include +#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) +#include +#endif +#include + +namespace c10 { + +namespace detail { +// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. +// We do this because every argument in a function schema is expected to be convertable +// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. +// See Note [Plumbing Keys Through The Dispatcher] +template +std::unique_ptr inferFunctionSchemaFromFunctor() { + using func_type = typename c10::remove_DispatchKeySet_arg_from_func::func_type; + return std::make_unique(inferFunctionSchemaFlattenedReturns()); +} +} + +/** + * An instance of this class handles the registration for one or more operators. + * Make sure you keep the RegisterOperators instance around since it will + * deregister the operator it's responsible for in its destructor. 
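+ *
+ * Because deregistration happens in the destructor, a registration can also be
+ * scoped. An illustrative sketch, reusing the my_kernel_cpu functor from the
+ * example below:
+ *
+ * > {
+ * >   auto local_registry = c10::RegisterOperators()
+ * >     .op(c10::RegisterOperators::options()
+ * >       .schema("my_op")
+ * >       .kernel<my_kernel_cpu>(DispatchKey::CPU));
+ * >   // "my_op" is registered while local_registry is alive ...
+ * > }
+ * > // ... and is deregistered once local_registry goes out of scope.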
+ * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ +class TORCH_API RegisterOperators final { +public: + RegisterOperators() = default; + ~RegisterOperators() = default; + + RegisterOperators(const RegisterOperators&) = delete; + RegisterOperators& operator=(const RegisterOperators&) = delete; + RegisterOperators(RegisterOperators&&) noexcept = default; + RegisterOperators& operator=(RegisterOperators&&) noexcept = default; + + class TORCH_API Options final { + public: + Options(const Options&) = delete; + Options(Options&&) noexcept = delete; + Options& operator=(const Options&) = delete; + Options& operator=(Options&&) noexcept = delete; + + // internal-only for registering stack based kernels + template + Options&& kernel(DispatchKey dispatch_key) && { + return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction(), nullopt, nullptr); + } + + // internal-only for registering stack based catch-all kernels + template + Options&& catchAllKernel() && { + return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction(), nullopt, nullptr); + } + + // internal only for registering caffe2 ops + Options&& schema(FunctionSchema&& schema) { + TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration."); + schemaOrName_ = FunctionSchema(std::move(schema)); + return std::move(*this); + } + + /** + * Use this to specify the schema for an operator. You can also specify + * the operator name only to have the function signature part of the + * schema be inferred from the kernel function. + * + * Example: + * + * > // Infer function signature from my_kernel_cpu + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * > + * > + * > // Explicitly specify full schema + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op(Tensor a) -> Tensor") + * > .kernel(DispatchKey::CPU)); + */ + Options&& schema(const std::string& schemaOrName) { + TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration."); + + #if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD) + throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build."); + #else + schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName); + #endif + + return std::move(*this); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. 
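+ *
+ * A sketch of calling such an operator afterwards through the dispatcher
+ * (assuming "my_op" was registered as in the example below; the schema string
+ * and the Tensor(Tensor, Tensor) signature are illustrative):
+ *
+ * > auto op = c10::Dispatcher::singleton().findSchemaOrThrow("my_op", "");
+ * > Tensor result = op.typed<Tensor(Tensor, Tensor)>().call(a, b);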
+ * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, "some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) must match one of the constructors of Functor."); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel("some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) 
must match one of the constructors of Functor."); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel() && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) 
API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel(FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. A functor kernel with cache gets a new instance of + // its cache each time the kernel is looked up from the dispatch table. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // So, instead of making users having to think about it (including the thread-safety + // issues this causes), let's just forbid stateful lambdas altogether. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedLambda(std::forward(functor)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. 
+ * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel([] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> catchAllKernel(Lambda&& lambda) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // This would be a likely source for unexpected race conditions, so we forbid it. + // If a kernel really needs global state, they can just have regular global state + // in their .cpp file next to the kernel lambda. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && { + TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration."); + aliasAnalysisKind_ = aliasAnalysisKind; + return std::move(*this); + } + + private: + Options&& kernel(c10::optional dispatch_key, KernelFunction&& func, c10::optional cpp_signature, std::unique_ptr&& inferred_function_schema) && { + KernelRegistrationConfig config; + config.dispatch_key = dispatch_key; + config.func = std::move(func); + config.cpp_signature = cpp_signature; + config.inferred_function_schema = std::move(inferred_function_schema); + kernels.push_back(std::move(config)); + return std::move(*this); + } + + Options() + : schemaOrName_(c10::nullopt) + , kernels() + , aliasAnalysisKind_(c10::nullopt) + {} + + // KernelRegistrationConfig accumulates all information from the config + // parameters passed to a RegisterOperators::op() call into one object. + struct KernelRegistrationConfig final { + KernelRegistrationConfig() + : dispatch_key(c10::nullopt) + , func() + , cpp_signature(c10::nullopt) + , inferred_function_schema(nullptr) + {} + + c10::optional dispatch_key; + KernelFunction func; + c10::optional cpp_signature; + std::unique_ptr inferred_function_schema; + }; + + c10::optional> schemaOrName_; + + std::vector kernels; + optional aliasAnalysisKind_; + friend class RegisterOperators; + friend class Library; + }; + + /** + * Call this to get an instance of registration options, which + * can be passed to a call to RegisterOperators::op() to specify + * these options for the operator registration. + * See class doc comment for examples. + */ + static Options options() { + return {}; + } + + /** + * Call this to register an operator. 
See class doc comment for examples. + */ + RegisterOperators&& op(Options&& options) && { + checkSchemaAndRegisterOp_(std::move(options)); + return std::move(*this); + } + + // Regular mutator version of the && version above + RegisterOperators& op(Options&& options) & { + checkSchemaAndRegisterOp_(std::move(options)); + return *this; + } + + /** + * This is a shorthand for RegisterOperators::op(Options) where you can + * specify the operator schema outside of the options parameter. + * See class doc comment for examples. + */ + RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && { + return std::move(*this).op(std::move(options).schema(schemaOrName)); + } + + // internal only for registering caffe2 ops + RegisterOperators&& op(FunctionSchema schema, Options&& options) && { + return std::move(*this).op(std::move(options).schema(std::move(schema))); + } + + template + explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options()) + : RegisterOperators() { + std::move(*this).op(schemaOrName, std::forward(func), std::move(options)); + } + + /** + * This API registers an operator based on a kernel function pointer. + * + * Given a kernel + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", &my_kernel_cpu); + * + * If your kernel is small and the overhead of calling it matters, + * then this API might be the wrong choice since the following API + * has a slightly lower overhead for calling into the kernel: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + * + * Or, alternatively, write your kernel as a functor: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction. + std::enable_if_t::value && !std::is_same::value, RegisterOperators&&> + op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && { + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + /** + * This API registers an operator based on a kernel lambda. 
+ * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", [] (Tensor a, Tensor b) {...}); + * + * This is equivalent to: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .catchAllKernel([] (Tensor a, Tensor b) {...})); + * + */ + template + // enable_if: only enable it if Lambda is actually a stateless lambda + std::enable_if_t::value && guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + template + C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.") + // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda + std::enable_if_t::value && !guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. 
Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + +private: + void checkSchemaAndRegisterOp_(Options&& config); + + static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options); + void checkNoDuplicateKernels_(const Options& options); + void registerOp_(Options&& options); + + std::vector registrars_; +}; + +} // namespace c10 + +namespace torch { + // Old-style API + using RegisterOperators = c10::RegisterOperators; +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b0b1412298d7b65bd0354d234bbddd09f19031a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh @@ -0,0 +1,47 @@ +#include + +#include + +namespace at::cuda { + +/** + Computes ceil(a / b) +*/ +template +__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) { + return (a + b - 1) / b; +} + +namespace { + +// Threads per block for our apply kernel +// FIXME: use occupancy calculator instead +constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512; +constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4; + +template +inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + if (curDevice == -1) return false; + uint64_t numel_per_thread = static_cast(max_threads_per_block) * static_cast(step); + uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread); + uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0]; + if (numBlocks > maxGridX) + numBlocks = maxGridX; + grid = dim3(numBlocks); + return true; +} + +constexpr int getApplyBlocksPerSM() { + return AT_APPLY_BLOCKS_PER_SM; +} + +constexpr int getApplyBlockSize() { + return AT_APPLY_THREADS_PER_BLOCK; +} + +inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + return dim3(max_threads_per_block); +} + +} // anonymous namespace +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh new file mode 100644 index 0000000000000000000000000000000000000000..56ee8f87e253049421c1268a21258f52a729c3b2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh @@ -0,0 +1,508 @@ +#pragma once + +#include +#include +#include + +#include + +#if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))) +#include +#endif + +template +struct AtomicFPOp; + +template <> +struct AtomicFPOp { + template + inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::Half hsum; + do { + assumed = old; + hsum.x = (size_t)address & 2 ? 
(old >> 16) : (old & 0xffff); + hsum = func(hsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + return hsum; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::BFloat16 bsum; + do { + assumed = old; + bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + bsum = func(bsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + return bsum.x; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ double operator() (double * address, double val, const func_t& func) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull; + unsigned long long int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, func(val, assumed)); + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __longlong_as_double(old); + } +}; + +#define ATOMIC_INTEGER_IMPL(NAME) \ +template \ +struct Atomic##NAME##IntegerImpl; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 3; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + uint32_t old = *address_as_ui; \ + uint32_t shift = offset * 8; \ + uint32_t old_byte; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_byte = (old >> shift) & 0xff; \ + newval = static_cast(func(val, static_cast(old_byte))); \ + newval = (old & ~(0x000000ff << shift)) | (newval << shift); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 2; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + bool is_32_align = offset; \ + uint32_t old = *address_as_ui; \ + uint32_t old_bytes; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_bytes = is_32_align ? old >> 16 : old & 0xffff; \ + newval = static_cast(func(val, static_cast(old_bytes))); \ + newval = is_32_align ? 
(old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + uint32_t * address_as_ui = (uint32_t *) (address); \ + uint32_t old = *address_as_ui; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + unsigned long long * address_as_ui = (unsigned long long *) (address); \ + unsigned long long old = *address_as_ui; \ + unsigned long long newval; \ + unsigned long long assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; + + +# define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \ +static inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \ +Atomic##NAME##IntegerImpl()(address, \ + val, \ + [](DTYPE a, DTYPE b) { \ + return OP; \ + }); \ +} \ + +ATOMIC_INTEGER_IMPL(Add) +GPU_ATOMIC_INTEGER(Add, a || b, bool) + +// Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64) +static inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](uint8_t a, uint8_t b) { + return a + b; + }); +} + +static inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int8_t a, int8_t b) { + return a + b; + }); +} + +static inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int16_t a, int16_t b) { + return a + b; + }); +} + +static inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) { + return atomicAdd(address, val); +} + +static inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) { +#if defined(USE_ROCM) + __atomic_fetch_add(address, val, __ATOMIC_RELAXED); +#else + static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed"); + atomicAdd(reinterpret_cast(address), static_cast(val)); +#endif +} + +static inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) { +#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700))) + return AtomicFPOp()(address, val, + [](at::Half hsum, at::Half val) { + return hsum + val; + }); +#else + return atomicAdd(reinterpret_cast<__half*>(address), val); +#endif +} + +static inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) { +#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))) +return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum + val; + }); +#else + __nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val)); + return *reinterpret_cast(&r); +#endif +} + +#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600) +// from CUDA C Programmic Guide +static inline __device__ double atomicAdd(double* address, double val) +#if defined(__clang__) && defined(__CUDA__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored 
"-Wgcc-compat" + __attribute__((enable_if(true, ""))) +#pragma GCC diagnostic pop +#endif +{ + + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val + __longlong_as_double(assumed)); + }); +} +#elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__)) + +/* Note [hip-clang differences to hcc] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The upcoming hip-clang compiler for ROCm differs from hcc in a few details. + * It exports the __HIP__ macro, we can hence differentiate between hcc and + * hip-clang. In the below, hcc only received support for atomicAdd with double + * typing after work week 18312. hip-clang had support from the first version. + * In general, the code-visible differences between hip-clang and hcc will be + * minimal. + */ + +#if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__ + // This needs to be defined for the host side pass + static inline __device__ double atomicAdd(double *address, double val) { } +#endif +#endif + +static inline __device__ double gpuAtomicAdd(double *address, double val) { + return atomicAdd(address, val); +} + +static inline __device__ float gpuAtomicAdd(float *address, float val) { + return atomicAdd(address, val); +} + +template +static inline __device__ void gpuAtomicAdd(c10::complex *address, c10::complex val) { + gpuAtomicAdd(&address->real_, val.real_); + gpuAtomicAdd(&address->imag_, val.imag_); +} + +/* Note [gpuAtomicAdd vs atomicAdd] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Some extensions such as torchvision call atomicAdd() + * directly and require non-library provided data type support. Only for these, we + * continue to provide atomicAdd overloads. + */ +static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) { + return gpuAtomicAdd(address, val); +} + +static inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) { + return gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int8_t *address, int8_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int16_t *address, int16_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int64_t *address, int64_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(bool *address, bool val) { + gpuAtomicAdd(address, val); +} + +/* Note [explicitly non-returning atomics] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet(). + * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction. + * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd, + * therefore we need a new API 'gpuAtomicAddNoReturn'. 
+ */ +template +static inline __device__ void gpuAtomicAddNoReturn(c10::complex *address, c10::complex val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); } + +/* Special case fp32 atomic. */ +#if defined(USE_ROCM) +static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); } +#else +static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); } +#endif + +// Atomic multiplication implementation. + +ATOMIC_INTEGER_IMPL(Mul) +GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int16_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int32_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int64_t) + +inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return bsum * val; + }); +} + +inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum * val; + }); +} + +inline __device__ double gpuAtomicMul(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val * __longlong_as_double(assumed)); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMul (float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(val * + __int_as_float(assumed))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic maximum implementation. + +template +__host__ __device__ T safe_max(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b)); + #else + T max = at::_isnan(b) ? 
b : std::max(a, b); + #endif + + return max; +} + +ATOMIC_INTEGER_IMPL(Max) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_max(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_max(bsum, val); + }); +} + +inline __device__ double gpuAtomicMax(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_max(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMax(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_max(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic minimum implementation. + +template +__host__ __device__ T safe_min(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b)); + #else + T min = at::_isnan(b) ? b : std::min(a, b); + #endif + + return min; +} + +ATOMIC_INTEGER_IMPL(Min) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_min(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_min(bsum, val); + }); +} + +inline __device__ double gpuAtomicMin(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_min(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. 
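+// The float specialization below mirrors gpuAtomicMax above: a compare-and-swap
+// loop over the 32-bit representation, with the loop exit comparing integers so
+// a stored NaN cannot make it spin forever, and safe_min supplying the NaN-aware
+// minimum.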
+inline __device__ float gpuAtomicMin(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_min(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h new file mode 100644 index 0000000000000000000000000000000000000000..eb12bb350c5987187b85aaf68b9228b1919f6e49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h @@ -0,0 +1,375 @@ +#pragma once +/* + Provides a subset of CUDA BLAS functions as templates: + + gemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, + ldc) + + gemv(transa, m, n, alpha, a, lda, x, incx, beta, y, incy) + + dot(n, x, incx, y, incy, result) + + where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot). + The functions are available in at::cuda::blas namespace. + */ + +#include +#include + +namespace at::cuda::blas { + +// RAII guard that sets the CuBLAS pointer mode and restores it to +// its previous value when the guard is destroyed +class PointerModeGuard { +public: + PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) : + handle(handle) { + TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode)); + TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode)); + } + + ~PointerModeGuard() { + cublasSetPointerMode(handle, previous_mode); + } + +private: + cublasHandle_t handle; + cublasPointerMode_t previous_mode; +}; + +/* LEVEL 3 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type beta,\ + Dtype *c, int64_t ldc + +#define CUDABLAS_GEMM_ARGS(Dtype) transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc + +template +inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemm: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(float)); +template <> +void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::Half)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); + +template +inline void gemm_internal(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemm_internal: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(float)); +template <> +void gemm_internal>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm_internal>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(at::Half)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); + +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) +enum GEMMAndBiasActivationEpilogue { + None, + RELU, + GELU, +}; + +// NOTE: GELU activation is not supported prior to CUDA 11.4 and will +// do nothing if passed 
in that case. +template +void gemm_and_bias( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + at::opmath_type alpha_val, + const Dtype* mat1_ptr, + int64_t mat1_ld, + const Dtype* mat2_ptr, + int64_t mat2_ld, + const Dtype* bias, + Dtype* result_ptr, + int64_t result_ld, + GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None); + +void int8_gemm( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + const int8_t* mat1_ptr, + int64_t mat1_ld, + const int8_t* mat2_ptr, + int64_t mat2_ld, + int32_t* result_ptr, + int64_t result_ld); + +void scaled_gemm( + char transa, + char transb, + int64_t m, + int64_t n, + int64_t k, + const void* mat1_ptr, + const void* mat1_scale_ptr, + int64_t mat1_ld, + ScalarType mat1_dtype, + const void* mat2_ptr, + const void* mat2_scale_ptr, + int64_t mat2_ld, + ScalarType mat2_dtype, + const void* bias_ptr, + ScalarType bias_dtype, + void* result_ptr, + const void* result_scale_ptr, + int64_t result_ld, + ScalarType result_dtype, + void* amax_ptr, + bool use_fast_accum); +#endif + +#define CUDABLAS_BGEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, int64_t stridea, \ + const Dtype *b, int64_t ldb, int64_t strideb, \ + at::opmath_type beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches + +#define CUDABLAS_BGEMM_ARGS(Dtype) \ + transa, transb, m, n, k, alpha, a, lda, stridea, b, ldb, strideb, beta, c, ldc, stridec, num_batches + +template +inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::bgemm: not implemented for ", typeid(Dtype).name()); +} + +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); + +template +inline void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::bgemm_internal: not implemented for ", typeid(Dtype).name()); +} + +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm_internal>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm_internal>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); + +#if defined(USE_ROCM) && ROCM_VERSION <= 50500 +// ROCm 5.6 hipblas matches the const Dtype *A API, but prior hipblas does not. 
+#define CUDABLAS_TRSM_ARGTYPES(Dtype) \ + hipblasHandle_t handle, hipblasSideMode_t side, hipblasFillMode_t uplo, \ + hipblasOperation_t trans, hipblasDiagType_t diag, int m, int n, \ + const Dtype *alpha, Dtype *A, int lda, Dtype *B, int ldb +#else +#define CUDABLAS_TRSM_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb +#endif + +template +inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::trsm: not implemented for ", typeid(Dtype).name()); +} + +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); + +#define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \ + int batchCount + +template +inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::blas::trsmBatched: not implemented for ", + typeid(Dtype).name()); +} + +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); + +/* LEVEL 2 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMV_ARGTYPES(Dtype) \ + char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \ + const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy + +template +inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemv: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(double)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(float)); +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::Half)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::BFloat16)); + +/* LEVEL 1 BLAS FUNCTIONS */ + +#define CUDABLAS_DOT_ARGTYPES(Dtype) \ + cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \ + int incy, Dtype *result + +template +inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::dot: not implemented for ", typeid(Dtype).name()); +} + +template <> +void dot(CUDABLAS_DOT_ARGTYPES(double)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(float)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::Half)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::BFloat16)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +template +inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::vdot: not implemented for ", typeid(Dtype).name()); +} + +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRS_ARGTYPES(Dtype) \ + 
cublasHandle_t handle, cublasOperation_t trans, \ + int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \ + Dtype** dB_array, int ldb, int* info_array, int batchsize + +template +void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::getrsBatched: not implemented for ", + typeid(Dtype).name()); +} +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); + +#define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \ + Dtype **tau_array, int *info, int batchsize + +template +void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::blas::geqrfBatched: not implemented for ", + typeid(Dtype).name()); +} +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRF_ARGTYPES(Dtype) \ + int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize + +template +void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) { + TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented for ", typeid(Dtype).name()); +} +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); + +#define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize + +template +void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::gelsBatched: not implemented for ", typeid(Dtype).name()); +} + +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); + +} // namespace at::cuda::blas diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h new file mode 100644 index 0000000000000000000000000000000000000000..0cb024dd701b284502965cba681f1f9beb214592 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +// Preserved for BC, as many files depend on these includes +#include +#include +#include +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h new file mode 100644 index 0000000000000000000000000000000000000000..4ec35f59a210860054ecf84807d173b1c0766a58 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h @@ -0,0 +1,95 @@ +#pragma once +// Light-weight version of CUDAContext.h with fewer transitive includes + +#include + +#include +#include +#include + +// cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also +// added bf16 support +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) +#include +#endif + +#ifdef CUDART_VERSION +#include +#endif + +#if defined(USE_ROCM) && ROCM_VERSION >= 50300 +#include +#endif + +#include +#include + +namespace c10 { +struct Allocator; +} + +namespace at::cuda { + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +inline int64_t getNumGPUs() { + return c10::cuda::device_count(); +} + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) 
+ */ +inline bool is_available() { + return c10::cuda::device_count() > 0; +} + +TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties(); + +TORCH_CUDA_CPP_API int warp_size(); + +TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(c10::DeviceIndex device); + +TORCH_CUDA_CPP_API bool canDeviceAccessPeer( + c10::DeviceIndex device, + c10::DeviceIndex peer_device); + +TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator(); + +/* Handles */ +TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle(); +TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle(); +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) +TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle(); +#endif + +TORCH_CUDA_CPP_API void clearCublasWorkspaces(); + +#if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 +TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle(); +#endif + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h new file mode 100644 index 0000000000000000000000000000000000000000..7b3f5c391f2ac074b2a50961fafc1b42dc1a2320 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h @@ -0,0 +1,115 @@ +#pragma once + +#include + +#include +#include + +namespace at::cuda { + +template +cudaDataType getCudaDataType() { + TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.") +} + +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_16F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_32F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_64F; +} + +// HIP doesn't define integral types +#ifndef USE_ROCM +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8U; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32I; +} +#endif + +#if !defined(USE_ROCM) +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16BF; +} +#endif + +inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) { + switch (scalar_type) { +// HIP doesn't define integral types +#ifndef USE_ROCM + case c10::ScalarType::Byte: + return CUDA_R_8U; + case c10::ScalarType::Char: + return CUDA_R_8I; + case c10::ScalarType::Int: + return CUDA_R_32I; +#endif + case c10::ScalarType::Half: + return CUDA_R_16F; + case c10::ScalarType::Float: + return CUDA_R_32F; + case c10::ScalarType::Double: + return CUDA_R_64F; + case c10::ScalarType::ComplexHalf: + return CUDA_C_16F; + case c10::ScalarType::ComplexFloat: + return CUDA_C_32F; + case c10::ScalarType::ComplexDouble: + return CUDA_C_64F; +#if !defined(USE_ROCM) + case c10::ScalarType::Short: + return CUDA_R_16I; + case c10::ScalarType::Long: + return CUDA_R_64I; + case c10::ScalarType::BFloat16: + return CUDA_R_16BF; +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11080 + case 
c10::ScalarType::Float8_e4m3fn: + return CUDA_R_8F_E4M3; + case c10::ScalarType::Float8_e5m2: + return CUDA_R_8F_E5M2; +#endif +#else // USE_ROCM + case c10::ScalarType::BFloat16: + return CUDA_R_16BF; +#if defined(HIP_NEW_TYPE_ENUMS) + case c10::ScalarType::Float8_e4m3fnuz: + return HIP_R_8F_E4M3_FNUZ; + case c10::ScalarType::Float8_e5m2fnuz: + return HIP_R_8F_E5M2_FNUZ; +#else + case c10::ScalarType::Float8_e4m3fnuz: + return static_cast(1000); + case c10::ScalarType::Float8_e5m2fnuz: + return static_cast(1001); +#endif +#endif + default: + TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.") + } +} + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h new file mode 100644 index 0000000000000000000000000000000000000000..ba9a5eb849a091be6e86658a8c7af87a0a3fbb8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +#include +#include + +namespace at::cuda { + +inline Device getDeviceFromPtr(void* ptr) { + cudaPointerAttributes attr{}; + + AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr)); + +#if !defined(USE_ROCM) + TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered, + "The specified pointer resides on host memory and is not registered with any CUDA device."); +#endif + + return {c10::DeviceType::CUDA, static_cast(attr.device)}; +} + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..2fe8a6f6c8f4fe9ac6a8bc8f78c145ae48e15c9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h @@ -0,0 +1,138 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace at { +/** + * Note [CUDA Graph-safe RNG states] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Strategy: + * ~~~~~~~~~ + * (It helps to look at + * cuda/detail/PhiloxCudaStateRaw.cuh and + * cuda/detail/UnpackRaw.cuh + * while you read this.) + * + * A CUDA graph containing multiple RNG ops behaves like a + * single giant kernel from the perspective of ops external + * to the graph. During graph capture, logic in CUDAGeneratorImpl + * records the total of all offset increments that occur in the + * graphed region, and records the final total as the offset for + * the entire graph. + * + * When the graph reruns, the logic that reruns it + * increments this device's CUDA generator's offset + * by that total. + * + * Meanwhile, within the graph, at capture time, instead of + * populating PhiloxCudaStates with the uint64_t offset pulled + * directly from the global state, PhiloxCudaState uses a pointer + * to a one-element stream-local int64_t device tensor + * holding an initial offset value, and a uint64_t holding an + * intra-graph offset. (The intra-graph offset starts from zero + * when capture begins.) In each consumer kernel, + * at::cuda::philox::unpack computes the offset to use for this kernel + * as intra-graph offset + *initial offset. + * + * When the graph reruns, the logic that reruns it first + * fill_s the initial offset tensor with this device's + * CUDA generator's current offset. 
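 *
 * (Illustratively, the offset each consumer kernel ends up using is
 *     *initial_offset_devptr + intra_graph_offset,
 * so a replay automatically picks up whatever offset this generator holds
 * at replay time; these names are descriptive, not the actual field names.)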
+ * + * The control flow above ensures graphed execution is bitwise + * identical to eager execution as long as RNG ops are enqueued + * from a single thread, even if RNG ops and graphs containing + * RNG ops are enqueued and run simultaneously on multiple streams. + * + * Usage: + * ~~~~~~ + * PhiloxCudaState in this file, and unpack() in + * cuda/CUDAGraphsUtils.cuh allow non-divergent use of + * CUDAGeneratorImpl whether graph capture is underway or not. + * + * Each PhiloxCudaState instance should be used for one and only one + * consumer kernel. + * + * Example (see e.g. native/cuda/Dropout.cu): + * + * #include + * #include + * + * __global__ void kernel(..., PhiloxCudaState philox_args) { + * auto seeds = at::cuda::philox::unpack(philox_args); + * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; + * curandStatePhilox4_32_10_t state; + * curand_init(std::get<0>(seeds), // seed + * idx, // per-thread subsequence + * std::get<1>(seeds), // offset in subsequence + * &state); + * ... + * } + * + * host_caller(...) { + * PhiloxCudaState rng_engine_inputs; + * { + * // See Note [Acquire lock when using random generators] + * std::lock_guard lock(gen->mutex_); + * + * // gen could be HostState or DevState here! No divergent code needed! + * rng_engine_inputs = gen->philox_cuda_state(offset_increment); + * } + * kernel<<<...>>>(..., rng_engine_inputs); + * } + * + */ + +struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl { + // Constructors + CUDAGeneratorImpl(DeviceIndex device_index = -1); + ~CUDAGeneratorImpl() override = default; + + // CUDAGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + void set_offset(uint64_t offset) override; + uint64_t get_offset() const override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const c10::TensorImpl& new_state) override; + c10::intrusive_ptr get_state() const override; + void set_philox_offset_per_thread(uint64_t offset); + uint64_t philox_offset_per_thread() const; + void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph); + uint64_t capture_epilogue(); + PhiloxCudaState philox_cuda_state(uint64_t increment); + + bool reset_rnn_state() { + return !no_reset_rnn_state_.test_and_set(); + } + + // Temporarily accommodates call sites that use philox_engine_inputs. + // Allows incremental refactor of call sites to use philox_cuda_state. 
+ std::pair philox_engine_inputs(uint64_t increment); + + static c10::DeviceType device_type(); + +private: + CUDAGeneratorImpl* clone_impl() const override; + uint64_t seed_ = default_rng_seed_val; + uint64_t philox_offset_per_thread_ = 0; + int64_t* seed_extragraph_{}; + int64_t* offset_extragraph_{}; + uint32_t offset_intragraph_ = 0; + bool graph_expects_this_gen_ = false; + std::atomic_flag no_reset_rnn_state_; +}; + +namespace cuda::detail { + +TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator( + DeviceIndex device_index = -1); +TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1); + +} // namespace cuda::detail +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h new file mode 100644 index 0000000000000000000000000000000000000000..b395de9a252a7e1656340b572cc110113686b207 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at { + +struct CUDAGeneratorImpl; + +namespace cuda { + +// Standalone way to get a unique mempool id usable as a pool=... argument +// to CUDAGraph::capture_begin +TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle(); + +struct TORCH_CUDA_CPP_API CUDAGraph { + CUDAGraph(); + ~CUDAGraph(); + + static void inc_pending_event_queries(); + static void dec_pending_event_queries(); + static int num_pending_event_queries(); + void capture_begin(MempoolId_t pool={0, 0}, cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal); + void capture_end(); + void replay(); + void reset(); + MempoolId_t pool(); + void enable_debug_mode(); + void debug_dump(const std::string& debug_path); + + protected: +#if !defined(USE_ROCM) || ROCM_VERSION >= 50300 + cudaGraph_t graph_ = NULL; + cudaGraphExec_t graph_exec_ = NULL; +#endif + + static std::atomic pending_event_queries; + + // internal states so reset() can do its best cleaning up + // Set to true in capture_end if cudaStreamEndCapture succeeded + // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate + // to create graph_exec_, then graph_ is deleted + bool has_graph_ = false; + // Set to true in capture_end if cudaGraphInstantiate succeeded + bool has_graph_exec_ = false; + + // uuid of this instance's current capture, used to + // specify the pool. + CaptureId_t id_; + + // the ID assigned by cuda during graph capture, + // used to identify when a stream is participating in capture + CaptureId_t capture_id_ = -1; + + // uuid used to request a particular private mempool from CUDACachingAllocator. + // By default, this will be set to {id_, 0}. + // + // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_ + // will be set to the other graph's mempool_id_, and therefore share a mempool with the + // other graph. + // + // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(), + // it will share a mempool with any other captures that used "pool=handle". + // + // Sharing a mempool across graphs saves memory, and it's safe if you + // know you'll replay those graphs in the same order you captured them. 
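  //
  // A minimal usage sketch of pool sharing (illustrative only; side-stream
  // selection and warmup iterations are omitted here):
  //
  //   at::cuda::CUDAGraph g1, g2;
  //   g1.capture_begin();          /* ...capture ops... */ g1.capture_end();
  //   g2.capture_begin(g1.pool()); /* ...capture ops... */ g2.capture_end();
  //   g1.replay();
  //   g2.replay();  // safe: replayed in the same order as captured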
+ MempoolId_t mempool_id_; + + // Stream on which capture began + at::cuda::CUDAStream capture_stream_; + + // Default generator on device where capture began + at::CUDAGeneratorImpl* capture_gen_; + + // Device where capture occurred. Right now, for simplicity, we require all ops + // in a capture to run on the same device, but this is a limitation of CUDAGraph, + // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device + // captures if needed. + int capture_dev_; + + // RNG state trackers + at::Tensor seed_extragraph_; + at::Tensor offset_extragraph_; + uint64_t wholegraph_increment_; +}; + +} // namespace cuda +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..f3e27a7b159ac6029fc85fd91a048202dfb352e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten. +// This file adds utils used by aten only. + +namespace at::cuda { + +using CaptureId_t = c10::cuda::CaptureId_t; +using CaptureStatus = c10::cuda::CaptureStatus; + +// Use this version where you don't want to create a CUDA context if none exists. +inline CaptureStatus currentStreamCaptureStatus() { +#if !defined(USE_ROCM) || ROCM_VERSION >= 50300 + // don't create a context if we don't have to + if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) { + return c10::cuda::currentStreamCaptureStatusMayInitCtx(); + } else { + return CaptureStatus::None; + } +#else + return CaptureStatus::None; +#endif +} + +inline void assertNotCapturing(std::string attempt) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + attempt, + " during CUDA graph capture. If you need this call to be captured, " + "please file an issue. " + "Current cudaStreamCaptureStatus: ", + status); +} + +inline void errorIfCapturingCudnnBenchmark(std::string version_specific) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + "Current cudaStreamCaptureStatus: ", + status, + "\nCapturing ", + version_specific, + "is prohibited. Possible causes of this error:\n" + "1. No warmup iterations occurred before capture.\n" + "2. 
The convolutions you're trying to capture use dynamic shapes, " + "in which case capturing them is generally prohibited."); +} + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h new file mode 100644 index 0000000000000000000000000000000000000000..1052469ea7d8ad1047466b3092e184c127a49661 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h @@ -0,0 +1,76 @@ +#pragma once + +#include +#if defined(USE_ROCM) +#include +#define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch) +#endif + +// cuSparse Generic API added in CUDA 10.1 +// Windows support added in CUDA 11.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32))) +#define AT_USE_CUSPARSE_GENERIC_API() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_API() 0 +#endif + +// cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \ + (CUSPARSE_VERSION < 12000) +#define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1 +#else +#define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0 +#endif + +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \ + (CUSPARSE_VERSION >= 12000) +#define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1 +#else +#define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0 +#endif + +#if defined(USE_ROCM) +// hipSparse const API added in v2.4.0 +#if HIPSPARSE_VERSION >= 200400 +#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1 +#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_GENERIC_API() 1 +#else +#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 1 +#define AT_USE_HIPSPARSE_GENERIC_API() 1 +#endif +#else // USE_ROCM +#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_GENERIC_API() 0 +#endif // USE_ROCM + +// cuSparse Generic API spsv function was added in CUDA 11.3.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500) +#define AT_USE_CUSPARSE_GENERIC_SPSV() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SPSV() 0 +#endif + +// cuSparse Generic API spsm function was added in CUDA 11.3.1 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600) +#define AT_USE_CUSPARSE_GENERIC_SPSM() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SPSM() 0 +#endif + +// cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400) +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400) +#define AT_USE_CUSPARSE_GENERIC_SDDMM() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SDDMM() 0 +#endif + +// BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0) +#if defined(CUDART_VERSION) || \ + (defined(USE_ROCM) && ROCM_VERSION >= 40500 ) +#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1 +#else +#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0 +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h new file mode 100644 index 0000000000000000000000000000000000000000..c99d42c9a7de8852d84167bbca70bc242aabb2b2 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h @@ -0,0 +1,318 @@ +#pragma once + +/* + Provides a subset of cuSPARSE functions as templates: + + csrgeam2(...) + + where scalar_t is double, float, c10::complex or c10::complex. + The functions are available in at::cuda::sparse namespace. +*/ + +#include +#include + +namespace at::cuda::sparse { + +#define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, int nnzA, \ + const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ + const int *csrSortedColIndA, const scalar_t *beta, \ + const cusparseMatDescr_t descrB, int nnzB, \ + const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \ + const int *csrSortedColIndC, size_t *pBufferSizeInBytes + +template +inline void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float)); +template <> +void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double)); +template <> +void csrgeam2_bufferSizeExt>( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex)); +template <> +void csrgeam2_bufferSizeExt>( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex)); + +#define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \ + cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \ + int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \ + const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace + +template +inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) { + TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz( + handle, + m, + n, + descrA, + nnzA, + csrSortedRowPtrA, + csrSortedColIndA, + descrB, + nnzB, + csrSortedRowPtrB, + csrSortedColIndB, + descrC, + csrSortedRowPtrC, + nnzTotalDevHostPtr, + workspace)); +} + +#define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, int nnzA, \ + const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ + const int *csrSortedColIndA, const scalar_t *beta, \ + const cusparseMatDescr_t descrB, int nnzB, \ + const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \ + void *pBuffer + +template +inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::csrgeam2: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(float)); +template <> +void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(double)); +template <> +void csrgeam2>( + CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex)); +template <> +void csrgeam2>( + CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \ + int kb, int nnzb, const 
scalar_t *alpha, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc + +template +inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrmm: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrmm(CUSPARSE_BSRMM_ARGTYPES(float)); +template <> +void bsrmm(CUSPARSE_BSRMM_ARGTYPES(double)); +template <> +void bsrmm>(CUSPARSE_BSRMM_ARGTYPES(c10::complex)); +template <> +void bsrmm>(CUSPARSE_BSRMM_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nb, int nnzb, \ + const scalar_t *alpha, const cusparseMatDescr_t descrA, \ + const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ + int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y + +template +inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrmv: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrmv(CUSPARSE_BSRMV_ARGTYPES(float)); +template <> +void bsrmv(CUSPARSE_BSRMV_ARGTYPES(double)); +template <> +void bsrmv>(CUSPARSE_BSRMV_ARGTYPES(c10::complex)); +template <> +void bsrmv>(CUSPARSE_BSRMV_ARGTYPES(c10::complex)); + +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() + +#define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, \ + const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, int *pBufferSizeInBytes + +template +inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float)); +template <> +void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double)); +template <> +void bsrsv2_bufferSize>( + CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex)); +template <> +void bsrsv2_bufferSize>( + CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_analysis: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float)); +template <> +void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double)); +template <> +void bsrsv2_analysis>( + CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex)); +template <> +void bsrsv2_analysis>( + CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const 
int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \ + cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_solve: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float)); +template <> +void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double)); +template <> +void bsrsv2_solve>( + CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex)); +template <> +void bsrsv2_solve>( + CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsm2Info_t info, int *pBufferSizeInBytes + +template +inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float)); +template <> +void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double)); +template <> +void bsrsm2_bufferSize>( + CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex)); +template <> +void bsrsm2_bufferSize>( + CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_analysis: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float)); +template <> +void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double)); +template <> +void bsrsm2_analysis>( + CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex)); +template <> +void bsrsm2_analysis>( + CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \ + const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ + int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \ + scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_solve: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float)); +template <> +void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double)); +template <> +void bsrsm2_solve>( + CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex)); +template <> +void bsrsm2_solve>( + CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex)); + +#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE + +} // namespace 
at::cuda::sparse diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e4e89ea1cdb77da1d7866ffe99c64dabfd735d27 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace at { +template <> +inline __half* Tensor::data() const { + return reinterpret_cast<__half*>(data()); +} +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..d5f65dd6a572f9cab15dbae9983df8037b724a99 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace at::cuda { + +// Check if every tensor in a list of tensors matches the current +// device. +inline bool check_device(ArrayRef ts) { + if (ts.empty()) { + return true; + } + Device curDevice = Device(kCUDA, current_device()); + for (const Tensor& t : ts) { + if (t.device() != curDevice) return false; + } + return true; +} + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..65ad7f7d16e24bbc46af7cacc1c8233436109be2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +// +// A caching allocator for CUDA host allocations (pinned memory). +// +// This provides a drop-in replacement for THCudaHostAllocator, which re-uses +// freed pinned (page-locked) memory allocations. This avoids device +// synchronizations due to cudaFreeHost calls. +// +// To ensure correct behavior, THCCachingHostAllocator_recordEvent must be +// called anytime a pointer from this allocator is used in a cudaMemcpyAsync +// call between host and device, and passed the corresponding context from the +// allocation. This is currently invoked by at::native::copy_kernel_cuda. +// +// Note that this allocator does not split larger allocations into smaller +// blocks, unlike the caching device allocator. +// +TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator(); + +// Records an event in the specified stream. The allocation corresponding to the +// input `ptr`/`ctx` will not be re-used until the event has occurred. 
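//
// Illustrative sketch (not part of this header; names and error handling are
// placeholders):
//
//   at::DataPtr pinned = getCachingHostAllocator()->allocate(nbytes);
//   cudaMemcpyAsync(dev_dst, pinned.get(), nbytes,
//                   cudaMemcpyHostToDevice, stream);
//   CachingHostAllocator_recordEvent(pinned.get(), pinned.get_context(), stream);
//
// so this block is not handed back out until the copy's event has completed.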
+TORCH_CUDA_CPP_API bool +CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream); + +// Releases cached pinned memory allocations via cudaHostFree +TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache(); + +inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) { + return getCachingHostAllocator()->allocate(size); +} + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c0a2fc47c0069bed4e15beec249162a556f7dc3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +__device__ __forceinline__ unsigned int ACTIVE_MASK() +{ +#if !defined(USE_ROCM) + return __activemask(); +#else +// will be ignored anyway + return 0xffffffff; +#endif +} + +__device__ __forceinline__ void WARP_SYNC(unsigned mask = 0xffffffff) { +#if !defined(USE_ROCM) + return __syncwarp(mask); +#endif +} + +#if defined(USE_ROCM) +__device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate) +{ +return __ballot(predicate); +} +#else +__device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __ballot_sync(mask, predicate); +#else + return __ballot(predicate); +#endif +} +#endif + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_sync(mask, value, srcLane, width); +#else + return __shfl(value, srcLane, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_up_sync(mask, value, delta, width); +#else + return __shfl_up(value, delta, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_down_sync(mask, value, delta, width); +#else + return __shfl_down(value, delta, width); +#endif +} + +#if defined(USE_ROCM) +template<> +__device__ __forceinline__ int64_t WARP_SHFL_DOWN(int64_t value, unsigned int delta, int width , unsigned int mask) +{ + //(HIP doesn't support int64_t). 
Trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ + int2 a = *reinterpret_cast(&value); + a.x = __shfl_down(a.x, delta); + a.y = __shfl_down(a.y, delta); + return *reinterpret_cast(&a); +} +#endif + +template<> +__device__ __forceinline__ c10::Half WARP_SHFL_DOWN(c10::Half value, unsigned int delta, int width, unsigned int mask) +{ + return c10::Half(WARP_SHFL_DOWN(value.x, delta, width, mask), c10::Half::from_bits_t{}); +} + +template +__device__ __forceinline__ c10::complex WARP_SHFL_DOWN(c10::complex value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return c10::complex( + __shfl_down_sync(mask, value.real_, delta, width), + __shfl_down_sync(mask, value.imag_, delta, width)); +#else + return c10::complex( + __shfl_down(value.real_, delta, width), + __shfl_down(value.imag_, delta, width)); +#endif +} + +/** + * For CC 3.5+, perform a load using __ldg + */ +template +__device__ __forceinline__ T doLdg(const T* p) { +#if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM) + return __ldg(p); +#else + return *p; +#endif +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..18733f0beb30b42752cd10891beea9c8de59adf3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h @@ -0,0 +1,44 @@ +#pragma once +#include + +namespace at::detail { + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + ScalarType dtype, + c10::optional device_opt, + c10::optional memory_format_opt); + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + const TensorOptions &options); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + ScalarType dtype, + c10::optional device_opt); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions &options); + + +} // namespace at::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7081e94837caa7d5050128e0bfe19aa67f93cd39 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include +#include + +// NumericLimits.cuh is a holder for numeric limits definitions of commonly used +// types. This header is very specific to ROCm HIP and may be removed in the future. +// This header is derived from the legacy THCNumerics.cuh. + +// The lower_bound and upper_bound constants are same as lowest and max for +// integral types, but are -inf and +inf for floating point types. They are +// useful in implementing min, max, etc. 
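//
// Illustrative example (not part of this header): for a max reduction in
// device code, lower_bound() is the natural initial value, since
// max(-inf, x) == x even when x is itself -inf, whereas lowest() (-FLT_MAX)
// would mask a true result of -inf:
//
//   float running_max = at::numeric_limits<float>::lower_bound();
//   for (int i = 0; i < n; ++i) running_max = ::max(running_max, vals[i]);
//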
+ +namespace at { + +template +struct numeric_limits { +}; + +// WARNING: the following at::numeric_limits definitions are there only to support +// HIP compilation for the moment. Use std::numeric_limits if you are not +// compiling for ROCm. +// from @colesbury: "The functions on numeric_limits aren't marked with +// __device__ which is why they don't work with ROCm. CUDA allows them +// because they're constexpr." + +namespace { + // ROCm doesn't like INFINITY too. + constexpr double inf = INFINITY; +} + +template <> +struct numeric_limits { + static inline __host__ __device__ bool lowest() { return false; } + static inline __host__ __device__ bool max() { return true; } + static inline __host__ __device__ bool lower_bound() { return false; } + static inline __host__ __device__ bool upper_bound() { return true; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ uint8_t lowest() { return 0; } + static inline __host__ __device__ uint8_t max() { return UINT8_MAX; } + static inline __host__ __device__ uint8_t lower_bound() { return 0; } + static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int8_t lowest() { return INT8_MIN; } + static inline __host__ __device__ int8_t max() { return INT8_MAX; } + static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; } + static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int16_t lowest() { return INT16_MIN; } + static inline __host__ __device__ int16_t max() { return INT16_MAX; } + static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; } + static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int32_t lowest() { return INT32_MIN; } + static inline __host__ __device__ int32_t max() { return INT32_MAX; } + static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; } + static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; } +}; + +template <> +struct numeric_limits { +#ifdef _MSC_VER + static inline __host__ __device__ int64_t lowest() { return _I64_MIN; } + static inline __host__ __device__ int64_t max() { return _I64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; } +#else + static inline __host__ __device__ int64_t lowest() { return INT64_MIN; } + static inline __host__ __device__ int64_t max() { return INT64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; } +#endif +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); } + static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); } + static inline 
__host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 lower_bound() { return at::BFloat16(0xFF80, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ float lowest() { return -FLT_MAX; } + static inline __host__ __device__ float max() { return FLT_MAX; } + static inline __host__ __device__ float lower_bound() { return -static_cast(inf); } + static inline __host__ __device__ float upper_bound() { return static_cast(inf); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ double lowest() { return -DBL_MAX; } + static inline __host__ __device__ double max() { return DBL_MAX; } + static inline __host__ __device__ double lower_bound() { return -inf; } + static inline __host__ __device__ double upper_bound() { return inf; } +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h new file mode 100644 index 0000000000000000000000000000000000000000..1abf1dcfc12447d7226df19701486cd6bc2affee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h @@ -0,0 +1,11 @@ +#include +#include + +namespace at::cuda { +namespace detail { +void init_p2p_access_cache(int64_t num_devices); +} + +TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev); + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h new file mode 100644 index 0000000000000000000000000000000000000000..dc4b131aa9c4b468092c5819f2c4aa321be8ca30 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h @@ -0,0 +1,5 @@ +#pragma once + +#include + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..0b161b70d96f428512cf6c26c3acc1654da74bf0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh @@ -0,0 +1,4 @@ +#pragma once + +#include +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..fd8b9e91d48815761e7234b0d6fbcaab7f62fcee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include +#include + +// Collection of in-kernel scan / prefix sum utilities + +namespace at::cuda { + +// Inclusive prefix sum for binary vars using intra-warp voting + +// shared memory +template +__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) { + // Within-warp, we use warp voting. 
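  // Each lane contributes its boolean `in` as one bit of a warp-wide ballot;
  // popcount of (ballot & lanes-at-or-below-me) is this lane's inclusive
  // count, and popcount of the whole ballot is the warp's total (the carry).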
+#if defined (USE_ROCM) + unsigned long long int vote = WARP_BALLOT(in); + T index = __popcll(getLaneMaskLe() & vote); + T carry = __popcll(vote); +#else + T vote = WARP_BALLOT(in); + T index = __popc(getLaneMaskLe() & vote); + T carry = __popc(vote); +#endif + + int warp = threadIdx.x / C10_WARP_SIZE; + + // Per each warp, write out a value + if (getLaneId() == 0) { + smem[warp] = carry; + } + + __syncthreads(); + + // Sum across warps in one thread. This appears to be faster than a + // warp shuffle scan for CC 3.0+ + if (threadIdx.x == 0) { + int current = 0; + for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { + T v = smem[i]; + smem[i] = binop(smem[i], current); + current = binop(current, v); + } + } + + __syncthreads(); + + // load the carry from the preceding warp + if (warp >= 1) { + index = binop(index, smem[warp - 1]); + } + + *out = index; + + if (KillWARDependency) { + __syncthreads(); + } +} + +// Exclusive prefix sum for binary vars using intra-warp voting + +// shared memory +template +__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) { + inclusiveBinaryPrefixScan(smem, in, out, binop); + + // Inclusive to exclusive + *out -= (T) in; + + // The outgoing carry for all threads is the last warp's sum + *carry = smem[at::ceil_div(blockDim.x, C10_WARP_SIZE) - 1]; + + if (KillWARDependency) { + __syncthreads(); + } +} + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h new file mode 100644 index 0000000000000000000000000000000000000000..d31bf68ccafba92a242894e8f426329693212f5c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h @@ -0,0 +1,10 @@ +#pragma once +#include +#include + +namespace at::cuda { + +// enqueues a kernel that spins for the specified number of cycles +TORCH_CUDA_CU_API void sleep(int64_t cycles); + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..85783c303534ec9f6190c2d6bb44a8faafd680f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +/// Allocator for Thrust to re-route its internal device allocations +/// to the THC allocator +class ThrustAllocator { +public: + typedef char value_type; + + char* allocate(std::ptrdiff_t size) { + return static_cast(c10::cuda::CUDACachingAllocator::raw_alloc(size)); + } + + void deallocate(char* p, size_t size) { + c10::cuda::CUDACachingAllocator::raw_delete(p); + } +}; + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh new file mode 100644 index 0000000000000000000000000000000000000000..062c365a4e1a9af6160cb01d487d6c7a37483286 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh @@ -0,0 +1,413 @@ +#pragma once +#include + +#include +#include +#include +#include + +#include + +#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() + +#include + +#else + +// include cub in a safe manner, see: +// https://github.com/pytorch/pytorch/pull/55292 +#undef CUB_NS_POSTFIX //undef to avoid 
redefinition warnings +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER +#define CUB_NS_PREFIX namespace at_cuda_detail { +#define CUB_NS_POSTFIX } +#define CUB_NS_QUALIFIER ::at_cuda_detail::cub +#include +#undef CUB_NS_POSTFIX +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER + +#endif + +#include +#include +#include + +// handle the temporary storage and 'twice' calls for cub API +#define CUB_WRAPPER(func, ...) do { \ + size_t temp_storage_bytes = 0; \ + func(nullptr, temp_storage_bytes, __VA_ARGS__); \ + auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get(); \ + auto temp_storage = caching_allocator.allocate(temp_storage_bytes); \ + func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__); \ + AT_CUDA_CHECK(cudaGetLastError()); \ +} while (false) + +#ifdef USE_ROCM +#define NO_ROCM(x) +#define ROCM_HIPCUB(x) ::hipcub +#else +#define NO_ROCM(x) x +#define ROCM_HIPCUB(x) x +#endif + +#if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || \ + (defined(USE_ROCM) && ROCM_VERSION >= 40500) + +#if !defined(USE_ROCM) +namespace at_cuda_detail { +#endif + +// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 + +template <> +struct ROCM_HIPCUB(cub)::FpLimits +{ + static __host__ __device__ __forceinline__ c10::BFloat16 Max() { + unsigned short max_word = 0x7F7F; + return reinterpret_cast(max_word); + } + + static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { + unsigned short lowest_word = 0xFF7F; + return reinterpret_cast(lowest_word); + } +}; + +template <> +struct ROCM_HIPCUB(cub)::NumericTraits: + ROCM_HIPCUB(cub)::BaseTraits {}; + +#if !defined(USE_ROCM) +} // namespace at_cuda_detail +#endif + +#endif + +#if !defined(USE_ROCM) +namespace at::native { +namespace cub = ::at_cuda_detail::cub; +} // namespace at::native +#endif + +namespace at::cuda::cub { + +namespace detail { + +template +struct cuda_type { + using type = T; +}; +template<> +struct cuda_type { + using type = __half; +}; + +#if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16() + +template<> +struct cuda_type { + using type = __nv_bfloat16; +}; + +#elif (defined(USE_ROCM) && ROCM_VERSION >= 40500) + +template<> +struct cuda_type { + using type = hip_bfloat16; +}; + +#endif + +} // namespace detail + +template +inline void segmented_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t num_elements, int64_t num_segments, + OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, + bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8 +) { + TORCH_CHECK(num_elements <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + TORCH_CHECK(num_segments <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + using key_t_ = typename detail::cuda_type::type; + + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + + if (keys_out == nullptr) { + keys_out_owner = allocator->allocate(num_elements * sizeof(key_t)); + keys_out = reinterpret_cast(keys_out_owner.get()); + } + + const key_t_ *keys_in_ = reinterpret_cast(keys_in); + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { + 
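    // Ascending order: identical segmented radix sort call, minus the
    // Descending variant.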
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +} + +#if CUB_SUPPORTS_UNIQUE_BY_KEY() +template +inline void unique_by_key( + KeysInputIteratorT keys_in, ValuesInputIteratorT values_in, + KeysOutputIteratorT keys_out, ValuesOutputIteratorT values_out, + NumSelectedIteratorT num_selected, int64_t num_input_items) +{ + // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed. + constexpr bool null_keys_out = std::is_same::value; + using KeyT = typename std::iterator_traits::value_type; + using RealKeysOutputIteratorT = typename std::conditional::type; + RealKeysOutputIteratorT keys_out_; + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + if constexpr (null_keys_out) { + keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT)); + keys_out_ = static_cast(keys_out_owner.get()); + } else { + keys_out_ = keys_out; + } + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey, + keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream()); +} +#endif + +namespace impl { + +template +C10_LAUNCH_BOUNDS_1(1) +__global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){ + // NOTE: out here not the final scan output, but an intermediate of the accumulation type. + using acc_t = typename std::iterator_traits::value_type; + *out = scan_op(static_cast(*a), static_cast(*b)); +} + +#if !CUB_SUPPORTS_FUTURE_VALUE() +template +struct chained_iterator { + using iterator_category = std::random_access_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = ValueT; + using pointer = ValueT*; + using reference = ValueT&; + + InputIteratorT iter; + ValueT *first; + difference_type offset = 0; + + __device__ ValueT operator[](difference_type i) { + i += offset; + if (i == 0) { + return *first; + } else { + return ValueT(iter[i - 1]); + } + } + __device__ chained_iterator operator+(difference_type i) { + return chained_iterator{iter, first, i}; + } + __device__ ValueT operator*() { + return (*this)[0]; + } +}; +#endif + +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 +} + +// non synchronizing cub call +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +template +inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) { +#if defined(USE_ROCM) && (ROCM_VERSION >= 50000) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + using input_t = typename 
std::iterator_traits::value_type; + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(input_t)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; + using tuple = typename ArgIndexInputIterator::value_type; + auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { + if (x.key == 0) { + return *first_elem_ptr; + } else { + return x.value; + } + }; + auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( + ArgIndexInputIterator(input + i), input_iter_transform); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i + 1, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +template +inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) { +#if defined(USE_ROCM) && (ROCM_VERSION >= 50000) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i - 1, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + auto input_ = impl::chained_iterator{ + input + i, first_elem_ptr}; + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +#if CUB_SUPPORTS_SCAN_BY_KEY() + +template +inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + 
CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey, + keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +template +inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey, + keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +#endif + +template +void unique(InputIteratorT input, OutputIteratorT output, + NumSelectedIteratorT num_selected_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub unique does not support more than INT_MAX elements"); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, + input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); +} + +template +void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out, + LengthOutputIteratorT length_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub run_length_encode does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode, + input, output, counts_out, length_out, num_items, + at::cuda::getCurrentCUDAStream()); +} + +template +void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T init) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub reduce does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce, + input, output, num_items, op, init, + at::cuda::getCurrentCUDAStream()); + +} + +} // namespace at::cuda::cub diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h new file mode 100644 index 0000000000000000000000000000000000000000..dcd7cd66505e0f77ca0a6455a0125ed8b2b7b8d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h @@ -0,0 +1,87 @@ +#pragma once +#include +#include +#include + +// NOTE: These templates are intentionally not defined in this header, +// which aviods re-compiling them for each translation unit. If you get +// a link error, you need to add an explicit instantiation for your +// types in cub.cu + +namespace at::cuda::cub { + +inline int get_num_bits(uint64_t max_key) { + int num_bits = 1; + while (max_key > 1) { + max_key >>= 1; + num_bits++; + } + return num_bits; +} + +namespace detail { + +// radix_sort_pairs doesn't interact with value_t other than to copy +// the data, so we can save template instantiations by reinterpreting +// it as an opaque type. 
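To make the preceding note concrete: only sizeof(value_t) is visible to the radix sort, so payload types of equal width can share one compiled kernel. The sketch below is illustrative only and not part of this patch (the device buffers are hypothetical); both calls to the radix_sort_pairs wrapper declared further down lower to the same radix_sort_pairs_impl instantiation for an 8-byte opaque value type.

#include <ATen/cuda/cub.h>

// Both payloads are 8 bytes wide, so after the opaque-type reinterpretation
// described above they reuse a single radix_sort_pairs_impl instantiation.
void sort_two_payloads(const int32_t* keys_in, int32_t* keys_out,
                       const int64_t* idx_in, int64_t* idx_out,
                       const double* val_in, double* val_out,
                       int64_t n) {
  at::cuda::cub::radix_sort_pairs(keys_in, keys_out, idx_in, idx_out, n);
  at::cuda::cub::radix_sort_pairs(keys_in, keys_out, val_in, val_out, n);
}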
+template struct alignas(N) OpaqueType { char data[N]; }; + +template +void radix_sort_pairs_impl( + const key_t *keys_in, key_t *keys_out, + const OpaqueType *values_in, OpaqueType *values_out, + int64_t n, bool descending, int64_t begin_bit, int64_t end_bit); + +} // namespace detail + +template +void radix_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8) { + static_assert(std::is_trivially_copyable::value || + AT_ROCM_ENABLED(), // ROCm incorrectly fails this check for vector types + "radix_sort_pairs value type must be trivially copyable"); + // Make value type opaque, so all inputs of a certain size use the same template instantiation + using opaque_t = detail::OpaqueType; + static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0, + "This size of value_t is not instantiated. Please instantiate it in cub.cu" + " and modify this check."); + static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned"); + detail::radix_sort_pairs_impl( + keys_in, keys_out, + reinterpret_cast(values_in), + reinterpret_cast(values_out), + n, descending, begin_bit, end_bit); +} + +template +void radix_sort_keys( + const key_t *keys_in, key_t *keys_out, + int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8); + +// NOTE: Intermediate sums will be truncated to input_t precision +template +void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n); + +template +void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) { + return inclusive_sum_truncating(input, output, n); +} + +// NOTE: Sums are done is common_type +template +void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n); + +template +void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) { + return exclusive_sum_in_common_type(input, output, n); +} + +void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n); +inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) { + return mask_exclusive_sum( + reinterpret_cast(mask), output_idx, n); +} + +} // namespace at::cuda::cub diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h new file mode 100644 index 0000000000000000000000000000000000000000..dddeab1e2675fd791b678eb1da9c81713b8440e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h @@ -0,0 +1,54 @@ +#pragma once + +#include + +#include +#include + +// TODO: No need to have this whole header, we can just put it all in +// the cpp file + +namespace at::cuda::detail { + +// Set the callback to initialize Magma, which is set by +// torch_cuda_cu. This indirection is required so magma_init is called +// in the same library where Magma will be used. 
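The indirection described above is a plain function-pointer registration. A minimal sketch of the pattern follows; the names are hypothetical and not part of this patch (the real registration is performed by torch_cuda_cu, and the pointer is presumably invoked during CUDA/MAGMA initialization).

namespace sketch {

// Callback installed by the library that actually links against MAGMA.
void (*g_magma_init_fn)() = nullptr;

void set_magma_init_fn(void (*fn)()) { g_magma_init_fn = fn; }

// Called from CUDA initialization; magma_init() then runs inside the library
// where MAGMA is used, which is the point of the indirection.
void lazy_magma_init() {
  if (g_magma_init_fn != nullptr) {
    g_magma_init_fn();
  }
}

} // namespace sketch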
+TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)()); + + +// The real implementation of CUDAHooksInterface +struct CUDAHooks : public at::CUDAHooksInterface { + CUDAHooks(at::CUDAHooksArgs) {} + void initCUDA() const override; + Device getDeviceFromPtr(void* data) const override; + bool isPinnedPtr(const void* data) const override; + const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const override; + bool hasCUDA() const override; + bool hasMAGMA() const override; + bool hasCuDNN() const override; + bool hasCuSOLVER() const override; + bool hasROCM() const override; + const at::cuda::NVRTC& nvrtc() const override; + DeviceIndex current_device() const override; + bool hasPrimaryContext(DeviceIndex device_index) const override; + Allocator* getCUDADeviceAllocator() const override; + Allocator* getPinnedMemoryAllocator() const override; + bool compiledWithCuDNN() const override; + bool compiledWithMIOpen() const override; + bool supportsDilatedConvolutionWithCuDNN() const override; + bool supportsDepthwiseConvolutionWithCuDNN() const override; + bool supportsBFloat16ConvolutionWithCuDNNv8() const override; + bool hasCUDART() const override; + long versionCUDART() const override; + long versionCuDNN() const override; + std::string showConfig() const override; + double batchnormMinEpsilonCuDNN() const override; + int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex device_index) const override; + void cuFFTSetPlanCacheMaxSize(DeviceIndex device_index, int64_t max_size) const override; + int64_t cuFFTGetPlanCacheSize(DeviceIndex device_index) const override; + void cuFFTClearPlanCache(DeviceIndex device_index) const override; + int getNumGPUs() const override; + void deviceSynchronize(DeviceIndex device_index) const override; +}; + +} // at::cuda::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..db8519389e9ff567b11fd9a3bb31ac3d8646fe92 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include + +namespace at::cuda::detail { + +TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t); +using at::native::canUse32BitIndexMath; + +template +TensorInfo +getTensorInfo(const at::TensorBase &t) { + IndexType sz[MAX_TENSORINFO_DIMS]; + IndexType st[MAX_TENSORINFO_DIMS]; + + int dims = t.dim(); + for (int i = 0; i < dims; ++i) { + sz[i] = t.size(i); + st[i] = t.stride(i); + } + + scalar* data_ptr = nullptr; + + if constexpr (std::is_const::value) { + data_ptr = t.const_data_ptr(); + } else { + data_ptr = t.mutable_data_ptr(); + } + + return TensorInfo( + data_ptr, dims, sz, st); +} + +} // namespace at::cuda::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e8a26b5e06a6ffeeeaf5df26f09175a2c903aa01 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh @@ -0,0 +1,124 @@ +#pragma once + +#include +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) +#include +#endif + +namespace at::cuda::detail { + +// A utility class to implement integer division by 
multiplication, given a fixed +// divisor. +// +// WARNING: The fast divider algorithm is only implemented for unsigned int; +// otherwise we default to plain integer division. For unsigned int, +// we further assume that the dividend is at most INT32_MAX. Thus, +// IntDivider must NOT be used for general integer division. +// +// This reduced range is enough for our purpose, and it allows us to +// slightly simplify the computation. +// +// (NOTE: Below, "2^k" denotes exponentiation, i.e., 1< 0), we can find a "magic number" m (2^N +// <= m < 2^(N+1)) and shift s such that: +// +// \floor(n / d) = \floor((m * n) / 2^(N+s)). +// +// Given such m and s, the integer division can be then implemented as: +// +// let m' = m - 2^N // 0 <= m' < 2^N +// +// fast_integer_division(n): +// // Multiply two N-bit unsigned integers: the result is a 2N-bit unsigned +// // integer. Then take the higher N bits. +// t = (m' * n) >> N +// +// // Here we use the fact that n is less than 2^(N-1): otherwise the value +// // of (t + n) may not fit in an N-bit integer. +// return (t + n) >> s +// +// Finding such a magic number is surprisingly easy: +// +// s = \ceil(\log_2 d) +// m' = \floor(2^N * (2^s - d) / d) + 1 // Need 2N-bit integer arithmetic. +// +// See also: +// - Division by Invariant Integers Using Multiplication, +// Torbjörn Granlund and Peter L. Montgomery, 1994. +// +// - http://www.hackersdelight.org/magic.htm +// +// - http://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html + +// Result of div/mod operation stored together. +template +struct DivMod { + Value div, mod; + + C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { } +}; + +// Base case: we only have an implementation for uint32_t for now. For +// everything else, we use plain division. +template +struct IntDivider { + IntDivider() = default; + IntDivider(Value d) : divisor(d) { } + + C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; } + C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; } + C10_HOST_DEVICE inline DivMod divmod(Value n) const { + return DivMod(n / divisor, n % divisor); + } + + Value divisor; +}; + +// Implement fast integer division. +template <> +struct IntDivider { + static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int."); + + IntDivider() = default; + + IntDivider(unsigned int d) : divisor(d) { + assert(divisor >= 1 && divisor <= INT32_MAX); + + // TODO: gcc/clang has __builtin_clz() but it's not portable. + for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break; + + uint64_t one = 1; + uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1; + m1 = magic; + assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits. + } + + C10_HOST_DEVICE inline unsigned int div(unsigned int n) const { +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and + // 'm1'. + unsigned int t = __umulhi(n, m1); + return (t + n) >> shift; +#else + // Using uint64_t so that the addition does not overflow. + uint64_t t = ((uint64_t) n * m1) >> 32; + return (t + n) >> shift; +#endif + } + + C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const { + return n - div(n) * divisor; + } + + C10_HOST_DEVICE inline DivMod divmod(unsigned int n) const { + unsigned int q = div(n); + return DivMod(q, n - q * divisor); + } + + unsigned int divisor; // d above. + unsigned int m1; // Magic number: m' above. + unsigned int shift; // Shift amounts. 
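  // Worked example (illustrative note, not part of the original header): for
  // divisor d = 7 and N = 32 the constructor above computes shift = 3 (the
  // smallest s with 2^s >= 7) and
  //   m1 = floor(2^32 * (2^3 - 7) / 7) + 1 = 613566757,
  // which fits in 32 bits. Then div(100) evaluates __umulhi(100, 613566757) = 14
  // and returns (14 + 100) >> 3 = 14, matching 100 / 7.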
+}; + +} // namespace at::cuda::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..61f576368c3286a3a4eb233b93513f8c3b560a79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/KernelUtils.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +namespace at::cuda::detail { + +// CUDA: grid stride looping +// +// int64_t _i_n_d_e_x specifically prevents overflow in the loop increment. +// If input.numel() < INT_MAX, _i_n_d_e_x < INT_MAX, except after the final +// iteration of the loop where _i_n_d_e_x += blockDim.x * gridDim.x can be +// greater than INT_MAX. But in that case _i_n_d_e_x >= n, so there are no +// further iterations and the overflowed value in i=_i_n_d_e_x is not used. +#define CUDA_KERNEL_LOOP_TYPE(i, n, index_type) \ + int64_t _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \ + for (index_type i=_i_n_d_e_x; _i_n_d_e_x < (n); _i_n_d_e_x+=blockDim.x * gridDim.x, i=_i_n_d_e_x) + +#define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int) + + +// Use 1024 threads per block, which requires cuda sm_2x or above +constexpr int CUDA_NUM_THREADS = 1024; + +// CUDA: number of blocks for threads. +inline int GET_BLOCKS(const int64_t N, const int64_t max_threads_per_block=CUDA_NUM_THREADS) { + TORCH_INTERNAL_ASSERT(N > 0, "CUDA kernel launch blocks must be positive, but got N=", N); + constexpr int64_t max_int = std::numeric_limits::max(); + + // Round up division for positive number that cannot cause integer overflow + auto block_num = (N - 1) / max_threads_per_block + 1; + TORCH_INTERNAL_ASSERT(block_num <= max_int, "Can't schedule too many blocks on CUDA device"); + + return static_cast(block_num); +} + +} // namespace at::cuda::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h new file mode 100644 index 0000000000000000000000000000000000000000..95e52c94377bf568060c25f887454dbbaf2054b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h @@ -0,0 +1,11 @@ +#pragma once +#include +namespace at::cuda { +// Forward-declares at::cuda::NVRTC +struct NVRTC; + +namespace detail { +extern NVRTC lazyNVRTC; +} // namespace detail + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2959fe8e65b87e4113f02225079bf1ff7ef8cbc7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh @@ -0,0 +1,119 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +// If element_sizes is nullptr, then the strides will be in bytes, otherwise +// the strides will be in # of elements. +// Operands that share the same shape, but may have different strides. 
+// OffsetCalculator iterates the tensor in a column-major order + +#if defined(USE_ROCM) +constexpr int MAX_DIMS = 16; +#else +constexpr int MAX_DIMS = 25; +#endif + +template +struct OffsetCalculator { + // We allow having negative strides to implement some operations like torch.flip + using stride_t = std::conditional_t, + index_t>; + // The offset for each argument. Wrapper around fixed-size array. + // On CUDA, zero sized array is not allowed, so when we are handling nullary + // operators, we need to create a size 1 offset to avoid compiler failure. + // This size 1 offset is just a placeholder, and we will not use it. + using offset_type = at::detail::Array(NARGS, 1)>; + + // if element_sizes is nullptr, then the strides will be in bytes, otherwise + // the strides will be in # of elements. + OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides, const int64_t* element_sizes=nullptr) : dims(dims) { + TORCH_CHECK(dims <= MAX_DIMS, "tensor has too many (>", MAX_DIMS, ") dims"); + for (int i=0; i < dims; i++){ + sizes_[i] = at::cuda::detail::IntDivider(sizes[i]); + for (int arg = 0; arg < NARGS; arg++) { + int64_t element_size = (element_sizes == nullptr ? 1LL : element_sizes[arg]); + strides_[i][arg] = strides[arg][i] / element_size; + } + } + } + + C10_HOST_DEVICE offset_type get(index_t linear_idx) const { + offset_type offsets; + #pragma unroll + for (int arg = 0; arg < NARGS; arg++) { + offsets[arg] = 0; + } + + #pragma unroll + for (int dim = 0; dim < MAX_DIMS; ++dim) { + if (dim == dims) { + break; + } + auto divmod = sizes_[dim].divmod(linear_idx); + linear_idx = divmod.div; + + #pragma unroll + for (int arg = 0; arg < NARGS; arg++) { + offsets[arg] += divmod.mod * strides_[dim][arg]; + } + + } + return offsets; + } + + int dims; + at::cuda::detail::IntDivider sizes_[MAX_DIMS]; + stride_t strides_[MAX_DIMS][std::max(NARGS, 1)]; +}; + +template +struct TrivialOffsetCalculator { + // The offset for each argument. Wrapper around fixed-size array. + // The offsets are in # of elements, not in bytes. + // On CUDA, zero sized array is not allowed, so when we are handling nullary + // operators, we need to create a size 1 offset to avoid compiler failure. + // This size 1 offset is just a placeholder, and we will not use it. 
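  // Worked example for OffsetCalculator::get above (illustrative note, not
  // part of the original header): with dims = 2, sizes_ = {3, 4} and one float
  // operand whose byte strides are strides_ = {{4}, {12}} (a contiguous tensor
  // iterated with the innermost dimension first), get(7) peels the linear
  // index as 7 -> (7 % 3 = 1, 7 / 3 = 2) -> (2 % 4 = 2, 2 / 4 = 0) and
  // accumulates 1*4 + 2*12 = 28 bytes, i.e. element 7 of the flattened tensor.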
+ using offset_type = at::detail::Array(NARGS, 1)>; + + C10_HOST_DEVICE offset_type get(index_t linear_idx) const { + offset_type offsets; + #pragma unroll + for (int arg = 0; arg < NARGS; arg++) { + offsets[arg] = linear_idx; + } + return offsets; + } +}; + +// Make an OffsetCalculator with byte offsets +template +static OffsetCalculator make_offset_calculator(const at::TensorIteratorBase& iter) { + TORCH_INTERNAL_ASSERT(N <= iter.ntensors()); + std::array strides; + for (int i = 0; i < N; i++) { + strides[i] = iter.strides(i).data(); + } + return OffsetCalculator(iter.ndim(), iter.shape().data(), strides.data()); +} + +// Make an OffsetCalculator with element offsets +template +static OffsetCalculator make_element_offset_calculator( + const at::TensorIteratorBase& iter) { + TORCH_INTERNAL_ASSERT(N <= iter.ntensors()); + std::array strides; + std::array element_sizes; + for (int i = 0; i < N; i++) { + strides[i] = iter.strides(i).data(); + element_sizes[i] = iter.element_size(i); + } + return OffsetCalculator( + iter.ndim(), iter.shape().data(), strides.data(), element_sizes.data()); +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c9eeeadd542d7702348276d7b538bd49846c5da8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh @@ -0,0 +1,43 @@ +// No "#pragma once" because this is a raw definition that can be copied by jit codegen. +// Eager mode clients should not include this file directly, instead, +// they should #include , which has a #pragma once. + +// Stores RNG state values. Passed as a kernel argument. +// See Note [CUDA Graph-safe RNG states]. +// +// The raw definition lives in its own file so jit codegen can easily copy it. +namespace at { + +struct PhiloxCudaState { + PhiloxCudaState() = default; + // Called if graph capture is not underway + PhiloxCudaState(uint64_t seed, + uint64_t offset) { + seed_.val = seed; + offset_.val = offset; + } + // Called if graph capture is underway + PhiloxCudaState(int64_t* seed, + int64_t* offset_extragraph, + uint32_t offset_intragraph) { + seed_.ptr = seed; + offset_.ptr = offset_extragraph; + offset_intragraph_ = offset_intragraph; + captured_ = true; + } + + // Public members, directly accessible by at::cuda::philox::unpack. + // If we made them private with getters/setters, the getters/setters + // would have to be __device__, and we can't declare __device__ in ATen. 
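  // Usage sketch (illustrative, not part of the original header): a consumer
  // kernel receives this struct by value and resolves it with
  // at::cuda::philox::unpack(), defined in UnpackRaw.cuh further below, which
  // yields the (seed, offset) pair whether or not graph capture was underway:
  //
  //   auto seeds = at::cuda::philox::unpack(philox_args);
  //   uint64_t seed = std::get<0>(seeds), offset = std::get<1>(seeds);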
+ union Payload { + uint64_t val; + int64_t* ptr; + }; + + Payload seed_; + Payload offset_; + uint32_t offset_intragraph_ = 0; + bool captured_ = false; +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a320000ae881faa7416bae6ed1f37793f357a73a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/TensorInfo.cuh @@ -0,0 +1,116 @@ +#pragma once + +#include + +namespace at::cuda::detail { + +#define MAX_TENSORINFO_DIMS 25 + +// CUDA kernel argument that defines tensor layout +template +struct TensorInfo { + TensorInfo(); + TensorInfo(T* p, + int dim, + IndexType sz[MAX_TENSORINFO_DIMS], + IndexType st[MAX_TENSORINFO_DIMS]); + + // Set the size of the given dimension to 1, as if it were a + // reduction dim (allows you to calculate offsets of the reduction + // slice) + void reduceDim(int dim); + + // See note on [collapse dims]. + int collapseDims(const int excludeDim = -1); + + // Contiguous tensors of more than one dimension are collapsed down + // to one tensor + __host__ __device__ inline bool isContiguous() const { + return (dims == 1 && strides[0] == 1); + } + + T* data; + IndexType sizes[MAX_TENSORINFO_DIMS]; + IndexType strides[MAX_TENSORINFO_DIMS]; + int dims; +}; + +template +TensorInfo::TensorInfo() { + data = nullptr; + dims = 0; +} + +template +TensorInfo::TensorInfo(T* p, + int dim, + IndexType sz[MAX_TENSORINFO_DIMS], + IndexType st[MAX_TENSORINFO_DIMS]) { + data = p; + dims = dim; + TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "CUDA Tensors cannot have more than 25 dimensions"); + + for (int i = 0; i < dim; ++i) { + sizes[i] = sz[i]; + strides[i] = st[i]; + } +} + +template +void +TensorInfo::reduceDim(int dim) { + TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1"); + sizes[dim] = 1; +} + +template +int +TensorInfo::collapseDims(const int excludeDim) { + auto result = at::collapse_dims(sizes, strides, dims, excludeDim); + dims = std::get<1>(result); + return std::get<0>(result); +} + +// Translate a linear index for the apply to a T* offset; +// specialized on `Dims` to reduce nvcc compilation time +template +struct IndexToOffset { + static __host__ __device__ IndexType get( + IndexType linearId, + const TensorInfo& info) { + + IndexType offset = 0; + + // Uses static dims + for (int i = Dims - 1; i > 0; --i) { + IndexType curDimIndex = linearId % info.sizes[i]; + IndexType curDimOffset = curDimIndex * info.strides[i]; + offset += curDimOffset; + linearId /= info.sizes[i]; + } + + return offset + linearId * info.strides[0]; + } +}; + +// Uses dynamic (runtime) instead of static (compiletime) dims +template +struct IndexToOffset { + static inline __host__ __device__ IndexType get( + IndexType linearId, + const TensorInfo& info) { + + IndexType offset = 0; + + for (int i = info.dims - 1; i > 0; --i) { + IndexType curDimIndex = linearId % info.sizes[i]; + IndexType curDimOffset = curDimIndex * info.strides[i]; + offset += curDimOffset; + linearId /= info.sizes[i]; + } + + return offset + linearId * info.strides[0]; + } +}; + +} // namespace at::cuda::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh new file mode 100644 index 
0000000000000000000000000000000000000000..70cd222a484844cdf4f4cb222af2ac6408598cbd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh @@ -0,0 +1,28 @@ +// No "#pragma once" because this is a raw definition that can be copied by jit codegen. +// Eager mode clients should not include this file directly, instead, +// they should #include , which has a #pragma once. + +namespace at::cuda::philox { + +// In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether +// that instance was created with graph capture underway or not. +// See Note [CUDA Graph-safe RNG states]. +// +// We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen. +// Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable. +// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda. +// +// The raw definition lives in its own file so jit codegen can easily copy it. +__host__ __device__ __forceinline__ std::tuple +unpack(at::PhiloxCudaState arg) { + if (arg.captured_) { + // static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long". + // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel. + // For most threads' reads it will hit in cache, so it shouldn't hurt performance. + return std::make_tuple(static_cast(*arg.seed_.ptr), static_cast(*(arg.offset_.ptr) + arg.offset_intragraph_)); + } else { + return std::make_tuple(arg.seed_.val, arg.offset_.val); + } +} + +} // namespace at::cuda::philox diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h new file mode 100644 index 0000000000000000000000000000000000000000..a7a440cd7b614732d5ad16eb28c56e71bb20b6e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h @@ -0,0 +1,40 @@ +#pragma once +#include + +#if AT_USE_JITERATOR() + +#include +#include +#include + +#include +#include + +namespace at::cuda { + +TORCH_CUDA_CPP_API c10::SmallVector CompileAndLaunchKernel( + const std::string& code_string, + const std::string& kernel_name, + const int num_outputs, + const c10::SmallVector& tensors, + const c10::SmallVector& extra_args, + bool return_by_ref); + +} // namespace at::cuda + +#else + +namespace at::cuda { + +TORCH_CUDA_CPP_API c10::SmallVector CompileAndLaunchKernel( + const std::string& code_string, + const std::string& kernel_name, + const int num_outputs, + const c10::SmallVector& tensors, + const c10::SmallVector& extra_args, + bool return_by_ref) { + TORCH_CHECK(false, "Jiterator is not supported"); + } +} // namespace at::cuda + +#endif // AT_USE_JITERATOR() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..eef073fbd86b72cfd4283e73e4b5a711669e9c3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h @@ -0,0 +1,249 @@ +#pragma once +#include + +#if AT_USE_JITERATOR() + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at::native { + + +#define AT_FOR_8_CASES(_) \ + _(1) \ + _(2) \ + _(3) \ + _(4) \ + _(5) \ + _(6) \ + _(7) \ + _(8) + +#define AT_FOR_8_CASES_WITH_COMMA(_) \ + _(1) , \ + _(2) , \ + _(3) , 
\ + _(4) , \ + _(5) , \ + _(6) , \ + _(7) , \ + _(8) + +c10::SmallVector get_extra_args_typenames(const c10::SmallVector& extra_args) { + c10::SmallVector args_typenames(extra_args.size()); + for (const auto i : c10::irange(extra_args.size())) { + args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type()); + } + return args_typenames; +} + +int can_vectorize_up_to(at::ScalarType type, char* pointer) { + switch(type) { +#define DEFINE_CASE(ctype, scalartype) \ + case ScalarType::scalartype : return memory::can_vectorize_up_to(pointer); + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) +#undef DEFINE_CASE + + default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); + } +} + +// jitted version of the above +// See Note [Jiterator], this relies on the assumptions enumerated there +int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) { + const at::ScalarType common_dtype = iter.common_dtype(); + const at::ScalarType result_dtype = common_dtype; + + // Deals with output + int result = can_vectorize_up_to(result_dtype, static_cast(iter.data_ptr(0))); + + // Incorporates input(s) + for (auto i = 1; i < iter.ntensors(); ++i) { + result = std::min(result, can_vectorize_up_to(common_dtype, static_cast(iter.data_ptr(i)))); + } + + return result; +} + +template +static std::unique_ptr> make_unique_offset_calculator( + const TensorIteratorBase& iter) { + // array size can not be 0, this happens when N == 0 + constexpr int array_size = std::max(N, 1); + TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs())); + + std::array strides; + int64_t element_sizes[array_size]; + for (int i = 0; i < N; i++) { + int index = IS_INPUT ? i + iter.noutputs() : i; + strides[i] = iter.strides(index).data(); + element_sizes[i] = iter.element_size(index); + } + return std::make_unique>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); +} + +template +struct OffsetCalculatorVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using OffsetCalculatorTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + OffsetCalculatorVariant(const TensorIteratorBase& iter) { + int num = IS_INPUT ? 
iter.ninputs() : iter.noutputs(); + + switch(num) { +#define DEFINE_CASE(index) \ + case index : v = make_unique_offset_calculator(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + default: + TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + + private: + OffsetCalculatorTypes v; +}; + +struct ArrayVariant { +// works for up to 8 input + 8 outputs +#define DEFINE_CASE(index) at::detail::Array, at::detail::Array + using ArrayTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + ArrayVariant(const TensorIteratorBase& iter) { + int ntensors = iter.ntensors(); + switch(ntensors) { +#define DEFINE_CASE(index) \ + case index: array = at::detail::Array{}; break; \ + case index+8: array = at::detail::Array{}; break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors); + } + + std::visit([&](auto& a) { + for (auto i = 0; i < ntensors; ++i) { + a[i] = (char*)iter.data_ptr(i); + } + }, array); + } + + void* data_ptr() { + return std::visit([](auto & a){ return static_cast(&a); }, array); + } + +private: + ArrayTypes array; +}; + +struct TrivialOffsetCalculatorVariant { +#define DEFINE_CASE(index) TrivialOffsetCalculator + using TrivialOffsetCalculatorTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + TrivialOffsetCalculatorVariant(int num) { + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = TrivialOffsetCalculator(); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(&v); }, v); + } + +private: + TrivialOffsetCalculatorTypes v; +}; + +struct LoadWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using LoadWithCastPtr = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + LoadWithCastVariant(const TensorIteratorBase& iter) { + int arity = iter.ninputs(); + switch(arity) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + LoadWithCastPtr v; +}; + +struct StoreWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using StoreWithCastPtr = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + StoreWithCastVariant(const TensorIteratorBase& iter) { + int num = iter.noutputs(); + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + StoreWithCastPtr v; +}; + +} // namespace at::native + + +#endif // AT_USE_JITERATOR() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..aba40d4f42eac74ee9435d904ac3fb82d1e988c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +TORCH_CUDA_CPP_API const std::string &get_traits_string(); +TORCH_CUDA_CPP_API const std::string &get_cmath_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_math_string(); + +} // namespace at::cuda diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h new file mode 100644 index 0000000000000000000000000000000000000000..ab3ed56796b2f7d7b70fe5f116badaf693bf42d9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmCommon.h @@ -0,0 +1,174 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. +// +#pragma once + +#include + +#include +#include +#include + +namespace at::cuda::tunable { + +enum class BlasOp { + N = 0, + T = 1 +}; + +inline std::string BlasOpToString(BlasOp op) { + switch (op) { + case BlasOp::N: + return "N"; + case BlasOp::T: + return "T"; + } + TORCH_CHECK(false, "unrecognized BlasOp"); + return "N"; +} + +template +struct GemmParams : OpParams { + std::string Signature() const override { + return c10::str(transa, transb, "_", m, "_", n, "_", k); + } + + GemmParams* DeepCopy() const { + GemmParams* copy = new GemmParams; + *copy = *this; + c10::DeviceIndex device = 0; + AT_CUDA_CHECK(c10::cuda::GetDevice(&device)); + size_t c_size = m * n * sizeof(T); + copy->c = static_cast(c10::cuda::CUDACachingAllocator::raw_alloc(c_size)); + AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync( + copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true)); + return copy; + } + + // only call on object returned by DeepCopy + void Delete() { + c10::cuda::CUDACachingAllocator::raw_delete(c); + } + + TuningStatus NumericalCheck(GemmParams *other) { + auto options = at::TensorOptions().dtype(c10::CppTypeToScalarType::value).device(at::kCUDA); + // comparison done as 1D tensor + at::Tensor ref = at::from_blob(c, {m*n}, options); + at::Tensor oth = at::from_blob(other->c, {m*n}, options); + at::Tensor ref_float = ref.to(at::kFloat); + at::Tensor oth_float = oth.to(at::kFloat); + std::vector atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5}; + std::vector rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5}; + double last_succeed_atol = 1; + double last_succeed_rtol = 1; + for (auto& atol : atols) { + for (auto& rtol : rtols) { + if (at::allclose(ref_float, oth_float, rtol, atol)) { + last_succeed_atol = atol; + last_succeed_rtol = rtol; + } + } + } + if (last_succeed_atol == 1) { + return FAIL; + } + else { + TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol); + } + + return OK; + } + + char transa; + char transb; + int64_t m; + int64_t n; 
+ int64_t k; + at::opmath_type alpha; + const T* a; + int64_t lda; + const T* b; + int64_t ldb; + at::opmath_type beta; + T* c; + int64_t ldc; +}; + +template +struct GemmStridedBatchedParams : OpParams { + std::string Signature() const override { + return c10::str(transa, transb, "_", m, "_", n, "_", k, "_B_", batch); + } + + GemmStridedBatchedParams* DeepCopy() const { + GemmStridedBatchedParams* copy = new GemmStridedBatchedParams; + *copy = *this; + c10::DeviceIndex device = 0; + AT_CUDA_CHECK(c10::cuda::GetDevice(&device)); + size_t c_size = batch * stride_c * sizeof(T); + copy->c = static_cast(c10::cuda::CUDACachingAllocator::raw_alloc(c_size)); + AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync( + copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true)); + return copy; + } + + // only call on object returned by DeepCopy + void Delete() { + c10::cuda::CUDACachingAllocator::raw_delete(c); + } + + TuningStatus NumericalCheck(GemmStridedBatchedParams *other) { + auto options = at::TensorOptions().dtype(c10::CppTypeToScalarType::value).device(at::kCUDA); + // comparison done as 1D tensor + at::Tensor ref = at::from_blob(c, {batch*stride_c}, options); + at::Tensor oth = at::from_blob(other->c, {batch*stride_c}, options); + at::Tensor ref_float = ref.to(at::kFloat); + at::Tensor oth_float = oth.to(at::kFloat); + std::vector atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5}; + std::vector rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5}; + double last_succeed_atol = 1; + double last_succeed_rtol = 1; + for (auto& atol : atols) { + for (auto& rtol : rtols) { + if (at::allclose(ref_float, oth_float, rtol, atol)) { + last_succeed_atol = atol; + last_succeed_rtol = rtol; + } + } + } + if (last_succeed_atol == 1) { + return FAIL; + } + else { + TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol); + } + + return OK; + } + + char transa; + char transb; + int64_t m; + int64_t n; + int64_t k; + at::opmath_type alpha; + const T* a; + int64_t lda; + int64_t stride_a; + const T* b; + int64_t ldb; + int64_t stride_b; + at::opmath_type beta; + T* c; + int64_t ldc; + int64_t stride_c; + int64_t batch; +}; + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h new file mode 100644 index 0000000000000000000000000000000000000000..fbed75d513b5dcba079b381d754bc7614cb30ac0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h @@ -0,0 +1,379 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
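For orientation, the following sketch (illustrative only, not part of this patch; the helper name is hypothetical) shows how a caller might populate the GemmParams struct defined in GemmCommon.h above for a plain column-major C = alpha*A*B + beta*C, and what its Signature() key looks like.

#include <ATen/cuda/tunable/GemmCommon.h>

void fill_gemm_params_sketch(int64_t m, int64_t n, int64_t k,
                             const float* A, const float* B, float* C,
                             at::cuda::tunable::GemmParams<float>& params) {
  params.transa = 'N'; params.transb = 'N';
  params.m = m; params.n = n; params.k = k;
  params.alpha = 1.0f;
  params.a = A;  params.lda = m;   // column-major leading dimensions
  params.b = B;  params.ldb = k;
  params.beta = 0.0f;
  params.c = C;  params.ldc = m;
  // params.Signature() evaluates to "NN_<m>_<n>_<k>", the key under which
  // tuning results for this problem shape can be recorded and looked up.
}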
+ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +#define TORCH_HIPBLASLT_CHECK(EXPR) \ + do { \ + hipblasStatus_t __err = EXPR; \ + TORCH_CHECK(__err == HIPBLAS_STATUS_SUCCESS, \ + "hipblaslt error: ", \ + hipblasStatusToString(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +namespace at::cuda::tunable { + +#ifdef HIPBLASLT_HAS_GETINDEXFROMALGO +#define GETINDEXFROMALGO(algo) hipblaslt_ext::getIndexFromAlgo(algo) +#else +static int getIndexFromAlgo(hipblasLtMatmulAlgo_t& algo) { + int* algo_ptr = (int*)algo.data; + if(*algo_ptr < 0) { + return -1; + } + return *algo_ptr; +} +#define GETINDEXFROMALGO(algo) getIndexFromAlgo(algo) +#endif + +#ifdef HIPBLASLT_CUSTOM_COMPUTE_TYPE +#define COMPUTE_TYPE_32 HIPBLASLT_COMPUTE_F32 +#else +#define COMPUTE_TYPE_32 HIPBLAS_COMPUTE_32F +#endif + +#ifdef HIPBLASLT_CUSTOM_DATA_TYPE + +template +constexpr hipblasltDatatype_t HipBlasDataTypeFor(); + +template <> +constexpr hipblasltDatatype_t HipBlasDataTypeFor() { + return HIPBLASLT_R_32F; +} + +template <> +constexpr hipblasltDatatype_t HipBlasDataTypeFor() { + return HIPBLASLT_R_16F; +} + +template <> +constexpr hipblasltDatatype_t HipBlasDataTypeFor() { + return HIPBLASLT_R_16B; +} + +template <> +constexpr hipblasltDatatype_t HipBlasDataTypeFor() { + return HIPBLASLT_R_64F; +} + +#define DATA_TYPE_R_32 HIPBLASLT_R_32F + +#else + +template +constexpr hipblasDatatype_t HipBlasDataTypeFor(); + +template <> +constexpr hipblasDatatype_t HipBlasDataTypeFor() { + return HIPBLAS_R_32F; +} + +template <> +constexpr hipblasDatatype_t HipBlasDataTypeFor() { + return HIPBLAS_R_16F; +} + +template <> +constexpr hipblasDatatype_t HipBlasDataTypeFor() { + return HIPBLAS_R_16B; +} + +template <> +constexpr hipblasDatatype_t HipBlasDataTypeFor() { + return HIPBLAS_R_64F; +} + +#ifdef HIPBLAS_V2 +#define DATA_TYPE_R_32 HIP_R_32F +#else +#define DATA_TYPE_R_32 HIPBLAS_R_32F +#endif + +#endif + +template +int GetBatchFromParams(const ParamsT* params) { + return 1; +} + +template +int GetBatchFromParams(const GemmStridedBatchedParams* params) { + return params->batch; +} + +template +int GetStrideAFromParams(const ParamsT* params) { + return 1; +} + +template +int GetStrideAFromParams(const GemmStridedBatchedParams* params) { + return params->stride_a; +} + +template +int GetStrideBFromParams(const ParamsT* params) { + return 1; +} + +template +int GetStrideBFromParams(const GemmStridedBatchedParams* params) { + return params->stride_b; +} + +template +int GetStrideCFromParams(const ParamsT* params) { + return 1; +} + +template +int GetStrideCFromParams(const GemmStridedBatchedParams* params) { + return params->stride_c; +} + +static hipblasOperation_t _hipblasOpFromChar(char op) { + switch (op) { + case 'n': + case 'N': + return HIPBLAS_OP_N; + case 't': + case 'T': + return HIPBLAS_OP_T; + case 'c': + case 'C': + return HIPBLAS_OP_C; + } + AT_ERROR( + "_hipblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`"); +} + +static char _charFromhipblasOp(hipblasOperation_t op) { + switch (op) { + case HIPBLAS_OP_N: + return 'N'; + case HIPBLAS_OP_T: + return 'T'; + case HIPBLAS_OP_C: + return 'C'; + } + AT_ERROR( + "_charFromhipblasOp input should be HIPBLAS_OP_N/T/C but got `", op, "`"); +} + +static hipblasOperation_t MapLayoutToHipBlasLt(BlasOp layout) { + if (layout == BlasOp::N) { + return HIPBLAS_OP_N; + } + return HIPBLAS_OP_T; +} + +static size_t GetHipblasltWorkspaceSize() { + static const char * env = getenv("HIPBLASLT_WORKSPACE_SIZE"); + // 256MB 
is max workspace size allowed for hipblaslt + // hipblaslt-bench uses 32MB + // recommendation from hipblaslt author was 76MB + size_t workspace_size = 2*128*1024*1024; // default 256MB + if (env) { + try { + workspace_size = std::stoi(env); + } catch(std::invalid_argument const& e) { + TORCH_WARN("invalid HIPBLASLT_WORKSPACE_SIZE,", + " using default workspace size of ", workspace_size, " bytes."); + } catch(std::out_of_range const& e) { + TORCH_WARN("HIPBLASLT_WORKSPACE_SIZE out of range,", + " using default workspace size of ", workspace_size, " bytes."); + } + } + return workspace_size; +} + +template +class HipblasltGemmOp : public Callable { + public: + HipblasltGemmOp(hipblasLtMatmulAlgo_t algo) : algo_{algo} {} + + TuningStatus Call(const ParamsT* params) override { + hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout); + hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout); + auto in_out_datatype = HipBlasDataTypeFor(); + auto opa = _hipblasOpFromChar(params->transa); + auto opb = _hipblasOpFromChar(params->transb); + + TORCH_CHECK(transa_outer == opa && transb_outer == opb, "trans mismatch, shouldn't happen"); + + float alpha = static_cast(params->alpha); + float beta = static_cast(params->beta); + + hipblasLtMatrixLayout_t mat_a, mat_b, mat_c; + hipblasLtMatmulDesc_t matmul; + if (opa == HIPBLAS_OP_N) { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, in_out_datatype, params->m, params->k, params->lda)); + } + else { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, in_out_datatype, params->k, params->m, params->lda)); + } + if (opb == HIPBLAS_OP_N) { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, in_out_datatype, params->k, params->n, params->ldb)); + } + else { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, in_out_datatype, params->n, params->k, params->ldb)); + } + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_c, in_out_datatype, params->m, params->n, params->ldc)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescCreate(&matmul, COMPUTE_TYPE_32, DATA_TYPE_R_32)); + + int batch = GetBatchFromParams(params); + if (batch > 1) { + int64_t stride_a = GetStrideAFromParams(params); + int64_t stride_b = GetStrideBFromParams(params); + int64_t stride_c = GetStrideCFromParams(params); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_a, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_a, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_a, sizeof(stride_a))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_b, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_b, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_b, sizeof(stride_b))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_c, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_c, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_c, sizeof(stride_c))); + } + + TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescSetAttribute( + matmul, HIPBLASLT_MATMUL_DESC_TRANSA, &opa, sizeof(int32_t))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescSetAttribute( + matmul, HIPBLASLT_MATMUL_DESC_TRANSB, &opb, sizeof(int32_t))); + + size_t workspace_size = GetHipblasltWorkspaceSize(); + + auto op_handle = at::cuda::getCurrentCUDABlasLtHandle(); + + size_t ret_workspace_size = 0; + auto status = 
hipblaslt_ext::matmulIsAlgoSupported(op_handle, + matmul, + &alpha, + mat_a, + mat_b, + &beta, + mat_c, + mat_c, + algo_, + ret_workspace_size); + + if (status == HIPBLAS_STATUS_SUCCESS) { + if (ret_workspace_size >= workspace_size) { + //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " workspace too large"); + return FAIL; + } + } + else { + //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " not supported"); + return FAIL; + } + + void* workspace_buffer = nullptr; + if (workspace_size > 0) { + workspace_buffer = c10::cuda::CUDACachingAllocator::raw_alloc(workspace_size); + } + + TORCH_HIPBLASLT_CHECK(hipblasLtMatmul(op_handle, + matmul, + &alpha, + params->a, + mat_a, + params->b, + mat_b, + &beta, + params->c, + mat_c, + params->c, + mat_c, + &algo_, + workspace_buffer, + workspace_size, + at::cuda::getCurrentCUDAStream())); + + TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescDestroy(matmul)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_a)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_b)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_c)); + if (workspace_size > 0) { + c10::cuda::CUDACachingAllocator::raw_delete(workspace_buffer); + } + return OK; + } + + private: + hipblasLtMatmulAlgo_t algo_; +}; + +template +auto GetHipBlasLtTypeStringAndOps() { + hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout); + hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout); + auto in_out_datatype = HipBlasDataTypeFor(); + std::vector heuristic_result; + + hipblasLtHandle_t handle; + TORCH_HIPBLASLT_CHECK(hipblasLtCreate(&handle)); + TORCH_HIPBLASLT_CHECK(hipblaslt_ext::getAllAlgos(handle, + hipblaslt_ext::GemmType::HIPBLASLT_GEMM, + transa_outer, + transb_outer, + in_out_datatype, + in_out_datatype, + in_out_datatype, + in_out_datatype, + COMPUTE_TYPE_32, + heuristic_result)); + TORCH_HIPBLASLT_CHECK(hipblasLtDestroy(handle)); + + // Sort heuristic_result by algo index to make sure the order of returned algos is deterministic. + std::sort(heuristic_result.begin(), + heuristic_result.end(), + [](hipblasLtMatmulHeuristicResult_t& a, hipblasLtMatmulHeuristicResult_t& b) { + return GETINDEXFROMALGO(a.algo) < GETINDEXFROMALGO(b.algo); + }); + + int returned_algo_count = heuristic_result.size(); + std::vector>>> ret; + for (int i = 0; i < returned_algo_count; i++) { + auto algo = heuristic_result[i].algo; + int algo_index = GETINDEXFROMALGO(algo); + auto callable = std::make_unique>(algo); + std::string type_string = c10::str( + "Gemm_Hipblaslt_", _charFromhipblasOp(transa_outer), _charFromhipblasOp(transb_outer), "_", algo_index); + ret.emplace_back(type_string, std::move(callable)); + } + + return ret; +} + +template +auto GetHipBlasLtGemmTypeStringAndOps() { + return GetHipBlasLtTypeStringAndOps>(); +} + +template +auto GetHipBlasLtGemmStridedBatchedTypeStringAndOps() { + return GetHipBlasLtTypeStringAndOps>(); +} + +#undef TORCH_HIPBLASLT_CHECK +#undef GETINDEXFROMALGO +#undef COMPUTE_TYPE_32 +#undef DATA_TYPE_R_32 + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h new file mode 100644 index 0000000000000000000000000000000000000000..f096ff00fd9b49d109d1bea8589551a8af941a12 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h @@ -0,0 +1,275 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +#pragma once + +#include +#include +#include +#include + +#define ROCBLAS_BETA_FEATURES_API +#include + +#define TORCH_ROCBLAS_CHECK(EXPR) \ + do { \ + rocblas_status __err = EXPR; \ + TORCH_CHECK(__err == rocblas_status_success, \ + "rocblas error: ", \ + rocblas_status_to_string(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +namespace at::cuda::tunable { + +template +constexpr rocblas_datatype RocBlasDataTypeFor(); + +template <> +constexpr rocblas_datatype RocBlasDataTypeFor() { + return rocblas_datatype_f32_r; +} + +template <> +constexpr rocblas_datatype RocBlasDataTypeFor() { + return rocblas_datatype_f64_r; +} + +template <> +constexpr rocblas_datatype RocBlasDataTypeFor() { + return rocblas_datatype_f16_r; +} + +template <> +constexpr rocblas_datatype RocBlasDataTypeFor() { + return rocblas_datatype_bf16_r; +} + +template <> +constexpr rocblas_datatype RocBlasDataTypeFor>() { + return rocblas_datatype_f32_c; +} + +template <> +constexpr rocblas_datatype RocBlasDataTypeFor>() { + return rocblas_datatype_f64_c; +} + +template +constexpr rocblas_datatype RocBlasComputeTypeFor(); + +template <> +constexpr rocblas_datatype RocBlasComputeTypeFor() { + return rocblas_datatype_f32_r; +} + +template <> +constexpr rocblas_datatype RocBlasComputeTypeFor() { + return rocblas_datatype_f64_r; +} + +template <> +constexpr rocblas_datatype RocBlasComputeTypeFor() { + // Note that we're returning the _compute_ type for a given datatype. + // As of 12/2022, using compute type FP16 for 16-bit floats was much + // slower than using compute type FP32. So we use FP32 compute even for + // FP16 datatypes. This is how GEMM is implemented even in the function + // rocblasGemmHelper (see fpgeneric.h) + return rocblas_datatype_f32_r; +} + +template <> +constexpr rocblas_datatype RocBlasComputeTypeFor() { + // Note that we're returning the _compute_ type for a given datatype. + // As of 12/2022, using compute type FP16 for 16-bit floats was much + // slower than using compute type FP32. So we use FP32 compute even for + // BF16 datatypes. This is how GEMM is implemented even in the function + // rocblasGemmHelper (see fpgeneric.h) + return rocblas_datatype_f32_r; +} + +template <> +constexpr rocblas_datatype RocBlasComputeTypeFor>() { + return rocblas_datatype_f32_c; +} + +template <> +constexpr rocblas_datatype RocBlasComputeTypeFor>() { + return rocblas_datatype_f64_c; +} + +template +auto DoCastForHalfOrBfloat16(const T fp) { + return fp; +} + +template <> +inline auto DoCastForHalfOrBfloat16(const Half fp) { + // alpha and beta should be the same as compute_type, in Half case it is float. + float h = fp; + return h; +} + +template <> +inline auto DoCastForHalfOrBfloat16(const BFloat16 fp) { + // alpha and beta should be the same as compute_type, in bfloat16 case it is float. 
+ float h = fp; + return h; +} + +static rocblas_operation _rocblasOpFromChar(char op) { + switch (op) { + case 'n': + case 'N': + return rocblas_operation_none; + case 't': + case 'T': + return rocblas_operation_transpose; + case 'c': + case 'C': + return rocblas_operation_conjugate_transpose; + } + AT_ERROR( + "_rocblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`"); +} + +template +class RocblasGemmOp : public Callable> { + public: + RocblasGemmOp(int solution) : solution_{solution} {} + + TuningStatus Call(const GemmParams* params) override { + auto input_output_type = RocBlasDataTypeFor(); + auto compute_type = RocBlasComputeTypeFor(); + auto h_a = DoCastForHalfOrBfloat16(params->alpha); + auto h_b = DoCastForHalfOrBfloat16(params->beta); + auto status = rocblas_gemm_ex( + (rocblas_handle)at::cuda::getCurrentCUDABlasHandle(), + _rocblasOpFromChar(params->transa), + _rocblasOpFromChar(params->transb), + params->m, params->n, params->k, + &h_a, + params->a, input_output_type, params->lda, + params->b, input_output_type, params->ldb, + &h_b, + params->c, input_output_type, params->ldc, + params->c, input_output_type, params->ldc, + compute_type, + rocblas_gemm_algo_solution_index, + solution_, + rocblas_gemm_flags_none); + if (status != rocblas_status_success) { + return FAIL; + } + return OK; + } + + private: + int solution_; +}; + +template +auto GetRocBlasGemmTypeStringAndOps() { + rocblas_handle handle = (rocblas_handle)at::cuda::getCurrentCUDABlasHandle(); + int solution_size; + auto input_output_type = RocBlasDataTypeFor(); + auto compute_type = RocBlasComputeTypeFor(); + // Get the number of available solutions + TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle, + input_output_type, + input_output_type, + compute_type, + rocblas_gemm_flags_none, + nullptr, + &solution_size)); + std::vector solutions(solution_size); + // Get the list of available solutions + TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle, + input_output_type, + input_output_type, + compute_type, + rocblas_gemm_flags_none, + solutions.data(), + &solution_size)); + // Sort the solutions in ascending order to make the solution vector deterministic across runs + std::sort(solutions.begin(), solutions.end()); + + std::vector>>>> ret; + for (size_t i = 0; i < solutions.size(); ++i) { + auto callable = std::make_unique>(solutions[i]); + ret.emplace_back(std::make_pair(c10::str("Gemm_Rocblas_", solutions[i]), std::move(callable))); + } + return ret; +} + +template +class RocblasGemmStridedBatchedOp : public Callable> { + public: + RocblasGemmStridedBatchedOp(int solution) : solution_{solution} {} + + TuningStatus Call(const GemmStridedBatchedParams* params) override { + auto input_output_type = RocBlasDataTypeFor(); + auto compute_type = RocBlasComputeTypeFor(); + auto h_a = DoCastForHalfOrBfloat16(params->alpha); + auto h_b = DoCastForHalfOrBfloat16(params->beta); + auto status = rocblas_gemm_strided_batched_ex( + (rocblas_handle)at::cuda::getCurrentCUDABlasHandle(), + _rocblasOpFromChar(params->transa), + _rocblasOpFromChar(params->transb), + params->m, params->n, params->k, + &h_a, + params->a, input_output_type, params->lda, params->stride_a, + params->b, input_output_type, params->ldb, params->stride_b, + &h_b, + params->c, input_output_type, params->ldc, params->stride_c, + params->c, input_output_type, params->ldc, params->stride_c, + params->batch, + compute_type, + rocblas_gemm_algo_solution_index, + solution_, + rocblas_gemm_flags_none); + if (status != 
rocblas_status_success) { + return FAIL; + } + return OK; + } + + private: + int solution_; +}; + +template +auto GetRocBlasGemmStridedBatchedTypeStringAndOps() { + rocblas_handle handle = (rocblas_handle)at::cuda::getCurrentCUDABlasHandle(); + int solution_size; + auto input_output_type = RocBlasDataTypeFor(); + auto compute_type = RocBlasComputeTypeFor(); + // Get the number of available solutions + TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle, + input_output_type, + input_output_type, + compute_type, + rocblas_gemm_flags_none, + nullptr, + &solution_size)); + std::vector solutions(solution_size); + // Get the list of available solutions + TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle, + input_output_type, + input_output_type, + compute_type, + rocblas_gemm_flags_none, + solutions.data(), + &solution_size)); + // Sort the solutions in ascending order to make the solution vector deterministic across runs + std::sort(solutions.begin(), solutions.end()); + + std::vector>>>> ret; + for (size_t i = 0; i < solutions.size(); ++i) { + auto callable = std::make_unique>(solutions[i]); + ret.emplace_back(std::make_pair(c10::str("Gemm_Rocblas_", solutions[i]), std::move(callable))); + } + return ret; +} + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h new file mode 100644 index 0000000000000000000000000000000000000000..69889cbbcbfc6f0b03da266f49e92723a38094ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h @@ -0,0 +1,34 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. +// +#pragma once + +#include + +#include + +namespace at::cuda::tunable { + +class StreamTimer : public ITimer { + public: + StreamTimer(); + virtual ~StreamTimer(); + + void Start() override; + + void End() override; + + float Duration() override; + + private: + cudaEvent_t start_; + cudaEvent_t end_; +}; + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/Tunable.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/Tunable.h new file mode 100644 index 0000000000000000000000000000000000000000..eb849a213fe5addf366c05d35b9593b605bb07fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/Tunable.h @@ -0,0 +1,205 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. 
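+// Rough usage sketch (illustrative only; the calls below are the declarations
+// in this header, the concrete values are made up):
+//   auto* ctx = at::cuda::tunable::getTuningContext();
+//   ctx->EnableTunableOpAndTuning();            // enable result lookup and online tuning
+//   ctx->SetMaxTuningIterations(30);            // bound per-candidate profiling work
+//   ctx->SetFilename("tunableop_results.csv");  // where results may be persisted
+//   // Setting PYTORCH_TUNABLEOP_VERBOSE=1 in the environment makes TUNABLE_LOG
+//   // print tuning progress to stderr.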
+// +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at::cuda::tunable { + +static void TunableLog(const std::string& msg) { + static const char *env = getenv("PYTORCH_TUNABLEOP_VERBOSE"); + if (env != nullptr && strcmp(env, "1") == 0) { + std::cerr << msg << std::endl; + } +} +#define TUNABLE_LOG(...) TunableLog(c10::str(__VA_ARGS__)) + +enum TuningStatus { + OK = 0, + FAIL = 1, + UNSUPPORTED = 2, +}; + +// Mapping from params signature to kernel id +class ResultEntry { + public: + explicit ResultEntry(const std::string& key, double time) : key_(key), time_(time) {} + bool operator==(const ResultEntry& other) { return key_ == other.key_; } + bool operator!=(const ResultEntry& other) { return key_ != other.key_; } + operator std::string () { return key_; } + friend std::ostream& operator<<(std::ostream& stream, const ResultEntry& entry); + static ResultEntry Null() { return ResultEntry("Null", 0.0); } + static ResultEntry Default() { return ResultEntry("Default", 0.0); } + + private: + std::string key_; + double time_; +}; + +typedef std::unordered_map KernelMap; +typedef std::unordered_map ResultsMap; + +struct TuningResults { + // Validates if these results are compatible with the libraries + std::unordered_map validators; + + // Mapping from Callable signature to Callable's tuning result + ResultsMap results; +}; + +class TuningResultsManager { + public: + TuningResultsManager() = default; + ~TuningResultsManager() = default; + + KernelMap Lookup(const std::string& op_signature); + + ResultEntry Lookup(const std::string& op_signature, const std::string& params_signature); + + inline void AddImpl(const std::string& op_signature, + const std::string& params_signature, + ResultEntry best, + KernelMap& kernel_map); + + void Add(const std::string& op_signature, + const std::string& params_signature, + ResultEntry best); + + void Delete(const std::string& op_signature, const std::string& params_signature); + + inline void DisjointMergeImpl( + const std::string& op_signature, + const KernelMap& kernel_map, + /*out*/ ResultsMap& results); + + void Load(const ResultsMap& results_to_load); + + ResultsMap Dump(); + + void DisjointMerge(const std::string& op_signature, const KernelMap& kernel_map); + + size_t GetSize(); + + private: + std::mutex lock_; + ResultsMap results_; +}; + +class TuningResultsValidator { + public: + using GetFunc = std::function; + using ValidateFunc = std::function; + using GetValidateFuncs = std::unordered_map>; + + TuningResultsValidator(); + ~TuningResultsValidator() = default; + + std::unordered_map GetAllValidators() const; + TuningStatus ValidateAll(const std::unordered_map& to_validate) const; + void RegisterValidator(const std::string& key, const GetFunc& gf, const ValidateFunc& vf); + + protected: + std::string GetPyTorchVersion() const; + TuningStatus ValidatePyTorchVersion(const std::string& value) const; + + public: + static constexpr const std::array mandatory_keys{"PT_VERSION"}; + + private: + GetValidateFuncs validators_; +}; + +class TuningContext { + public: + TuningContext(); + ~TuningContext(); + TuningContext(TuningContext &) = delete; + TuningContext(TuningContext &&) = delete; + TuningContext &operator=(TuningContext &) = delete; + TuningContext &operator=(TuningContext &&) = delete; + + void EnableTunableOp(); + void DisableTunableOp(); + bool IsTunableOpEnabled() const; + + void EnableTuning(); + void DisableTuning(); + bool IsTuningEnabled() const; + + void 
SetMaxTuningDurationMs(int max_duration_ms); + int GetMaxTuningDurationMs() const; + + void SetMaxTuningIterations(int max_iter); + int GetMaxTuningIterations() const; + + void SetMaxWarmupDurationMs(int max_duration_ms); + int GetMaxWarmupDurationMs() const; + + void SetMaxWarmupIterations(int max_iter); + int GetMaxWarmupIterations() const; + + void EnableTunableOpAndTuning(); + void DisableTunableOpAndTuning(); + + TuningResultsManager& GetTuningResultsManager(); + + TuningResultsValidator& GetTuningResultsValidator(); + + TuningResults GetTuningResults(); + + TuningStatus LoadTuningResults(const TuningResults& tr); + + void SetFilename(const std::string& filename); + std::string GetFilename() const; + + protected: + bool ReadFile(const std::string& filename); + bool WriteFile(const std::string& filename); + + private: + bool enable_; + bool tuning_enable_; + bool manager_initialized_; + int max_tuning_duration_ms_; + int max_tuning_iterations_; + int max_warmup_duration_ms_; + int max_warmup_iterations_; + mutable TuningResultsManager manager_; + mutable c10::once_flag manager_init_once_; + TuningResultsValidator validator_; + std::string filename_; + size_t results_count_from_input_file_; +}; + +TuningContext* getTuningContext(); + +class ITimer { + public: + ITimer() = default; + virtual ~ITimer() = default; + + virtual void Start() = 0; + virtual void End() = 0; + + /// Computes the elapsed time in milliseconds between Start() and End() + virtual float Duration() = 0; +}; + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h new file mode 100644 index 0000000000000000000000000000000000000000..3ba0d761277be90fcc6c1c988be378845b459742 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h @@ -0,0 +1,278 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. 
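+// Candidate registration, summarized (descriptive comment): GemmTunableOp always
+// registers a "Default" entry that calls at::cuda::blas::gemm_internal; under
+// USE_ROCM it also registers one callable per rocBLAS solution index and, when
+// ROCm >= 5.7 and PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED is unset or "1", one per
+// hipBLASLt algorithm (complex types excluded). Library-version validators
+// (ROCM_VERSION, GCN_ARCH_NAME, ROCBLAS_VERSION, HIPBLASLT_VERSION) guard cached
+// results. Signature() produces a string such as "GemmTunableOp_float_TN"
+// (the layout letters come from BlasOpToString, defined elsewhere).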
+// +#pragma once + +#include +#ifdef USE_ROCM +#if ROCM_VERSION >= 50700 +#include +#endif +#include +#endif +#include +#include +#include +#include + +#ifdef USE_ROCM +#include +#endif + +#define STRINGIFY(s) #s +#define XSTRINGIFY(s) STRINGIFY(s) + +namespace at::cuda::tunable { + +template +class DefaultGemmOp : public Callable> { + public: + TuningStatus Call(const GemmParams* params) override { + at::cuda::blas::gemm_internal( + params->transa, params->transb, + params->m, params->n, params->k, + params->alpha, + params->a, params->lda, + params->b, params->ldb, + params->beta, + params->c, params->ldc); + return OK; + } +}; + +template +class DefaultGemmStridedBatchedOp : public Callable> { + public: + TuningStatus Call(const GemmStridedBatchedParams* params) override { + at::cuda::blas::bgemm_internal( + params->transa, params->transb, + params->m, params->n, params->k, + params->alpha, + params->a, params->lda, params->stride_a, + params->b, params->ldb, params->stride_b, + params->beta, + params->c, params->ldc, params->stride_c, + params->batch); + return OK; + } +}; + +template +bool IsZero(T v) { + return v == 0.0f; +} + +template <> +bool IsZero(BFloat16 v) { + return v.x == 0; +} + +template <> +bool IsZero(Half v) { + return float(v) == 0.0f; +} + +template <> +bool IsZero(c10::complex v) { + return v == 0.0; +} + +template <> +bool IsZero(c10::complex v) { + return v == 0.0f; +} + +template +std::string TypeName(T v) { + return "unknown"; +} + +template <> +std::string TypeName(float v) { + return "float"; +} + +template <> +std::string TypeName(double v) { + return "double"; +} + +template <> +std::string TypeName(BFloat16 v) { + return "BFloat16"; +} + +template <> +std::string TypeName(Half v) { + return "Half"; +} + +template <> +std::string TypeName(c10::complex v) { + return "c10::complex"; +} + +template <> +std::string TypeName(c10::complex v) { + return "c10::complex"; +} + + +template +class GemmTunableOp : public TunableOp, StreamTimer> { + public: + GemmTunableOp() { + this->RegisterOp(std::string("Default"), std::make_unique>()); + + auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); + +#ifdef USE_ROCM + for (auto&& [name, op] : GetRocBlasGemmTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + + if (validators.find("ROCM_VERSION") == validators.end()) { + std::string rocm_version = ROCM_BUILD_INFO; + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "ROCM_VERSION", + [rocm_version]() { return rocm_version; }, + [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; }); + } + + if (validators.find("GCN_ARCH_NAME") == validators.end()) { + std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName; + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "GCN_ARCH_NAME", + [gcn_arch_name]() { return gcn_arch_name; }, + [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; }); + } + + if (validators.find("ROCBLAS_VERSION") == validators.end()) { + std::string rocblas_version = c10::str( + XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".", + XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".", + XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-", + XSTRINGIFY(ROCBLAS_VERSION_TWEAK)); + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "ROCBLAS_VERSION", + [rocblas_version]() { return rocblas_version; }, + [rocblas_version](auto&& k) { return rocblas_version == k ? 
OK : FAIL; }); + } +#endif + +#if defined(USE_ROCM) && ROCM_VERSION >= 50700 + static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); + if (env == nullptr || strcmp(env, "1") == 0) { + // disallow tuning of hipblaslt with c10::complex + if constexpr ( + !std::is_same_v> && + !std::is_same_v>) { + for (auto&& [name, op] : GetHipBlasLtGemmTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + + if (validators.find("HIPBLASLT_VERSION") == validators.end()) { + std::string hipblaslt_version = c10::str( + XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", + XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", + XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", + XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "HIPBLASLT_VERSION", + [hipblaslt_version]() { return hipblaslt_version; }, + [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); + } + } +#endif + } + + std::string Signature() override { + return c10::str("GemmTunableOp_", TypeName(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout)); + } +}; + +template +class GemmStridedBatchedTunableOp : public TunableOp, StreamTimer> { + public: + GemmStridedBatchedTunableOp() { + this->RegisterOp(std::string("Default"), std::make_unique>()); + + auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); + +#ifdef USE_ROCM + for (auto&& [name, op] : GetRocBlasGemmStridedBatchedTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + + if (validators.find("ROCM_VERSION") == validators.end()) { + std::string rocm_version = ROCM_BUILD_INFO; + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "ROCM_VERSION", + [rocm_version]() { return rocm_version; }, + [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; }); + } + + if (validators.find("GCN_ARCH_NAME") == validators.end()) { + std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName; + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "GCN_ARCH_NAME", + [gcn_arch_name]() { return gcn_arch_name; }, + [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; }); + } + + if (validators.find("ROCBLAS_VERSION") == validators.end()) { + std::string rocblas_version = c10::str( + XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".", + XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".", + XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-", + XSTRINGIFY(ROCBLAS_VERSION_TWEAK)); + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "ROCBLAS_VERSION", + [rocblas_version]() { return rocblas_version; }, + [rocblas_version](auto&& k) { return rocblas_version == k ? 
OK : FAIL; }); + } +#endif + +#if defined(USE_ROCM) && ROCM_VERSION >= 50700 + static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); + if (env == nullptr || strcmp(env, "1") == 0) { + // disallow tuning of hipblaslt with c10::complex + if constexpr ( + !std::is_same_v> && + !std::is_same_v>) { + for (auto&& [name, op] : GetHipBlasLtGemmStridedBatchedTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + + if (validators.find("HIPBLASLT_VERSION") == validators.end()) { + std::string hipblaslt_version = c10::str( + XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", + XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", + XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", + XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); + getTuningContext()->GetTuningResultsValidator().RegisterValidator( + "HIPBLASLT_VERSION", + [hipblaslt_version]() { return hipblaslt_version; }, + [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); + } + } +#endif + } + + std::string Signature() override { + return c10::str("GemmStridedBatchedTunableOp_", TypeName(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout)); + } +}; + +#undef XSTRINGIFY +#undef STRINGIFY + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h new file mode 100644 index 0000000000000000000000000000000000000000..65257974ab0cd11f369533b18decb36bfdb43d14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h @@ -0,0 +1,242 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. 
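+// Tuning flow in this file, summarized (descriptive comment): operator() first
+// asks the TuningResultsManager for a cached ResultEntry keyed by Signature()
+// and the params' Signature(); on a miss with tuning enabled, FindFastest()
+// computes a reference answer with the "Default" op, then for each candidate
+// optionally checks numerics (skippable via PYTORCH_TUNABLEOP_NUMERICAL_CHECK=0),
+// takes a 3-iteration rough timing to discard candidates slower than 2x the best
+// so far, and finally times a warmup + profiling run bounded by the
+// TuningContext max-iteration/duration settings.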
+// +#pragma once + +#include +#include + +#ifndef _WIN32 +#include +#endif + +#include +#include +#include +#include + +namespace at::cuda::tunable { + +template +class Callable { + public: + Callable() = default; + Callable(Callable&&) = default; + virtual ~Callable() = default; + virtual TuningStatus Call(const ParamsT*) { + return FAIL; + } + virtual TuningStatus IsSupported(const ParamsT* params) { + return Call(params); + } +}; + +template +class TunableOp { + public: + TunableOp() = default; + TunableOp(TunableOp&&) = default; + virtual ~TunableOp() = default; + + TuningStatus operator()(const ParamsT* params) { + ResultEntry result = ResultEntry::Null(); + TuningContext* ctx = getTuningContext(); + if (ctx->IsTunableOpEnabled()) { + auto& mgr = ctx->GetTuningResultsManager(); + auto op_sig = Signature(); + auto params_sig = params->Signature(); + result = mgr.Lookup(op_sig, params_sig); + // If there is not previous tuning result been found, we do the tuning iff tuning is enabled + if (result == ResultEntry::Null() && ctx->IsTuningEnabled()) { + result = FindFastest(params); + mgr.Add(op_sig, params_sig, result); + } + } + else { + result = ResultEntry::Default(); + } + if (result == ResultEntry::Null()) { + TUNABLE_LOG("no result, using default"); + result = ResultEntry::Default(); + } + auto iter = ops_.find(result); + TORCH_CHECK(iter != ops_.end()); + return iter->second->Call(params); + } + + virtual std::string Signature() { + // According to C++17 standard https://wg21.link/n4659 section 15.7.4 + // > if the operand of typeid refers to the + // > object under construction or destruction, typeid yields the std::type_info object representing the constructor + // > or destructor’s class. + // So delay the op signature generation. + c10::call_once(signature_init_once_, [this]() { signature_ = CreateSignature(); }); + return signature_; + } + + protected: + void RegisterOp(const std::string& name, std::unique_ptr> op) { + this->op_names_.emplace_back(name); + this->ops_.emplace(name, std::move(op)); + } + + private: + static void WarmUp(Callable *op, ParamsT* param, size_t num_iter) { + for (size_t i = 0; i < num_iter; i++) { + TORCH_CHECK(op->Call(param) == OK); + } + } + + static double Profile(Callable *op, ParamsT* param, size_t num_iter) { + TimerT timer{}; + timer.Start(); + for (size_t i = 0; i < num_iter; i++) { + TORCH_CHECK(op->Call(param) == OK); + } + timer.End(); + return timer.Duration() / num_iter; + } + + protected: + bool IsNumericsCheckEnabled() { + static const char *env = getenv("PYTORCH_TUNABLEOP_NUMERICAL_CHECK"); + if (env != nullptr && strcmp(env, "0") == 0) { + return false; + } + return true; + } + + virtual ResultEntry FindFastest(const ParamsT* params) { + TuningContext* ctx = getTuningContext(); + auto op_sig = Signature(); + auto params_sig = params->Signature(); + TUNABLE_LOG("finding fastest for ", op_sig, '(', params_sig, ')', " out of ", op_names_.size(), " candidates"); + auto min_duration_ms = std::numeric_limits::infinity(); + std::string id_name = "Default"; + + // calcaulte a reference answer for numerical check + ParamsT* reference_params = params->DeepCopy(); + TORCH_CHECK(ops_[ResultEntry::Default()]->Call(reference_params) == OK); + + // need a copy of params to reuse + ParamsT* reusable_params = params->DeepCopy(); + + for (size_t i = 0; i < op_names_.size(); i++) { + auto* candidate = ops_[op_names_[i]].get(); // borrow pointer + auto status = candidate->Call(reusable_params); + if (status != OK) { + TUNABLE_LOG("├──unsupported id=", 
i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + + if (IsNumericsCheckEnabled()) { + ParamsT* numerical_params = params->DeepCopy(); + WarmUp(candidate, numerical_params, 1); + status = reference_params->NumericalCheck(numerical_params); + numerical_params->Delete(); + if (status != OK) { + TUNABLE_LOG("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + } + + // collect a small profile + constexpr const int approx_num_iter = 3; + auto approx_duration = Profile(candidate, reusable_params, approx_num_iter); + // bail if too slow + if (approx_duration > 2 * min_duration_ms) { + TUNABLE_LOG("├──skip slow instance id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + + // for warmup does user set max duration, max iters, or both? + double max_warmup_duration = ctx->GetMaxWarmupDurationMs(); + int max_warmup_iter = ctx->GetMaxWarmupIterations(); + int warmup_iter = 1; // default + if (max_warmup_duration > 0) { + int duration_iters = max_warmup_duration / approx_duration; + if (max_warmup_iter > 0) { + warmup_iter = std::min(max_warmup_iter, duration_iters); + } + else { + warmup_iter = duration_iters; + } + } + else if (max_warmup_iter > 0) { + warmup_iter = max_warmup_iter; + } + + // for tuning does user set max duration, max iters, or both? + double max_tuning_duration = ctx->GetMaxTuningDurationMs(); + int max_tuning_iter = ctx->GetMaxTuningIterations(); + int tuning_iter = 100; // default + if (max_tuning_duration > 0) { + int duration_iters = max_tuning_duration / approx_duration; + if (max_tuning_iter > 0) { + tuning_iter = std::min(max_tuning_iter, duration_iters); + } + else { + tuning_iter = duration_iters; + } + } + else if (max_tuning_iter > 0) { + tuning_iter = max_tuning_iter; + } + + // do the full warmup followed by tuning + double warmup_ms = warmup_iter * approx_duration; + double tuning_ms = tuning_iter * approx_duration; + TUNABLE_LOG("├──tuning using " + "warmup iters ", warmup_iter, " [", warmup_ms, " ms] " + "and tuning iters ", tuning_iter, " [", tuning_ms, " ms] ", + "instance id=", i, ", ", op_sig, "(", params_sig, ") ", op_names_[i]); + WarmUp(candidate, reusable_params, warmup_iter); + auto duration_ms = Profile(candidate, reusable_params, tuning_iter); + if (duration_ms < min_duration_ms) { + TUNABLE_LOG("├──found better instance id=", i, ". " , duration_ms, "ms. 
", op_names_[i]); + min_duration_ms = duration_ms; + id_name = op_names_[i]; + } + } + + reusable_params->Delete(); + reference_params->Delete(); + + TUNABLE_LOG("└──found fastest for ", op_sig, '(', params_sig, ") ", id_name); + return ResultEntry(id_name, min_duration_ms); + } + + private: + std::string CreateSignature() { +#ifndef _WIN32 + const auto* name = typeid(*this).name(); + char buf[256]; + size_t buf_len = 256; + abi::__cxa_demangle(name, buf, &buf_len, nullptr); + buf[255] = '\0'; + return buf; +#else + return typeid(*this).name(); +#endif + } + + mutable c10::once_flag signature_init_once_; + std::string signature_; + + std::unordered_map>> ops_; + std::vector op_names_; +}; + +struct OpParams { + OpParams() {} + virtual ~OpParams() = default; + virtual std::string Signature() const = 0; +}; + +} // namespace at::cuda::tunable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h new file mode 100644 index 0000000000000000000000000000000000000000..8c0dfea6e2b49aa6feccb3cdcafb488e0967343f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h @@ -0,0 +1,546 @@ +#ifndef C10_MACROS_MACROS_H_ +#define C10_MACROS_MACROS_H_ +#include + +/* Main entry for c10/macros. + * + * In your code, include c10/macros/Macros.h directly, instead of individual + * files in this folder. + */ + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. +// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#include + +#if defined(__clang__) +#define __ubsan_ignore_float_divide_by_zero__ \ + __attribute__((no_sanitize("float-divide-by-zero"))) +#define __ubsan_ignore_undefined__ __attribute__((no_sanitize("undefined"))) +#define __ubsan_ignore_signed_int_overflow__ \ + __attribute__((no_sanitize("signed-integer-overflow"))) +#define __ubsan_ignore_pointer_overflow__ \ + __attribute__((no_sanitize("pointer-overflow"))) +#define __ubsan_ignore_function__ __attribute__((no_sanitize("function"))) +#else +#define __ubsan_ignore_float_divide_by_zero__ +#define __ubsan_ignore_undefined__ +#define __ubsan_ignore_signed_int_overflow__ +#define __ubsan_ignore_pointer_overflow__ +#define __ubsan_ignore_function__ +#endif + +// Detect address sanitizer as some stuff doesn't work with it +#undef C10_ASAN_ENABLED + +// for clang +#if defined(__has_feature) +#if ((__has_feature(address_sanitizer))) +#define C10_ASAN_ENABLED 1 +#endif +#endif + +// for gcc +#if defined(__SANITIZE_ADDRESS__) +#if __SANITIZE_ADDRESS__ +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 1 +#endif +#endif +#endif + +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 0 +#endif + +// Disable the copy and assignment operator for a class. Note that this will +// disable the usage of the class in std containers. 
+#define C10_DISABLE_COPY_AND_ASSIGN(classname) \ + classname(const classname&) = delete; \ + classname& operator=(const classname&) = delete + +#define C10_CONCATENATE_IMPL(s1, s2) s1##s2 +#define C10_CONCATENATE(s1, s2) C10_CONCATENATE_IMPL(s1, s2) + +#define C10_MACRO_EXPAND(args) args + +#define C10_STRINGIZE_IMPL(x) #x +#define C10_STRINGIZE(x) C10_STRINGIZE_IMPL(x) + +/** + * C10_ANONYMOUS_VARIABLE(str) introduces an identifier starting with + * str and ending with a number that varies with the line. + */ +#ifdef __COUNTER__ +#define C10_UID __COUNTER__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __COUNTER__) +#else +#define C10_UID __LINE__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __LINE__) +#endif + +#ifdef __has_cpp_attribute +#define C10_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define C10_HAS_CPP_ATTRIBUTE(x) (0) +#endif + +/// C10_NODISCARD - Warn if a type or return value is discarded. + +// Technically, we should check if __cplusplus > 201402L here, because +// [[nodiscard]] is only defined in C++17. However, some compilers +// we care about don't advertise being C++17 (e.g., clang), but +// support the attribute anyway. In fact, this is not just a good idea, +// it's the law: clang::warn_unused_result doesn't work on nvcc + clang +// and the best workaround for this case is to use [[nodiscard]] +// instead; see https://github.com/pytorch/pytorch/issues/13118 +// +// Note to future editors: if you have noticed that a compiler is +// misbehaving (e.g., it advertises support, but the support doesn't +// actually work, or it is emitting warnings). Some compilers which +// are strict about the matter include MSVC, which will complain: +// +// error C2429: attribute 'nodiscard' requires compiler flag '/std:c++latest' +// +// Exhibits: +// - MSVC 19.14: https://godbolt.org/z/Dzd7gn (requires /std:c++latest) +// - Clang 8.0.0: https://godbolt.org/z/3PYL4Z (always advertises support) +// - gcc 8.3: https://godbolt.org/z/4tLMQS (always advertises support) +#if C10_HAS_CPP_ATTRIBUTE(nodiscard) +#define C10_NODISCARD [[nodiscard]] +// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious +// error when __has_cpp_attribute is given a scoped attribute in C mode. +#elif __cplusplus && C10_HAS_CPP_ATTRIBUTE(clang::warn_unused_result) +// TODO: It's possible this is still triggering +// https://github.com/pytorch/pytorch/issues/13118 on Windows; if it is, better +// fix it. +#define C10_NODISCARD [[clang::warn_unused_result]] +#else +#define C10_NODISCARD +#endif + +// suppress an unused variable. +#if defined(_MSC_VER) && !defined(__clang__) +#define C10_UNUSED __pragma(warning(suppress : 4100 4101)) +#else +#define C10_UNUSED __attribute__((__unused__)) +#endif //_MSC_VER + +#if !defined(__has_attribute) +#define __has_attribute(x) 0 +#endif + +// Direct port of LLVM_ATTRIBUTE_USED. +#if __has_attribute(used) +#define C10_USED __attribute__((__used__)) +#else +#define C10_USED +#endif + +#define C10_RESTRICT __restrict + +// Simply define the namespace, in case a dependent library want to refer to +// the c10 namespace but not any nontrivial files. +namespace c10 {} +namespace c10::cuda {} +namespace c10::hip {} +namespace c10::xpu {} + +// Since C10 is the core library for caffe2 (and aten), we will simply reroute +// all abstractions defined in c10 to be available in caffe2 as well. +// This is only for backwards compatibility. Please use the symbols from the +// c10 namespace where possible. 
+namespace caffe2 { +using namespace c10; +} +namespace at { +using namespace c10; +} +namespace at::cuda { +using namespace c10::cuda; +} // namespace at::cuda + +// WARNING!!! THIS IS A GIANT HACK!!! +// This line means you cannot simultaneously include c10/hip +// and c10/cuda and then use them from the at::cuda namespace. +// This is true in practice, because HIPIFY works inplace on +// files in ATen/cuda, so it assumes that c10::hip is available +// from at::cuda. This namespace makes that happen. When +// HIPIFY is no longer out-of-place, we can switch the cuda +// here to hip and everyone is happy. +namespace at::cuda { +using namespace c10::hip; +} // namespace at::cuda + +namespace at::xpu { +using namespace c10::xpu; +} // namespace at::xpu + +// C10_LIKELY/C10_UNLIKELY +// +// These macros provide parentheses, so you can use these macros as: +// +// if C10_LIKELY(some_expr) { +// ... +// } +// +// NB: static_cast to boolean is mandatory in C++, because __builtin_expect +// takes a long argument, which means you may trigger the wrong conversion +// without it. +// +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define C10_LIKELY(expr) (__builtin_expect(static_cast(expr), 1)) +#define C10_UNLIKELY(expr) (__builtin_expect(static_cast(expr), 0)) +#else +#define C10_LIKELY(expr) (expr) +#define C10_UNLIKELY(expr) (expr) +#endif + +/// C10_NOINLINE - Functions whose declaration is annotated with this will not +/// be inlined. +#ifdef __GNUC__ +#define C10_NOINLINE __attribute__((noinline)) +#elif _MSC_VER +#define C10_NOINLINE __declspec(noinline) +#else +#define C10_NOINLINE +#endif + +#if defined(_MSC_VER) +#define C10_ALWAYS_INLINE __forceinline +#elif __has_attribute(always_inline) || defined(__GNUC__) +#define C10_ALWAYS_INLINE __attribute__((__always_inline__)) inline +#else +#define C10_ALWAYS_INLINE inline +#endif + +#if defined(_MSC_VER) +#define C10_ATTR_VISIBILITY_HIDDEN +#elif defined(__GNUC__) +#define C10_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden"))) +#else +#define C10_ATTR_VISIBILITY_HIDDEN +#endif + +#define C10_ERASE C10_ALWAYS_INLINE C10_ATTR_VISIBILITY_HIDDEN + +#include + +#ifdef __HIPCC__ +// Unlike CUDA, HIP requires a HIP header to be included for __host__ to work. +// We do this #include here so that C10_HOST_DEVICE and friends will Just Work. +// See https://github.com/ROCm-Developer-Tools/HIP/issues/441 +#include +#endif + +#if defined(__CUDACC__) || defined(__HIPCC__) +// Designates functions callable from the host (CPU) and the device (GPU) +#define C10_HOST_DEVICE __host__ __device__ +#define C10_DEVICE __device__ +#define C10_HOST __host__ +// constants from +// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications) +// The maximum number of threads per multiprocessor is 1024 for Turing +// architecture (7.5), 1536 for Geforce Ampere (8.6)/Jetson Orin (8.7), and +// 2048 for all other architectures. You'll get warnings if you exceed these +// constants. Hence, the following macros adjust the input values from the user +// to resolve potential warnings. 
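+// Worked example (using the constants/macros defined just below): on an
+// __CUDA_ARCH__ == 750 build CUDA_MAX_THREADS_PER_SM is 1024, so
+// C10_MIN_BLOCKS_PER_SM(512, 4) evaluates to (1024 + 512 - 1) / 512 == 2 rather
+// than the requested 4, while C10_MAX_THREADS_PER_BLOCK(512) stays 512.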
+#if __CUDA_ARCH__ == 750 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024; +#elif __CUDA_ARCH__ == 860 || __CUDA_ARCH__ == 870 || __CUDA_ARCH__ == 890 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1536; +#else +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048; +#endif +// CUDA_MAX_THREADS_PER_BLOCK is same for all architectures currently +constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024; +// CUDA_THREADS_PER_BLOCK_FALLBACK is the "canonical fallback" choice of block +// size. 256 is a good number for this fallback and should give good occupancy +// and versatility across all architectures. +constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256; +// NOTE: if you are thinking of constexpr-ify the inputs to launch bounds, it +// turns out that although __launch_bounds__ can take constexpr, it +// can't take a constexpr that has anything to do with templates. +// Currently we use launch_bounds that depend on template arguments in +// Loops.cuh, Reduce.cuh and LossCTC.cuh. Hence, C10_MAX_THREADS_PER_BLOCK +// and C10_MIN_BLOCKS_PER_SM are kept as macros. +// Suppose you were planning to write __launch_bounds__(a, b), based on your +// performance tuning on a modern GPU. Instead, you should write +// __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(a), C10_MIN_BLOCKS_PER_SM(a, b)), +// which will also properly respect limits on old architectures. +#define C10_MAX_THREADS_PER_BLOCK(val) \ + (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \ + : CUDA_THREADS_PER_BLOCK_FALLBACK) +#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \ + ((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \ + ? (blocks_per_sm) \ + : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \ + (threads_per_block)))) +// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__ +#define C10_LAUNCH_BOUNDS_0 \ + __launch_bounds__( \ + 256, 4) // default launch bounds that should give good occupancy and + // versatility across all architectures. +#define C10_LAUNCH_BOUNDS_1(max_threads_per_block) \ + __launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block)))) +#define C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ + __launch_bounds__( \ + (C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \ + (C10_MIN_BLOCKS_PER_SM((max_threads_per_block), (min_blocks_per_sm)))) +#else +#define C10_HOST_DEVICE +#define C10_HOST +#define C10_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_HIP_HOST_DEVICE __host__ __device__ +#else +#define C10_HIP_HOST_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_WARP_SIZE warpSize // = 64 or 32 (Defined in hip_runtime.h) +#else +#define C10_WARP_SIZE 32 +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +// CUDA_KERNEL_ASSERT checks the assertion +// even when NDEBUG is defined. This is useful for important assertions in CUDA +// code that would otherwise be suppressed when building Release. 
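+// Sketch of intended use inside device code (kernel name and data are illustrative):
+//   __global__ void check_nonnegative(const float* x, int64_t n) {
+//     int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
+//     if (i < n) {
+//       CUDA_KERNEL_ASSERT(x[i] >= 0.f);
+//     }
+//   }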
+#if defined(__ANDROID__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + (defined(USE_ROCM) && ROCM_VERSION < 40100) +// Those platforms do not support assert() +#define CUDA_KERNEL_ASSERT(cond) +#define SYCL_KERNEL_ASSERT(cond) +#elif defined(_MSC_VER) +#if defined(NDEBUG) +extern "C" { +C10_IMPORT +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void _wassert( + const wchar_t* wexpr, + const wchar_t* wfile, + unsigned line); +#else +#if defined(__CUDA_ARCH__) +__host__ __device__ +#endif // __CUDA_ARCH__ + void + _wassert(wchar_t const* _Message, wchar_t const* _File, unsigned _Line); +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert( \ + _CRT_WIDE(#cond), \ + _CRT_WIDE(__FILE__), \ + static_cast(__LINE__)), \ + 0); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert( \ + _CRT_WIDE(#cond), \ + _CRT_WIDE(__FILE__), \ + static_cast(__LINE__)), \ + 0); \ + } +#else // __APPLE__, _MSC_VER +#if defined(NDEBUG) +extern "C" { +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void __assert_fail( + const char* expr, + const char* file, + unsigned int line, + const char* func); +#else // __SYCL_DEVICE_ONLY__ +#if (defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__))) +// CUDA supports __assert_fail function which are common for both device +// and host side code. +__host__ __device__ +#endif + + // This forward declaration matching the declaration of __assert_fail + // exactly how it is in glibc in case parts of the program are compiled with + // different NDEBUG settings. Otherwise we might get 'ambiguous declaration' + // error. Note: On ROCm - this declaration serves for host side compilation. + void + __assert_fail( + const char* assertion, + const char* file, + unsigned int line, + const char* function) noexcept __attribute__((__noreturn__)); + +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +// ROCm disable kernel assert by default +#if !defined(C10_USE_ROCM_KERNEL_ASSERT) and defined(USE_ROCM) +#define CUDA_KERNEL_ASSERT(cond) +#define SYCL_KERNEL_ASSERT(cond) +#else +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#endif // C10_USE_ROCM_KERNEL_ASSERT and USE_ROCM +#endif // __APPLE__ + +#ifdef __APPLE__ +#include +#endif + +#if defined(__ANDROID__) +#define C10_ANDROID 1 +#define C10_MOBILE 1 +#elif ( \ + defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE)) +#define C10_IOS 1 +#define C10_MOBILE 1 +#endif // ANDROID / IOS + +#if defined(C10_MOBILE) && C10_MOBILE +#define C10_ALWAYS_INLINE_UNLESS_MOBILE inline +#else +#define C10_ALWAYS_INLINE_UNLESS_MOBILE C10_ALWAYS_INLINE +#endif + +#if defined(__CUDA_ARCH__) +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +// Note [static constexpr char* members for windows NVCC] +// The Windows NVCC compiler doesn't handle static constexpr class members, +// although it's fixed in a later version. 
+// (see +// https://developercommunity.visualstudio.com/t/intellisense-error-c11-static-constexpr-member-ini/245425) +// +// If we want to ensure that our field is static under all builds, then we need +// to work around it specifically for windows NVCC by making it (a) const, (b) +// defined outside of the class definition We need to define it outside of the +// class definition because of the C++ standard; char* is not an integral type +// (see +// https://stackoverflow.com/questions/24278473/intellisense-a-member-of-type-const-char-const-cannot-have-an-in-class-in) +// +// So instead of this: +// struct Foo { +// static constexpr const char* name = "foo"; +// } +// In Windows NVCC, we end up with this: +// struct Foo { +// static const char* name; +// } +// const char* Foo::name = "foo"; +// +// This gives us a small perf hit for any code that wants to access these field +// members, but right now it isn't used in any perf-critical code paths. +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#else +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA constexpr + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#endif + +#ifndef HAS_DEMANGLE +#if defined(__ANDROID__) || defined(_WIN32) || defined(__EMSCRIPTEN__) +#define HAS_DEMANGLE 0 +#elif defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE) +#define HAS_DEMANGLE 0 +#else +#define HAS_DEMANGLE 1 +#endif +#endif // HAS_DEMANGLE + +#define _C10_PRAGMA__(string) _Pragma(#string) +#define _C10_PRAGMA_(string) _C10_PRAGMA__(string) + +#ifdef __clang__ +#define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push") +#define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop") +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \ + _C10_PRAGMA_(clang diagnostic ignored flag) +#define C10_CLANG_HAS_WARNING(flag) __has_warning(flag) +#else +#define C10_CLANG_DIAGNOSTIC_PUSH() +#define C10_CLANG_DIAGNOSTIC_POP() +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) +#define C10_CLANG_HAS_WARNING(flag) 0 +#endif + +#ifdef __clang__ + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \ + _C10_PRAGMA_(clang diagnostic push) \ + _C10_PRAGMA_(clang diagnostic ignored "-Wunknown-warning-option") \ + _C10_PRAGMA_(clang diagnostic ignored warning) + +#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(clang diagnostic pop) + +#elif __GNUC__ + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \ + _C10_PRAGMA_(GCC diagnostic push) \ + _C10_PRAGMA_(GCC diagnostic ignored "-Wpragmas") \ + _C10_PRAGMA_(GCC diagnostic ignored warning) + +#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(GCC 
diagnostic pop) + +#else + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) +#define C10_DIAGNOSTIC_POP() + +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Array.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Array.h new file mode 100644 index 0000000000000000000000000000000000000000..b6531429d0140aa061947e622bc541ebbc65246e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Array.h @@ -0,0 +1,16 @@ +#include +#include + +namespace c10 { + +// This helper function creates a constexpr std::array +// From a compile time list of values, without requiring you to explicitly +// write out the length. +// +// See also https://stackoverflow.com/a/26351760/23845 +template +inline constexpr auto array_of(T&&... t) -> std::array { + return {{std::forward(t)...}}; +} + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h new file mode 100644 index 0000000000000000000000000000000000000000..9832cf36d5c52b33eae355c1fc1d3e8772aa2e01 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/FbcodeMaps.h @@ -0,0 +1,29 @@ +#ifndef C10_UTIL_FBCODEMAPS_H_ +#define C10_UTIL_FBCODEMAPS_H_ + +// Map typedefs so that we can use folly's F14 maps in fbcode without +// taking a folly dependency. + +#ifdef FBCODE_CAFFE2 +#include +#include +#else +#include +#include +#endif + +namespace c10 { +#ifdef FBCODE_CAFFE2 +template +using FastMap = folly::F14FastMap; +template +using FastSet = folly::F14FastSet; +#else +template +using FastMap = std::unordered_map; +template +using FastSet = std::unordered_set; +#endif +} // namespace c10 + +#endif // C10_UTIL_FBCODEMAPS_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..291899a39537ae356887f0099fa45a03d82b8eac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h @@ -0,0 +1,274 @@ +#pragma once + +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/// Constructors + +inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value) + : x(detail::fp8e4m3fn_from_fp32_value(value)) {} + +/// Implicit conversions + +inline C10_HOST_DEVICE Float8_e4m3fn::operator float() const { + return detail::fp8e4m3fn_to_fp32_value(x); +} + +/// Special values helper + +inline C10_HOST_DEVICE bool Float8_e4m3fn::isnan() const { + return (x & 0b01111111) == 0b01111111; +} + +/// Arithmetic + +inline C10_HOST_DEVICE Float8_e4m3fn +operator+(const Float8_e4m3fn& a, const Float8_e4m3fn& b) { + return static_cast(a) + static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e4m3fn +operator-(const Float8_e4m3fn& a, const Float8_e4m3fn& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e4m3fn +operator*(const Float8_e4m3fn& a, const Float8_e4m3fn& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e4m3fn operator/( + const Float8_e4m3fn& a, + const Float8_e4m3fn& b) __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / 
static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e4m3fn operator-(const Float8_e4m3fn& a) { + return -static_cast(a); +} + +inline C10_HOST_DEVICE Float8_e4m3fn& operator+=( + Float8_e4m3fn& a, + const Float8_e4m3fn& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE Float8_e4m3fn& operator-=( + Float8_e4m3fn& a, + const Float8_e4m3fn& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE Float8_e4m3fn& operator*=( + Float8_e4m3fn& a, + const Float8_e4m3fn& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE Float8_e4m3fn& operator/=( + Float8_e4m3fn& a, + const Float8_e4m3fn& b) { + a = a / b; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(Float8_e4m3fn a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(Float8_e4m3fn a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(Float8_e4m3fn a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(Float8_e4m3fn a, float b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, Float8_e4m3fn b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, Float8_e4m3fn b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, Float8_e4m3fn b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, Float8_e4m3fn b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e4m3fn& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e4m3fn& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e4m3fn& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e4m3fn& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(Float8_e4m3fn a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(Float8_e4m3fn a, double b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(Float8_e4m3fn a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(Float8_e4m3fn a, double b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, Float8_e4m3fn b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, Float8_e4m3fn b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, Float8_e4m3fn b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, Float8_e4m3fn b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE Float8_e4m3fn operator+(Float8_e4m3fn a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Float8_e4m3fn operator-(Float8_e4m3fn a, int b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Float8_e4m3fn operator*(Float8_e4m3fn a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Float8_e4m3fn operator/(Float8_e4m3fn a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e4m3fn operator+(int a, Float8_e4m3fn b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Float8_e4m3fn operator-(int a, 
Float8_e4m3fn b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Float8_e4m3fn operator*(int a, Float8_e4m3fn b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Float8_e4m3fn operator/(int a, Float8_e4m3fn b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE Float8_e4m3fn operator+(Float8_e4m3fn a, int64_t b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Float8_e4m3fn operator-(Float8_e4m3fn a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Float8_e4m3fn operator*(Float8_e4m3fn a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Float8_e4m3fn operator/(Float8_e4m3fn a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e4m3fn operator+(int64_t a, Float8_e4m3fn b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Float8_e4m3fn operator-(int64_t a, Float8_e4m3fn b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Float8_e4m3fn operator*(int64_t a, Float8_e4m3fn b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Float8_e4m3fn operator/(int64_t a, Float8_e4m3fn b) { + return static_cast(a) / b; +} + +/// NOTE: we do not define comparisons directly and instead rely on the implicit +/// conversion from c10::Float8_e4m3fn to float. + +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = false; + static constexpr auto has_denorm = true; + static constexpr auto has_denorm_loss = true; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 4; + static constexpr int digits10 = 0; + static constexpr int max_digits10 = 3; + static constexpr int radix = 2; + static constexpr int min_exponent = -5; + static constexpr int min_exponent10 = -1; + static constexpr int max_exponent = 8; + static constexpr int max_exponent10 = 2; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = false; + + static constexpr c10::Float8_e4m3fn min() { + return c10::Float8_e4m3fn(0x08, c10::Float8_e4m3fn::from_bits()); + } + static constexpr c10::Float8_e4m3fn lowest() { + return c10::Float8_e4m3fn(0xFE, c10::Float8_e4m3fn::from_bits()); + } + static constexpr c10::Float8_e4m3fn max() { + return c10::Float8_e4m3fn(0x7E, c10::Float8_e4m3fn::from_bits()); + } + static constexpr c10::Float8_e4m3fn epsilon() { + return c10::Float8_e4m3fn(0x20, c10::Float8_e4m3fn::from_bits()); + } + static constexpr c10::Float8_e4m3fn round_error() { + return c10::Float8_e4m3fn(0x30, c10::Float8_e4m3fn::from_bits()); + } + static constexpr c10::Float8_e4m3fn quiet_NaN() { + return c10::Float8_e4m3fn(0x7F, c10::Float8_e4m3fn::from_bits()); + } + static constexpr c10::Float8_e4m3fn denorm_min() { + return c10::Float8_e4m3fn(0x01, c10::Float8_e4m3fn::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h new file mode 100644 index 
0000000000000000000000000000000000000000..a952b8330e739beff067aa7821cc187b7c7f4b0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn.h @@ -0,0 +1,246 @@ +#pragma once + +/// Defines the Float8_e4m3fn type (8-bit floating-point) including conversions +/// to standard C types and basic arithmetic operations. Note that arithmetic +/// operations are implemented by converting to floating point and +/// performing the operation in float32. +/// Binary configuration: +/// s eeee mmm +/// 1 sign bit +/// 4 exponent bits +/// 3 mantissa bits +/// bias = 7 +/// +/// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf +/// and inspired by Half implementation from pytorch/c10/util/Half.h + +#include +#include +#include +#include + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#include +#include +#elif !defined(__OPENCL_VERSION__) +#include +#include +#endif + +#ifdef _MSC_VER +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include // operator typeid + +namespace c10 { + +namespace detail { + +/* + * Convert a 8-bit floating-point number in fp8 E4M3FN format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +inline C10_HOST_DEVICE float fp8e4m3fn_to_fp32_value(uint8_t input) { + /* + * Extend the fp8 E4M3FN number to 32 bits and shift to the + * upper part of the 32-bit word: + * +---+----+---+-----------------------------+ + * | S |EEEE|MMM|0000 0000 0000 0000 0000 0000| + * +---+----+---+-----------------------------+ + * Bits 31 27-30 24-26 0-23 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 + * - zero bits. + */ + const uint32_t w = (uint32_t)input << 24; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the bits 0-30 + * of the 32-bit word: + * + * +---+----+---+-----------------------------+ + * | S |EEEE|MMM|0000 0000 0000 0000 0000 0000| + * +---+----+---+-----------------------------+ + * Bits 31 27-30 24-26 0-23 + */ + const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF); + /* + * Renorm shift is the number of bits to shift mantissa left to make the + * half-precision number normalized. If the initial number is normalized, some + * of its high 5 bits (sign == 0 and 4-bit exponent) equals one. In this case + * renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note + * that if we shift denormalized nonsign by renorm_shift, the unit bit of + * mantissa will shift into exponent, turning the biased exponent into 1, and + * making mantissa normalized (i.e. without leading 1). + */ +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + uint32_t renorm_shift = __clz(nonsign); +#elif defined(__SYCL_DEVICE_ONLY__) + // Note: zero is not a supported input into `__builtin_clz` + uint32_t renorm_shift = + nonsign != 0 ? 
__builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT; +#elif defined(_MSC_VER) + unsigned long nonsign_bsr; + _BitScanReverse(&nonsign_bsr, (unsigned long)nonsign); + uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31; +#else + // Note: zero is not a supported input into `__builtin_clz` + uint32_t renorm_shift = + nonsign != 0 ? __builtin_clz(nonsign) : sizeof(uint32_t) * CHAR_BIT; +#endif + renorm_shift = renorm_shift > 4 ? renorm_shift - 4 : 0; + /* + * Iff fp8e4m3fn number has all exponent and mantissa bits set to 1, + * the addition overflows it into bit 31, and the subsequent shift turns the + * high 9 bits into 1. Thus inf_nan_mask == 0x7F800000 if the fp8e4m3fn number + * is Nan, 0x00000000 otherwise + */ + const int32_t inf_nan_mask = + ((int32_t)(nonsign + 0x01000000) >> 8) & INT32_C(0x7F800000); + /* + * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31 + * into 1. Otherwise, bit 31 remains 0. The signed shift right by 31 + * broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask == + * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h) + * 0x00000000 otherwise + */ + const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31; + /* + * 1. Shift nonsign left by renorm_shift to normalize it (if the input + * was denormal) + * 2. Shift nonsign right by 4 so the exponent (4 bits originally) + * becomes an 8-bit field and 3-bit mantissa shifts into the 3 high + * bits of the 23-bit mantissa of IEEE single-precision number. + * 3. Add 0x78 to the exponent (starting at bit 23) to compensate the + * different in exponent bias (0x7F for single-precision number less 0x07 + * for fp8e4m3fn number). + * 4. Subtract renorm_shift from the exponent (starting at bit 23) to + * account for renormalization. As renorm_shift is less than 0x78, this + * can be combined with step 3. + * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the + * input was NaN or infinity. + * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent + * into zero if the input was zero. + * 7. Combine with the sign of the input number. + */ + uint32_t result = sign | + ((((nonsign << renorm_shift >> 4) + ((0x78 - renorm_shift) << 23)) | + inf_nan_mask) & + ~zero_mask); + return fp32_from_bits(result); +} + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E4M3FN format, in bit representation. 
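+ *
+ * For example (illustrative, derived from the e4m3fn layout described in the
+ * file header, bias = 7): 1.0f encodes as 0x38 (0 0111 000), 448.0f encodes
+ * as 0x7E (the largest finite value, 1.75 * 2^8), and any input at or above
+ * 480.0f maps to the NaN encoding 0x7F.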
+ */ +inline C10_HOST_DEVICE uint8_t fp8e4m3fn_from_fp32_value(float f) { + /* + * Binary representation of 480.0f, which is the first value + * not representable in fp8e4m3fn range: + * 0 1111 111 - fp8e4m3fn + * 0 10000111 11100000000000000000000 - fp32 + */ + constexpr uint32_t fp8_max = UINT32_C(1087) << 20; + + /* + * A mask for converting fp32 numbers lower than fp8e4m3fn normal range + * into denorm representation + * magic number: ((127 - 7) + (23 - 3) + 1) + */ + constexpr uint32_t denorm_mask = UINT32_C(141) << 23; + + uint32_t f_bits = fp32_to_bits(f); + + uint8_t result = 0u; + + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = f_bits & UINT32_C(0x80000000); + + /* + * Set sign bit to 0 + */ + f_bits ^= sign; + + if (f_bits >= fp8_max) { + // NaN - all exponent and mantissa bits set to 1 + result = 0x7f; + } else { + if (f_bits < (UINT32_C(121) << 23)) { + // Input number is smaller than 2^(-6), which is the smallest + // fp8e4m3fn normal number + f_bits = + fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask)); + result = static_cast(f_bits - denorm_mask); + } else { + // resulting mantissa is odd + uint8_t mant_odd = (f_bits >> 20) & 1; + + // update exponent, rounding bias part 1 + f_bits += ((uint32_t)(7 - 127) << 23) + 0x7FFFF; + + // rounding bias part 2 + f_bits += mant_odd; + + // take the bits! + result = static_cast(f_bits >> 20); + } + } + + result |= static_cast(sign >> 24); + return result; +} + +} // namespace detail + +struct alignas(1) Float8_e4m3fn { + uint8_t x; + + struct from_bits_t {}; + C10_HOST_DEVICE static constexpr from_bits_t from_bits() { + return from_bits_t(); + } + + Float8_e4m3fn() = default; + + constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t) + : x(bits){}; + inline C10_HOST_DEVICE Float8_e4m3fn(float value); + inline C10_HOST_DEVICE operator float() const; + inline C10_HOST_DEVICE bool isnan() const; +}; + +C10_API std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value); + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h new file mode 100644 index 0000000000000000000000000000000000000000..e51630d1bd71e1653011247de4b72a8efd87e0fd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz.h @@ -0,0 +1,136 @@ +#pragma once + +/// Defines the Float8_e4m3fnuz type (8-bit floating-point) including +/// conversions to standard C types and basic arithmetic operations. Note that +/// arithmetic operations are implemented by converting to floating point and +/// performing the operation in float32. +/// Binary configuration remains the same as Float8_e4m3fn: +/// s eeee mmm +/// 1 sign bit +/// 4 exponent bits +/// 3 mantissa bits +/// The key differences versus Float8_e4m3fn are: +/// bias = 8 +/// no infinities or negative zero +/// NaN only when sign bit is 1, rest all 0s +/// +/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and +/// the existing Float8_e4m3fn implementation. 
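+///
+/// Illustrative usage sketch (assumes the accompanying Float8_e4m3fnuz-inl.h
+/// conversion definitions are available; encodings follow the bias-8 layout
+/// described above and are given here only as an example):
+///
+///   c10::Float8_e4m3fnuz v(1.5f);      // stored as 0x44: 0 1000 100
+///   float f = static_cast<float>(v);   // 1.5f
+///   c10::Float8_e4m3fnuz n(0x80, c10::Float8_e4m3fnuz::from_bits());
+///   bool is_nan = n.isnan();           // true: NaN is sign bit set, rest 0s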
+ +#include +#include +#include +#include + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#include +#elif !defined(__OPENCL_VERSION__) +#include +#include +#endif + +#include +#include + +namespace c10 { + +namespace detail { + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E4M3FNUZ format, in bit representation. + */ +inline C10_HOST_DEVICE uint8_t fp8e4m3fnuz_from_fp32_value(float f) { + /* + * Binary representation of 256.0f, which is the first value not representable + * (i.e. the first value which would overflow in to the sign bit, resulting in + * a NaN) in fp8e4m3fnuz range: + * 1 0000 000 - fp8e4m3fnuz + * 0 10000111 00000000000000000000000 - fp32 + */ + constexpr uint32_t fnuz_max = UINT32_C(0x87) << 23; + + /* + * A mask for converting fp32 numbers lower than fp8e4m3fnuz normal range + * into denorm representation + * magic number: ((127 - 8) + (23 - 3) + 1) + */ + constexpr uint32_t denorm_mask = UINT32_C(0x8C) << 23; + + uint32_t f_bits = fp32_to_bits(f); + + uint32_t result = 0u; + + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = f_bits & UINT32_C(0x80000000); + + /* + * Set sign bit to 0 + */ + f_bits ^= sign; + + if (f_bits >= fnuz_max) { + // NaN -- sign bit set to 1, rest 0s. + return 0x80; + } + + if (f_bits < (UINT32_C(0x78) << 23) /* 2^-7 in float32 */) { + // Input exponent is less than -7, the smallest e4m3fnuz exponent, so the + // number will become subnormal. + f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask)); + result = static_cast(f_bits - denorm_mask); + if (result == 0) { + // fnuz types don't have negative zero. + return 0; + } + } else { + // resulting mantissa is odd + uint8_t mant_odd = (f_bits >> 20) & 1; + + // update exponent, rounding bias part 1 + f_bits += ((uint32_t)(8 - 127) << 23) + 0x7FFFF; + + // rounding bias part 2 + f_bits += mant_odd; + + // take the bits! 
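+    // After the re-bias above, the 4 exponent bits and the top 3 mantissa
+    // bits of the fp8 result occupy bits 20..26 of f_bits, so shifting right
+    // by 23 - 3 = 20 discards the dropped mantissa bits and leaves the 7-bit
+    // unsigned fp8 payload; the sign is OR-ed back in below.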
+ result = static_cast(f_bits >> 20); + } + + result |= sign >> 24; + return result; +} + +} // namespace detail + +struct alignas(1) Float8_e4m3fnuz { + uint8_t x; + + struct from_bits_t {}; + C10_HOST_DEVICE static constexpr from_bits_t from_bits() { + return from_bits_t(); + } + + Float8_e4m3fnuz() = default; + + constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t) + : x(bits){}; + inline C10_HOST_DEVICE Float8_e4m3fnuz(float value); + inline C10_HOST_DEVICE operator float() const; + inline C10_HOST_DEVICE bool isnan() const; +}; + +C10_API std::ostream& operator<<( + std::ostream& out, + const Float8_e4m3fnuz& value); + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Load.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Load.h new file mode 100644 index 0000000000000000000000000000000000000000..3a367fc9cf7fef244c45865d7ca200dd521f0bd2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Load.h @@ -0,0 +1,38 @@ +#pragma once +#include +#include + +namespace c10 { +namespace detail { + +template +struct LoadImpl { + C10_HOST_DEVICE static T apply(const void* src) { + return *reinterpret_cast(src); + } +}; + +template <> +struct LoadImpl { + C10_HOST_DEVICE static bool apply(const void* src) { + static_assert(sizeof(bool) == sizeof(char)); + // NOTE: [Loading boolean values] + // Protect against invalid boolean values by loading as a byte + // first, then converting to bool (see gh-54789). + return *reinterpret_cast(src); + } +}; + +} // namespace detail + +template +C10_HOST_DEVICE T load(const void* src) { + return c10::detail::LoadImpl::apply(src); +} + +template +C10_HOST_DEVICE scalar_t load(const scalar_t* src) { + return c10::detail::LoadImpl::apply(src); +} + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ParallelGuard.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ParallelGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..e28289745ae525fa3cda22373e509700ba62a6bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ParallelGuard.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace c10 { + +// RAII thread local guard that tracks whether code is being executed in +// `at::parallel_for` or `at::parallel_reduce` loop function. +class C10_API ParallelGuard { + public: + static bool is_enabled(); + + ParallelGuard(bool state); + ~ParallelGuard(); + + private: + bool previous_state_; +}; + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h new file mode 100644 index 0000000000000000000000000000000000000000..157a4f4be28dc591e8297547ffb03447e3f41494 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h @@ -0,0 +1,211 @@ +#ifndef C10_UTIL_STRINGUTIL_H_ +#define C10_UTIL_STRINGUTIL_H_ + +#include +#include +#include + +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace c10 { + +namespace detail { + +// Obtains the base name from a full path. 
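+// For example, StripBasename("/foo/bar/baz.cpp") is expected to return
+// "baz.cpp" (illustrative; the exact separator handling lives in the .cpp
+// implementation).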
+C10_API std::string StripBasename(const std::string& full_path); + +C10_API std::string ExcludeFileExtension(const std::string& full_path); + +struct CompileTimeEmptyString { + operator const std::string&() const { + static const std::string empty_string_literal; + return empty_string_literal; + } + operator const char*() const { + return ""; + } +}; + +template +struct CanonicalizeStrTypes { + using type = const T&; +}; + +template +// NOLINTNEXTLINE(*c-arrays*) +struct CanonicalizeStrTypes { + using type = const char*; +}; + +inline std::ostream& _str(std::ostream& ss) { + return ss; +} + +template +inline std::ostream& _str(std::ostream& ss, const T& t) { + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) + ss << t; + return ss; +} + +// Overloads of _str for wide types; forces narrowing. +C10_API std::ostream& _str(std::ostream& ss, const wchar_t* wCStr); +C10_API std::ostream& _str(std::ostream& ss, const wchar_t& wChar); +C10_API std::ostream& _str(std::ostream& ss, const std::wstring& wString); + +template <> +inline std::ostream& _str( + std::ostream& ss, + const CompileTimeEmptyString&) { + return ss; +} + +template +inline std::ostream& _str(std::ostream& ss, const T& t, const Args&... args) { + return _str(_str(ss, t), args...); +} + +template +struct _str_wrapper final { + static std::string call(const Args&... args) { + std::ostringstream ss; + _str(ss, args...); + return ss.str(); + } +}; + +// Specializations for already-a-string types. +template <> +struct _str_wrapper final { + // return by reference to avoid the binary size of a string copy + static const std::string& call(const std::string& str) { + return str; + } +}; + +template <> +struct _str_wrapper final { + static const char* call(const char* str) { + return str; + } +}; + +// For c10::str() with an empty argument list (which is common in our assert +// macros), we don't want to pay the binary size for constructing and +// destructing a stringstream or even constructing a string. +template <> +struct _str_wrapper<> final { + static CompileTimeEmptyString call() { + return CompileTimeEmptyString(); + } +}; + +} // namespace detail + +// Convert a list of string-like arguments into a single string. +template +inline decltype(auto) str(const Args&... args) { + return detail::_str_wrapper< + typename detail::CanonicalizeStrTypes::type...>::call(args...); +} + +template +inline std::string Join(const std::string& delimiter, const Container& v) { + std::stringstream s; + int cnt = static_cast(v.size()) - 1; + for (auto i = v.begin(); i != v.end(); ++i, --cnt) { + s << (*i) << (cnt ? delimiter : ""); + } + return s.str(); +} + +// Replace all occurrences of "from" substring to "to" string. +// Returns number of replacements +size_t C10_API +ReplaceAll(std::string& s, c10::string_view from, c10::string_view to); + +/// Represents a location in source code (for debugging). 
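+/// Typically populated from __func__ / __FILE__ / __LINE__ at the point an
+/// error or warning is raised, e.g. (hypothetical sketch):
+///   c10::SourceLocation loc{__func__, __FILE__, static_cast<uint32_t>(__LINE__)};
+///   std::cerr << loc;  // streamed via the operator<< declared below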
+struct C10_API SourceLocation { + const char* function; + const char* file; + uint32_t line; +}; + +std::ostream& operator<<(std::ostream& out, const SourceLocation& loc); + +// unix isprint but insensitive to locale +inline static bool isPrint(char s) { + return s > 0x1f && s < 0x7f; +} + +inline void printQuotedString(std::ostream& stmt, const string_view str) { + stmt << "\""; + for (auto s : str) { + switch (s) { + case '\\': + stmt << "\\\\"; + break; + case '\'': + stmt << "\\'"; + break; + case '\"': + stmt << "\\\""; + break; + case '\a': + stmt << "\\a"; + break; + case '\b': + stmt << "\\b"; + break; + case '\f': + stmt << "\\f"; + break; + case '\n': + stmt << "\\n"; + break; + case '\r': + stmt << "\\r"; + break; + case '\t': + stmt << "\\t"; + break; + case '\v': + stmt << "\\v"; + break; + default: + if (isPrint(s)) { + stmt << s; + } else { + // C++ io has stateful formatting settings. Messing with + // them is probably worse than doing this manually. + // NOLINTNEXTLINE(*c-arrays*) + char buf[4] = "000"; + // NOLINTNEXTLINE(*narrowing-conversions) + buf[2] += s % 8; + s /= 8; + // NOLINTNEXTLINE(*narrowing-conversions) + buf[1] += s % 8; + s /= 8; + // NOLINTNEXTLINE(*narrowing-conversions) + buf[0] += s; + stmt << "\\" << buf; + } + break; + } + } + stmt << "\""; +} + +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP() + +#endif // C10_UTIL_STRINGUTIL_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Type.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Type.h new file mode 100644 index 0000000000000000000000000000000000000000..3bca940e807884ff89fb1c6fee6dcaf33f137118 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Type.h @@ -0,0 +1,30 @@ +#ifndef C10_UTIL_TYPE_H_ +#define C10_UTIL_TYPE_H_ + +#include +#include +#ifdef __GXX_RTTI +#include +#endif // __GXX_RTTI + +#include + +namespace c10 { + +/// Utility to demangle a C++ symbol name. +C10_API std::string demangle(const char* name); + +/// Returns the printable name of the type. +template +inline const char* demangle_type() { +#ifdef __GXX_RTTI + static const auto& name = *(new std::string(demangle(typeid(T).name()))); + return name.c_str(); +#else // __GXX_RTTI + return "(RTTI disabled, cannot show name)"; +#endif // __GXX_RTTI +} + +} // namespace c10 + +#endif // C10_UTIL_TYPE_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/bits.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/bits.h new file mode 100644 index 0000000000000000000000000000000000000000..89abf454791ef716c63eaaa68bdcb4df8ffc9381 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/bits.h @@ -0,0 +1,61 @@ +#pragma once +#include + +#include + +namespace c10 { + +/** + * bits1x8 is an uninterpreted dtype of a tensor with 1 bit (packed to byte + * boundary), without any semantics defined. + */ +struct alignas(1) bits1x8 { + using underlying = uint8_t; + uint8_t val_; + bits1x8() = default; + C10_HOST_DEVICE explicit bits1x8(uint8_t val) : val_(val) {} +}; + +/** + * bits2x4 is an uninterpreted dtype of a tensor with 2 bits (packed to byte + * boundary), without any semantics defined. + */ +struct alignas(1) bits2x4 { + using underlying = uint8_t; + uint8_t val_; + bits2x4() = default; + C10_HOST_DEVICE explicit bits2x4(uint8_t val) : val_(val) {} +}; + +/** + * bits4x2 is an uninterpreted dtype of a tensor with 4 bits (packed to byte + * boundary), without any semantics defined. 
+ */ +struct alignas(1) bits4x2 { + using underlying = uint8_t; + uint8_t val_; + bits4x2() = default; + C10_HOST_DEVICE explicit bits4x2(uint8_t val) : val_(val) {} +}; + +/** + * bits8 is an uninterpreted dtype of a tensor with 8 bits, without any + * semantics defined. + */ +struct alignas(1) bits8 { + uint8_t val_; + bits8() = default; + C10_HOST_DEVICE explicit bits8(uint8_t val) : val_(val) {} +}; + +/** + * bits16 is an uninterpreted dtype of a tensor with 16 bits, without any + * semantics defined. + */ +struct alignas(2) bits16 { + uint16_t val_; + bits16() = default; + C10_HOST_DEVICE explicit bits16(uint16_t val) : val_(val) {} +}; + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/complex_math.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/complex_math.h new file mode 100644 index 0000000000000000000000000000000000000000..2b591026c94dab06249f52899efb18ece6bc8624 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/complex_math.h @@ -0,0 +1,406 @@ +#if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) +#error \ + "c10/util/complex_math.h is not meant to be individually included. Include c10/util/complex.h instead." +#endif + +namespace c10_complex_math { + +// Exponential functions + +template +C10_HOST_DEVICE inline c10::complex exp(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::exp(static_cast>(x))); +#else + return static_cast>( + std::exp(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex log(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::log(static_cast>(x))); +#else + return static_cast>( + std::log(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex log10(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::log10(static_cast>(x))); +#else + return static_cast>( + std::log10(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex log2(const c10::complex& x) { + const c10::complex log2 = c10::complex(::log(2.0), 0.0); + return c10_complex_math::log(x) / log2; +} + +// Power functions +// +#if defined(_LIBCPP_VERSION) || \ + (defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX)) +namespace _detail { +C10_API c10::complex sqrt(const c10::complex& in); +C10_API c10::complex sqrt(const c10::complex& in); +C10_API c10::complex acos(const c10::complex& in); +C10_API c10::complex acos(const c10::complex& in); +} // namespace _detail +#endif + +template +C10_HOST_DEVICE inline c10::complex sqrt(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::sqrt(static_cast>(x))); +#elif !( \ + defined(_LIBCPP_VERSION) || \ + (defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX))) + return static_cast>( + std::sqrt(static_cast>(x))); +#else + return _detail::sqrt(x); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex pow( + const c10::complex& x, + const c10::complex& y) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>(thrust::pow( + static_cast>(x), static_cast>(y))); +#else + return static_cast>(std::pow( + static_cast>(x), static_cast>(y))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex pow( + const c10::complex& x, + const T& y) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::pow(static_cast>(x), y)); 
+#else + return static_cast>( + std::pow(static_cast>(x), y)); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex pow( + const T& x, + const c10::complex& y) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::pow(x, static_cast>(y))); +#else + return static_cast>( + std::pow(x, static_cast>(y))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex pow( + const c10::complex& x, + const c10::complex& y) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>(thrust::pow( + static_cast>(x), static_cast>(y))); +#else + return static_cast>(std::pow( + static_cast>(x), static_cast>(y))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex pow( + const c10::complex& x, + const U& y) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::pow(static_cast>(x), y)); +#else + return static_cast>( + std::pow(static_cast>(x), y)); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex pow( + const T& x, + const c10::complex& y) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::pow(x, static_cast>(y))); +#else + return static_cast>( + std::pow(x, static_cast>(y))); +#endif +} + +// Trigonometric functions + +template +C10_HOST_DEVICE inline c10::complex sin(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::sin(static_cast>(x))); +#else + return static_cast>( + std::sin(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex cos(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::cos(static_cast>(x))); +#else + return static_cast>( + std::cos(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex tan(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::tan(static_cast>(x))); +#else + return static_cast>( + std::tan(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex asin(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::asin(static_cast>(x))); +#else + return static_cast>( + std::asin(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex acos(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::acos(static_cast>(x))); +#elif !defined(_LIBCPP_VERSION) + return static_cast>( + std::acos(static_cast>(x))); +#else + return _detail::acos(x); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex atan(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::atan(static_cast>(x))); +#else + return static_cast>( + std::atan(static_cast>(x))); +#endif +} + +// Hyperbolic functions + +template +C10_HOST_DEVICE inline c10::complex sinh(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::sinh(static_cast>(x))); +#else + return static_cast>( + std::sinh(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex cosh(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::cosh(static_cast>(x))); +#else + return static_cast>( + std::cosh(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex tanh(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::tanh(static_cast>(x))); +#else + 
return static_cast>( + std::tanh(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex asinh(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::asinh(static_cast>(x))); +#else + return static_cast>( + std::asinh(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex acosh(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::acosh(static_cast>(x))); +#else + return static_cast>( + std::acosh(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex atanh(const c10::complex& x) { +#if defined(__CUDACC__) || defined(__HIPCC__) + return static_cast>( + thrust::atanh(static_cast>(x))); +#else + return static_cast>( + std::atanh(static_cast>(x))); +#endif +} + +template +C10_HOST_DEVICE inline c10::complex log1p(const c10::complex& z) { +#if defined(__APPLE__) || defined(__MACOSX) || defined(__CUDACC__) || \ + defined(__HIPCC__) + // For Mac, the new implementation yielded a high relative error. Falling back + // to the old version for now. + // See https://github.com/numpy/numpy/pull/22611#issuecomment-1667945354 + // For CUDA we also use this one, as thrust::log(thrust::complex) takes + // *forever* to compile + + // log1p(z) = log(1 + z) + // Let's define 1 + z = r * e ^ (i * a), then we have + // log(r * e ^ (i * a)) = log(r) + i * a + // With z = x + iy, the term r can be written as + // r = ((1 + x) ^ 2 + y ^ 2) ^ 0.5 + // = (1 + x ^ 2 + 2 * x + y ^ 2) ^ 0.5 + // So, log(r) is + // log(r) = 0.5 * log(1 + x ^ 2 + 2 * x + y ^ 2) + // = 0.5 * log1p(x * (x + 2) + y ^ 2) + // we need to use the expression only on certain condition to avoid overflow + // and underflow from `(x * (x + 2) + y ^ 2)` + T x = z.real(); + T y = z.imag(); + T zabs = std::abs(z); + T theta = std::atan2(y, x + T(1)); + if (zabs < 0.5) { + T r = x * (T(2) + x) + y * y; + if (r == 0) { // handle underflow + return {x, theta}; + } + return {T(0.5) * std::log1p(r), theta}; + } else { + T z0 = std::hypot(x + 1, y); + return {std::log(z0), theta}; + } +#else + // CPU path + // Based on https://github.com/numpy/numpy/pull/22611#issuecomment-1667945354 + c10::complex u = z + T(1); + if (u == T(1)) { + return z; + } else { + auto log_u = log(u); + if (u - T(1) == z) { + return log_u; + } + return log_u * (z / (u - T(1))); + } +#endif +} + +template +C10_HOST_DEVICE inline c10::complex expm1(const c10::complex& z) { + // expm1(z) = exp(z) - 1 + // Define z = x + i * y + // f = e ^ (x + i * y) - 1 + // = e ^ x * e ^ (i * y) - 1 + // = (e ^ x * cos(y) - 1) + i * (e ^ x * sin(y)) + // = (e ^ x - 1) * cos(y) - (1 - cos(y)) + i * e ^ x * sin(y) + // = expm1(x) * cos(y) - 2 * sin(y / 2) ^ 2 + i * e ^ x * sin(y) + T x = z.real(); + T y = z.imag(); + T a = std::sin(y / 2); + T er = std::expm1(x) * std::cos(y) - T(2) * a * a; + T ei = std::exp(x) * std::sin(y); + return {er, ei}; +} + +} // namespace c10_complex_math + +using c10_complex_math::acos; +using c10_complex_math::acosh; +using c10_complex_math::asin; +using c10_complex_math::asinh; +using c10_complex_math::atan; +using c10_complex_math::atanh; +using c10_complex_math::cos; +using c10_complex_math::cosh; +using c10_complex_math::exp; +using c10_complex_math::expm1; +using c10_complex_math::log; +using c10_complex_math::log10; +using c10_complex_math::log1p; +using c10_complex_math::log2; +using c10_complex_math::pow; +using c10_complex_math::sin; +using c10_complex_math::sinh; +using c10_complex_math::sqrt; 
+using c10_complex_math::tan; +using c10_complex_math::tanh; + +namespace std { + +using c10_complex_math::acos; +using c10_complex_math::acosh; +using c10_complex_math::asin; +using c10_complex_math::asinh; +using c10_complex_math::atan; +using c10_complex_math::atanh; +using c10_complex_math::cos; +using c10_complex_math::cosh; +using c10_complex_math::exp; +using c10_complex_math::expm1; +using c10_complex_math::log; +using c10_complex_math::log10; +using c10_complex_math::log1p; +using c10_complex_math::log2; +using c10_complex_math::pow; +using c10_complex_math::sin; +using c10_complex_math::sinh; +using c10_complex_math::sqrt; +using c10_complex_math::tan; +using c10_complex_math::tanh; + +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/flat_hash_map.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/flat_hash_map.h new file mode 100644 index 0000000000000000000000000000000000000000..8688510b2b81d090be95c7074ad9f7a9d89d9ea4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/flat_hash_map.h @@ -0,0 +1,2102 @@ +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator +// - make use of 'if constexpr' and eliminate AssignIfTrue template + +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable : 4624) // destructor was implicitly defined as deleted +#endif + +#ifdef _MSC_VER +#define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +#else +#define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) +#endif + +namespace ska { +struct prime_number_hash_policy; +struct power_of_two_hash_policy; +struct fibonacci_hash_policy; + +namespace detailv3 { +template +struct functor_storage : Functor { + functor_storage() = default; + functor_storage(const Functor& functor) : Functor(functor) {} + template + Result operator()(Args&&... args) { + return static_cast(*this)(std::forward(args)...); + } + template + Result operator()(Args&&... args) const { + return static_cast(*this)(std::forward(args)...); + } +}; +template +struct functor_storage { + typedef Result (*function_ptr)(Args...); + function_ptr function; + functor_storage(function_ptr function) : function(function) {} + Result operator()(Args... 
args) const { + return function(std::forward(args)...); + } + operator function_ptr&() { + return function; + } + operator const function_ptr&() { + return function; + } +}; +template +struct KeyOrValueHasher : functor_storage { + typedef functor_storage hasher_storage; + KeyOrValueHasher() = default; + KeyOrValueHasher(const hasher& hash) : hasher_storage(hash) {} + uint64_t operator()(const key_type& key) { + return static_cast(*this)(key); + } + uint64_t operator()(const key_type& key) const { + return static_cast(*this)(key); + } + uint64_t operator()(const value_type& value) { + return static_cast(*this)(value.first); + } + uint64_t operator()(const value_type& value) const { + return static_cast(*this)(value.first); + } + template + uint64_t operator()(const std::pair& value) { + return static_cast(*this)(value.first); + } + template + uint64_t operator()(const std::pair& value) const { + return static_cast(*this)(value.first); + } +}; +template +struct KeyOrValueEquality : functor_storage { + typedef functor_storage equality_storage; + KeyOrValueEquality() = default; + KeyOrValueEquality(const key_equal& equality) : equality_storage(equality) {} + bool operator()(const key_type& lhs, const key_type& rhs) { + return static_cast(*this)(lhs, rhs); + } + bool operator()(const key_type& lhs, const value_type& rhs) { + return static_cast(*this)(lhs, rhs.first); + } + bool operator()(const value_type& lhs, const key_type& rhs) { + return static_cast(*this)(lhs.first, rhs); + } + bool operator()(const value_type& lhs, const value_type& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } + template + bool operator()(const key_type& lhs, const std::pair& rhs) { + return static_cast(*this)(lhs, rhs.first); + } + template + bool operator()(const std::pair& lhs, const key_type& rhs) { + return static_cast(*this)(lhs.first, rhs); + } + template + bool operator()(const value_type& lhs, const std::pair& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } + template + bool operator()(const std::pair& lhs, const value_type& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } + template + bool operator()(const std::pair& lhs, const std::pair& rhs) { + return static_cast(*this)(lhs.first, rhs.first); + } +}; +static constexpr int8_t min_lookups = 4; +template +struct sherwood_v3_entry { + sherwood_v3_entry() = default; + sherwood_v3_entry(int8_t distance_from_desired) + : distance_from_desired(distance_from_desired) {} + ~sherwood_v3_entry() = default; + + bool has_value() const { + return distance_from_desired >= 0; + } + bool is_empty() const { + return distance_from_desired < 0; + } + bool is_at_desired_position() const { + return distance_from_desired <= 0; + } + template + void emplace(int8_t distance, Args&&... 
args) { + new (std::addressof(value)) T(std::forward(args)...); + distance_from_desired = distance; + } + + void destroy_value() { + value.~T(); + distance_from_desired = -1; + } + + int8_t distance_from_desired = -1; + static constexpr int8_t special_end_value = 0; + union { + T value; + }; +}; + +inline int8_t log2(uint64_t value) { + // NOLINTNEXTLINE(*c-arrays*) + static constexpr int8_t table[64] = { + 63, 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, + 61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, + 62, 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, + 56, 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5}; + value |= value >> 1; + value |= value >> 2; + value |= value >> 4; + value |= value >> 8; + value |= value >> 16; + value |= value >> 32; + return table[((value - (value >> 1)) * 0x07EDD5E59A4E28C2) >> 58]; +} + +inline uint64_t next_power_of_two(uint64_t i) { + --i; + i |= i >> 1; + i |= i >> 2; + i |= i >> 4; + i |= i >> 8; + i |= i >> 16; + i |= i >> 32; + ++i; + return i; +} + +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) +template +struct make_void { + typedef void type; +}; +template +using void_t = typename make_void::type; + +template +struct HashPolicySelector { + typedef fibonacci_hash_policy type; +}; +template +struct HashPolicySelector> { + typedef typename T::hash_policy type; +}; + +template < + typename T, + typename FindKey, + typename ArgumentHash, + typename DetailHasher, + typename ArgumentEqual, + typename Equal, + typename ArgumentAlloc, + typename EntryAlloc> +class sherwood_v3_table : private EntryAlloc, + private DetailHasher, + private Equal { + using Entry = detailv3::sherwood_v3_entry; + using AllocatorTraits = std::allocator_traits; + using EntryPointer = typename AllocatorTraits::pointer; + + public: + struct convertible_to_iterator; + + using value_type = T; + using size_type = uint64_t; + using difference_type = std::ptrdiff_t; + using hasher = ArgumentHash; + using key_equal = ArgumentEqual; + using allocator_type = EntryAlloc; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = value_type*; + using const_pointer = const value_type*; + + sherwood_v3_table() = default; + explicit sherwood_v3_table( + size_type bucket_count, + const ArgumentHash& hash = ArgumentHash(), + const ArgumentEqual& equal = ArgumentEqual(), + const ArgumentAlloc& alloc = ArgumentAlloc()) + : EntryAlloc(alloc), DetailHasher(hash), Equal(equal) { + rehash(bucket_count); + } + sherwood_v3_table(size_type bucket_count, const ArgumentAlloc& alloc) + : sherwood_v3_table( + bucket_count, + ArgumentHash(), + ArgumentEqual(), + alloc) {} + sherwood_v3_table( + size_type bucket_count, + const ArgumentHash& hash, + const ArgumentAlloc& alloc) + : sherwood_v3_table(bucket_count, hash, ArgumentEqual(), alloc) {} + explicit sherwood_v3_table(const ArgumentAlloc& alloc) : EntryAlloc(alloc) {} + template + sherwood_v3_table( + It first, + It last, + size_type bucket_count = 0, + const ArgumentHash& hash = ArgumentHash(), + const ArgumentEqual& equal = ArgumentEqual(), + const ArgumentAlloc& alloc = ArgumentAlloc()) + : sherwood_v3_table(bucket_count, hash, equal, alloc) { + insert(first, last); + } + template + sherwood_v3_table( + It first, + It last, + size_type bucket_count, + const ArgumentAlloc& alloc) + : sherwood_v3_table( + first, + last, + bucket_count, + ArgumentHash(), + ArgumentEqual(), + 
alloc) {} + template + sherwood_v3_table( + It first, + It last, + size_type bucket_count, + const ArgumentHash& hash, + const ArgumentAlloc& alloc) + : sherwood_v3_table( + first, + last, + bucket_count, + hash, + ArgumentEqual(), + alloc) {} + sherwood_v3_table( + std::initializer_list il, + size_type bucket_count = 0, + const ArgumentHash& hash = ArgumentHash(), + const ArgumentEqual& equal = ArgumentEqual(), + const ArgumentAlloc& alloc = ArgumentAlloc()) + : sherwood_v3_table(bucket_count, hash, equal, alloc) { + if (bucket_count == 0) + rehash(il.size()); + insert(il.begin(), il.end()); + } + sherwood_v3_table( + std::initializer_list il, + size_type bucket_count, + const ArgumentAlloc& alloc) + : sherwood_v3_table( + il, + bucket_count, + ArgumentHash(), + ArgumentEqual(), + alloc) {} + sherwood_v3_table( + std::initializer_list il, + size_type bucket_count, + const ArgumentHash& hash, + const ArgumentAlloc& alloc) + : sherwood_v3_table(il, bucket_count, hash, ArgumentEqual(), alloc) {} + sherwood_v3_table(const sherwood_v3_table& other) + : sherwood_v3_table( + other, + AllocatorTraits::select_on_container_copy_construction( + other.get_allocator())) {} + sherwood_v3_table(const sherwood_v3_table& other, const ArgumentAlloc& alloc) + : EntryAlloc(alloc), + DetailHasher(other), + Equal(other), + _max_load_factor(other._max_load_factor) { + rehash_for_other_container(other); + try { + insert(other.begin(), other.end()); + } catch (...) { + clear(); + deallocate_data(entries, num_slots_minus_one, max_lookups); + throw; + } + } + sherwood_v3_table(sherwood_v3_table&& other) noexcept + : EntryAlloc(std::move(other)), + DetailHasher(std::move(other)), + Equal(std::move(other)) { + swap_pointers(other); + } + sherwood_v3_table( + sherwood_v3_table&& other, + const ArgumentAlloc& alloc) noexcept + : EntryAlloc(alloc), + DetailHasher(std::move(other)), + Equal(std::move(other)) { + swap_pointers(other); + } + sherwood_v3_table& operator=(const sherwood_v3_table& other) { + if (this == std::addressof(other)) + return *this; + + clear(); + if constexpr (AllocatorTraits::propagate_on_container_copy_assignment:: + value) { + if (static_cast(*this) != + static_cast(other)) { + reset_to_empty_state(); + } + static_cast(*this) = other; + } + _max_load_factor = other._max_load_factor; + static_cast(*this) = other; + static_cast(*this) = other; + rehash_for_other_container(other); + insert(other.begin(), other.end()); + return *this; + } + sherwood_v3_table& operator=(sherwood_v3_table&& other) noexcept { + if (this == std::addressof(other)) + return *this; + else if constexpr (AllocatorTraits::propagate_on_container_move_assignment:: + value) { + clear(); + reset_to_empty_state(); + static_cast(*this) = std::move(other); + swap_pointers(other); + } else if ( + static_cast(*this) == static_cast(other)) { + swap_pointers(other); + } else { + clear(); + _max_load_factor = other._max_load_factor; + rehash_for_other_container(other); + for (T& elem : other) + emplace(std::move(elem)); + other.clear(); + } + static_cast(*this) = std::move(other); + static_cast(*this) = std::move(other); + return *this; + } + ~sherwood_v3_table() { + clear(); + deallocate_data(entries, num_slots_minus_one, max_lookups); + } + + const allocator_type& get_allocator() const { + return static_cast(*this); + } + const ArgumentEqual& key_eq() const { + return static_cast(*this); + } + const ArgumentHash& hash_function() const { + return static_cast(*this); + } + + template + struct templated_iterator { + templated_iterator() = 
default; + templated_iterator(EntryPointer current) : current(current) {} + EntryPointer current = EntryPointer(); + + using iterator_category = std::forward_iterator_tag; + using value_type = ValueType; + using difference_type = ptrdiff_t; + using pointer = ValueType*; + using reference = ValueType&; + + friend bool operator==( + const templated_iterator& lhs, + const templated_iterator& rhs) { + return lhs.current == rhs.current; + } + friend bool operator!=( + const templated_iterator& lhs, + const templated_iterator& rhs) { + return !(lhs == rhs); + } + + templated_iterator& operator++() { + do { + ++current; + } while (current->is_empty()); + return *this; + } + templated_iterator operator++(int) { + templated_iterator copy(*this); + ++*this; + return copy; + } + + ValueType& operator*() const { + return current->value; + } + ValueType* operator->() const { + return std::addressof(current->value); + } + + // the template automatically disables the operator when value_type is + // already const, because that would cause a lot of compiler warnings + // otherwise. + template < + class target_type = const value_type, + class = std::enable_if_t< + std::is_same_v && + !std::is_same_v>> + operator templated_iterator() const { + return {current}; + } + }; + using iterator = templated_iterator; + using const_iterator = templated_iterator; + + iterator begin() { + for (EntryPointer it = entries;; ++it) { + if (it->has_value()) + return {it}; + } + } + const_iterator begin() const { + for (EntryPointer it = entries;; ++it) { + if (it->has_value()) + return {it}; + } + } + const_iterator cbegin() const { + return begin(); + } + iterator end() { + return { + entries + static_cast(num_slots_minus_one + max_lookups)}; + } + const_iterator end() const { + return { + entries + static_cast(num_slots_minus_one + max_lookups)}; + } + const_iterator cend() const { + return end(); + } + + iterator find(const FindKey& key) { + uint64_t index = + hash_policy.index_for_hash(hash_object(key), num_slots_minus_one); + EntryPointer it = entries + ptrdiff_t(index); + for (int8_t distance = 0; it->distance_from_desired >= distance; + ++distance, ++it) { + if (compares_equal(key, it->value)) + return {it}; + } + return end(); + } + const_iterator find(const FindKey& key) const { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + return const_cast(this)->find(key); + } + uint64_t count(const FindKey& key) const { + return find(key) == end() ? 0 : 1; + } + std::pair equal_range(const FindKey& key) { + iterator found = find(key); + if (found == end()) + return {found, found}; + else + return {found, std::next(found)}; + } + std::pair equal_range( + const FindKey& key) const { + const_iterator found = find(key); + if (found == end()) + return {found, found}; + else + return {found, std::next(found)}; + } + + template + std::pair emplace(Key&& key, Args&&... 
args) { + uint64_t index = + hash_policy.index_for_hash(hash_object(key), num_slots_minus_one); + EntryPointer current_entry = entries + ptrdiff_t(index); + int8_t distance_from_desired = 0; + for (; current_entry->distance_from_desired >= distance_from_desired; + ++current_entry, ++distance_from_desired) { + if (compares_equal(key, current_entry->value)) + return {{current_entry}, false}; + } + return emplace_new_key( + distance_from_desired, + current_entry, + std::forward(key), + std::forward(args)...); + } + + std::pair insert(const value_type& value) { + return emplace(value); + } + std::pair insert(value_type&& value) { + return emplace(std::move(value)); + } + template + iterator emplace_hint(const_iterator, Args&&... args) { + return emplace(std::forward(args)...).first; + } + iterator insert(const_iterator, const value_type& value) { + return emplace(value).first; + } + iterator insert(const_iterator, value_type&& value) { + return emplace(std::move(value)).first; + } + + template + void insert(It begin, It end) { + for (; begin != end; ++begin) { + emplace(*begin); + } + } + void insert(std::initializer_list il) { + insert(il.begin(), il.end()); + } + + void rehash(uint64_t num_buckets) { + num_buckets = std::max( + num_buckets, + static_cast( + std::ceil(num_elements / static_cast(_max_load_factor)))); + if (num_buckets == 0) { + reset_to_empty_state(); + return; + } + auto new_prime_index = hash_policy.next_size_over(num_buckets); + if (num_buckets == bucket_count()) + return; + int8_t new_max_lookups = compute_max_lookups(num_buckets); + EntryPointer new_buckets( + AllocatorTraits::allocate(*this, num_buckets + new_max_lookups)); + EntryPointer special_end_item = + new_buckets + static_cast(num_buckets + new_max_lookups - 1); + for (EntryPointer it = new_buckets; it != special_end_item; ++it) + it->distance_from_desired = -1; + special_end_item->distance_from_desired = Entry::special_end_value; + std::swap(entries, new_buckets); + std::swap(num_slots_minus_one, num_buckets); + --num_slots_minus_one; + hash_policy.commit(new_prime_index); + int8_t old_max_lookups = max_lookups; + max_lookups = new_max_lookups; + num_elements = 0; + for (EntryPointer + it = new_buckets, + end = it + static_cast(num_buckets + old_max_lookups); + it != end; + ++it) { + if (it->has_value()) { + emplace(std::move(it->value)); + it->destroy_value(); + } + } + deallocate_data(new_buckets, num_buckets, old_max_lookups); + } + + void reserve(uint64_t num_elements_) { + uint64_t required_buckets = num_buckets_for_reserve(num_elements_); + if (required_buckets > bucket_count()) + rehash(required_buckets); + } + + // the return value is a type that can be converted to an iterator + // the reason for doing this is that it's not free to find the + // iterator pointing at the next element. 
if you care about the + // next iterator, turn the return value into an iterator + convertible_to_iterator erase(const_iterator to_erase) { + EntryPointer current = to_erase.current; + current->destroy_value(); + --num_elements; + for (EntryPointer next = current + ptrdiff_t(1); + !next->is_at_desired_position(); + ++current, ++next) { + current->emplace(next->distance_from_desired - 1, std::move(next->value)); + next->destroy_value(); + } + return {to_erase.current}; + } + + iterator erase(const_iterator begin_it, const_iterator end_it) { + if (begin_it == end_it) + return {begin_it.current}; + for (EntryPointer it = begin_it.current, end = end_it.current; it != end; + ++it) { + if (it->has_value()) { + it->destroy_value(); + --num_elements; + } + } + if (end_it == this->end()) + return this->end(); + ptrdiff_t num_to_move = std::min( + static_cast(end_it.current->distance_from_desired), + end_it.current - begin_it.current); + EntryPointer to_return = end_it.current - num_to_move; + for (EntryPointer it = end_it.current; !it->is_at_desired_position();) { + EntryPointer target = it - num_to_move; + target->emplace( + it->distance_from_desired - num_to_move, std::move(it->value)); + it->destroy_value(); + ++it; + num_to_move = std::min( + static_cast(it->distance_from_desired), num_to_move); + } + return {to_return}; + } + + uint64_t erase(const FindKey& key) { + auto found = find(key); + if (found == end()) + return 0; + else { + erase(found); + return 1; + } + } + + void clear() { + for (EntryPointer it = entries, + end = it + + static_cast(num_slots_minus_one + max_lookups); + it != end; + ++it) { + if (it->has_value()) + it->destroy_value(); + } + num_elements = 0; + } + + void shrink_to_fit() { + rehash_for_other_container(*this); + } + + void swap(sherwood_v3_table& other) noexcept { + using std::swap; + swap_pointers(other); + swap(static_cast(*this), static_cast(other)); + swap( + static_cast(*this), static_cast(other)); + if (AllocatorTraits::propagate_on_container_swap::value) + swap(static_cast(*this), static_cast(other)); + } + + uint64_t size() const { + return num_elements; + } + uint64_t max_size() const { + return (AllocatorTraits::max_size(*this)) / sizeof(Entry); + } + uint64_t bucket_count() const { + return num_slots_minus_one ? 
num_slots_minus_one + 1 : 0; + } + size_type max_bucket_count() const { + return (AllocatorTraits::max_size(*this) - min_lookups) / sizeof(Entry); + } + uint64_t bucket(const FindKey& key) const { + return hash_policy.index_for_hash(hash_object(key), num_slots_minus_one); + } + float load_factor() const { + uint64_t buckets = bucket_count(); + if (buckets) + return static_cast(num_elements) / bucket_count(); + else + return 0; + } + void max_load_factor(float value) { + _max_load_factor = value; + } + float max_load_factor() const { + return _max_load_factor; + } + + bool empty() const { + return num_elements == 0; + } + + private: + EntryPointer entries = empty_default_table(); + uint64_t num_slots_minus_one = 0; + typename HashPolicySelector::type hash_policy; + int8_t max_lookups = detailv3::min_lookups - 1; + float _max_load_factor = 0.5f; + uint64_t num_elements = 0; + + EntryPointer empty_default_table() { + EntryPointer result = + AllocatorTraits::allocate(*this, detailv3::min_lookups); + EntryPointer special_end_item = + result + static_cast(detailv3::min_lookups - 1); + for (EntryPointer it = result; it != special_end_item; ++it) + it->distance_from_desired = -1; + special_end_item->distance_from_desired = Entry::special_end_value; + return result; + } + + static int8_t compute_max_lookups(uint64_t num_buckets) { + int8_t desired = detailv3::log2(num_buckets); + return std::max(detailv3::min_lookups, desired); + } + + uint64_t num_buckets_for_reserve(uint64_t num_elements_) const { + return static_cast(std::ceil( + static_cast(num_elements_) / + std::min(0.5, static_cast(_max_load_factor)))); + } + void rehash_for_other_container(const sherwood_v3_table& other) { + rehash( + std::min(num_buckets_for_reserve(other.size()), other.bucket_count())); + } + + void swap_pointers(sherwood_v3_table& other) { + using std::swap; + swap(hash_policy, other.hash_policy); + swap(entries, other.entries); + swap(num_slots_minus_one, other.num_slots_minus_one); + swap(num_elements, other.num_elements); + swap(max_lookups, other.max_lookups); + swap(_max_load_factor, other._max_load_factor); + } + + template + SKA_NOINLINE(std::pair) + emplace_new_key( + int8_t distance_from_desired, + EntryPointer current_entry, + Key&& key, + Args&&... 
args) { + using std::swap; + if (num_slots_minus_one == 0 || distance_from_desired == max_lookups || + num_elements + 1 > + (num_slots_minus_one + 1) * static_cast(_max_load_factor)) { + grow(); + return emplace(std::forward(key), std::forward(args)...); + } else if (current_entry->is_empty()) { + current_entry->emplace( + distance_from_desired, + std::forward(key), + std::forward(args)...); + ++num_elements; + return {{current_entry}, true}; + } + value_type to_insert(std::forward(key), std::forward(args)...); + swap(distance_from_desired, current_entry->distance_from_desired); + swap(to_insert, current_entry->value); + iterator result = {current_entry}; + for (++distance_from_desired, ++current_entry;; ++current_entry) { + if (current_entry->is_empty()) { + current_entry->emplace(distance_from_desired, std::move(to_insert)); + ++num_elements; + return {result, true}; + } else if (current_entry->distance_from_desired < distance_from_desired) { + swap(distance_from_desired, current_entry->distance_from_desired); + swap(to_insert, current_entry->value); + ++distance_from_desired; + } else { + ++distance_from_desired; + if (distance_from_desired == max_lookups) { + swap(to_insert, result.current->value); + grow(); + return emplace(std::move(to_insert)); + } + } + } + } + + void grow() { + rehash(std::max(uint64_t(4), 2 * bucket_count())); + } + + void deallocate_data( + EntryPointer begin, + uint64_t num_slots_minus_one_, + int8_t max_lookups_) { + AllocatorTraits::deallocate( + *this, begin, num_slots_minus_one_ + max_lookups_ + 1); + } + + void reset_to_empty_state() { + deallocate_data(entries, num_slots_minus_one, max_lookups); + entries = empty_default_table(); + num_slots_minus_one = 0; + hash_policy.reset(); + max_lookups = detailv3::min_lookups - 1; + } + + template + uint64_t hash_object(const U& key) { + return static_cast(*this)(key); + } + template + uint64_t hash_object(const U& key) const { + return static_cast(*this)(key); + } + template + bool compares_equal(const L& lhs, const R& rhs) { + return static_cast(*this)(lhs, rhs); + } + + public: + struct convertible_to_iterator { + EntryPointer it; + + operator iterator() { + if (it->has_value()) + return {it}; + else + return ++iterator{it}; + } + operator const_iterator() { + if (it->has_value()) + return {it}; + else + return ++const_iterator{it}; + } + }; +}; +} // namespace detailv3 + +struct prime_number_hash_policy { + static uint64_t mod0(uint64_t) { + return 0llu; + } + static uint64_t mod2(uint64_t hash) { + return hash % 2llu; + } + static uint64_t mod3(uint64_t hash) { + return hash % 3llu; + } + static uint64_t mod5(uint64_t hash) { + return hash % 5llu; + } + static uint64_t mod7(uint64_t hash) { + return hash % 7llu; + } + static uint64_t mod11(uint64_t hash) { + return hash % 11llu; + } + static uint64_t mod13(uint64_t hash) { + return hash % 13llu; + } + static uint64_t mod17(uint64_t hash) { + return hash % 17llu; + } + static uint64_t mod23(uint64_t hash) { + return hash % 23llu; + } + static uint64_t mod29(uint64_t hash) { + return hash % 29llu; + } + static uint64_t mod37(uint64_t hash) { + return hash % 37llu; + } + static uint64_t mod47(uint64_t hash) { + return hash % 47llu; + } + static uint64_t mod59(uint64_t hash) { + return hash % 59llu; + } + static uint64_t mod73(uint64_t hash) { + return hash % 73llu; + } + static uint64_t mod97(uint64_t hash) { + return hash % 97llu; + } + static uint64_t mod127(uint64_t hash) { + return hash % 127llu; + } + static uint64_t mod151(uint64_t hash) { + return hash % 
151llu; + } + static uint64_t mod197(uint64_t hash) { + return hash % 197llu; + } + static uint64_t mod251(uint64_t hash) { + return hash % 251llu; + } + static uint64_t mod313(uint64_t hash) { + return hash % 313llu; + } + static uint64_t mod397(uint64_t hash) { + return hash % 397llu; + } + static uint64_t mod499(uint64_t hash) { + return hash % 499llu; + } + static uint64_t mod631(uint64_t hash) { + return hash % 631llu; + } + static uint64_t mod797(uint64_t hash) { + return hash % 797llu; + } + static uint64_t mod1009(uint64_t hash) { + return hash % 1009llu; + } + static uint64_t mod1259(uint64_t hash) { + return hash % 1259llu; + } + static uint64_t mod1597(uint64_t hash) { + return hash % 1597llu; + } + static uint64_t mod2011(uint64_t hash) { + return hash % 2011llu; + } + static uint64_t mod2539(uint64_t hash) { + return hash % 2539llu; + } + static uint64_t mod3203(uint64_t hash) { + return hash % 3203llu; + } + static uint64_t mod4027(uint64_t hash) { + return hash % 4027llu; + } + static uint64_t mod5087(uint64_t hash) { + return hash % 5087llu; + } + static uint64_t mod6421(uint64_t hash) { + return hash % 6421llu; + } + static uint64_t mod8089(uint64_t hash) { + return hash % 8089llu; + } + static uint64_t mod10193(uint64_t hash) { + return hash % 10193llu; + } + static uint64_t mod12853(uint64_t hash) { + return hash % 12853llu; + } + static uint64_t mod16193(uint64_t hash) { + return hash % 16193llu; + } + static uint64_t mod20399(uint64_t hash) { + return hash % 20399llu; + } + static uint64_t mod25717(uint64_t hash) { + return hash % 25717llu; + } + static uint64_t mod32401(uint64_t hash) { + return hash % 32401llu; + } + static uint64_t mod40823(uint64_t hash) { + return hash % 40823llu; + } + static uint64_t mod51437(uint64_t hash) { + return hash % 51437llu; + } + static uint64_t mod64811(uint64_t hash) { + return hash % 64811llu; + } + static uint64_t mod81649(uint64_t hash) { + return hash % 81649llu; + } + static uint64_t mod102877(uint64_t hash) { + return hash % 102877llu; + } + static uint64_t mod129607(uint64_t hash) { + return hash % 129607llu; + } + static uint64_t mod163307(uint64_t hash) { + return hash % 163307llu; + } + static uint64_t mod205759(uint64_t hash) { + return hash % 205759llu; + } + static uint64_t mod259229(uint64_t hash) { + return hash % 259229llu; + } + static uint64_t mod326617(uint64_t hash) { + return hash % 326617llu; + } + static uint64_t mod411527(uint64_t hash) { + return hash % 411527llu; + } + static uint64_t mod518509(uint64_t hash) { + return hash % 518509llu; + } + static uint64_t mod653267(uint64_t hash) { + return hash % 653267llu; + } + static uint64_t mod823117(uint64_t hash) { + return hash % 823117llu; + } + static uint64_t mod1037059(uint64_t hash) { + return hash % 1037059llu; + } + static uint64_t mod1306601(uint64_t hash) { + return hash % 1306601llu; + } + static uint64_t mod1646237(uint64_t hash) { + return hash % 1646237llu; + } + static uint64_t mod2074129(uint64_t hash) { + return hash % 2074129llu; + } + static uint64_t mod2613229(uint64_t hash) { + return hash % 2613229llu; + } + static uint64_t mod3292489(uint64_t hash) { + return hash % 3292489llu; + } + static uint64_t mod4148279(uint64_t hash) { + return hash % 4148279llu; + } + static uint64_t mod5226491(uint64_t hash) { + return hash % 5226491llu; + } + static uint64_t mod6584983(uint64_t hash) { + return hash % 6584983llu; + } + static uint64_t mod8296553(uint64_t hash) { + return hash % 8296553llu; + } + static uint64_t mod10453007(uint64_t hash) { + 
return hash % 10453007llu; + } + static uint64_t mod13169977(uint64_t hash) { + return hash % 13169977llu; + } + static uint64_t mod16593127(uint64_t hash) { + return hash % 16593127llu; + } + static uint64_t mod20906033(uint64_t hash) { + return hash % 20906033llu; + } + static uint64_t mod26339969(uint64_t hash) { + return hash % 26339969llu; + } + static uint64_t mod33186281(uint64_t hash) { + return hash % 33186281llu; + } + static uint64_t mod41812097(uint64_t hash) { + return hash % 41812097llu; + } + static uint64_t mod52679969(uint64_t hash) { + return hash % 52679969llu; + } + static uint64_t mod66372617(uint64_t hash) { + return hash % 66372617llu; + } + static uint64_t mod83624237(uint64_t hash) { + return hash % 83624237llu; + } + static uint64_t mod105359939(uint64_t hash) { + return hash % 105359939llu; + } + static uint64_t mod132745199(uint64_t hash) { + return hash % 132745199llu; + } + static uint64_t mod167248483(uint64_t hash) { + return hash % 167248483llu; + } + static uint64_t mod210719881(uint64_t hash) { + return hash % 210719881llu; + } + static uint64_t mod265490441(uint64_t hash) { + return hash % 265490441llu; + } + static uint64_t mod334496971(uint64_t hash) { + return hash % 334496971llu; + } + static uint64_t mod421439783(uint64_t hash) { + return hash % 421439783llu; + } + static uint64_t mod530980861(uint64_t hash) { + return hash % 530980861llu; + } + static uint64_t mod668993977(uint64_t hash) { + return hash % 668993977llu; + } + static uint64_t mod842879579(uint64_t hash) { + return hash % 842879579llu; + } + static uint64_t mod1061961721(uint64_t hash) { + return hash % 1061961721llu; + } + static uint64_t mod1337987929(uint64_t hash) { + return hash % 1337987929llu; + } + static uint64_t mod1685759167(uint64_t hash) { + return hash % 1685759167llu; + } + static uint64_t mod2123923447(uint64_t hash) { + return hash % 2123923447llu; + } + static uint64_t mod2675975881(uint64_t hash) { + return hash % 2675975881llu; + } + static uint64_t mod3371518343(uint64_t hash) { + return hash % 3371518343llu; + } + static uint64_t mod4247846927(uint64_t hash) { + return hash % 4247846927llu; + } + static uint64_t mod5351951779(uint64_t hash) { + return hash % 5351951779llu; + } + static uint64_t mod6743036717(uint64_t hash) { + return hash % 6743036717llu; + } + static uint64_t mod8495693897(uint64_t hash) { + return hash % 8495693897llu; + } + static uint64_t mod10703903591(uint64_t hash) { + return hash % 10703903591llu; + } + static uint64_t mod13486073473(uint64_t hash) { + return hash % 13486073473llu; + } + static uint64_t mod16991387857(uint64_t hash) { + return hash % 16991387857llu; + } + static uint64_t mod21407807219(uint64_t hash) { + return hash % 21407807219llu; + } + static uint64_t mod26972146961(uint64_t hash) { + return hash % 26972146961llu; + } + static uint64_t mod33982775741(uint64_t hash) { + return hash % 33982775741llu; + } + static uint64_t mod42815614441(uint64_t hash) { + return hash % 42815614441llu; + } + static uint64_t mod53944293929(uint64_t hash) { + return hash % 53944293929llu; + } + static uint64_t mod67965551447(uint64_t hash) { + return hash % 67965551447llu; + } + static uint64_t mod85631228929(uint64_t hash) { + return hash % 85631228929llu; + } + static uint64_t mod107888587883(uint64_t hash) { + return hash % 107888587883llu; + } + static uint64_t mod135931102921(uint64_t hash) { + return hash % 135931102921llu; + } + static uint64_t mod171262457903(uint64_t hash) { + return hash % 171262457903llu; + } + static uint64_t 
mod215777175787(uint64_t hash) { + return hash % 215777175787llu; + } + static uint64_t mod271862205833(uint64_t hash) { + return hash % 271862205833llu; + } + static uint64_t mod342524915839(uint64_t hash) { + return hash % 342524915839llu; + } + static uint64_t mod431554351609(uint64_t hash) { + return hash % 431554351609llu; + } + static uint64_t mod543724411781(uint64_t hash) { + return hash % 543724411781llu; + } + static uint64_t mod685049831731(uint64_t hash) { + return hash % 685049831731llu; + } + static uint64_t mod863108703229(uint64_t hash) { + return hash % 863108703229llu; + } + static uint64_t mod1087448823553(uint64_t hash) { + return hash % 1087448823553llu; + } + static uint64_t mod1370099663459(uint64_t hash) { + return hash % 1370099663459llu; + } + static uint64_t mod1726217406467(uint64_t hash) { + return hash % 1726217406467llu; + } + static uint64_t mod2174897647073(uint64_t hash) { + return hash % 2174897647073llu; + } + static uint64_t mod2740199326961(uint64_t hash) { + return hash % 2740199326961llu; + } + static uint64_t mod3452434812973(uint64_t hash) { + return hash % 3452434812973llu; + } + static uint64_t mod4349795294267(uint64_t hash) { + return hash % 4349795294267llu; + } + static uint64_t mod5480398654009(uint64_t hash) { + return hash % 5480398654009llu; + } + static uint64_t mod6904869625999(uint64_t hash) { + return hash % 6904869625999llu; + } + static uint64_t mod8699590588571(uint64_t hash) { + return hash % 8699590588571llu; + } + static uint64_t mod10960797308051(uint64_t hash) { + return hash % 10960797308051llu; + } + static uint64_t mod13809739252051(uint64_t hash) { + return hash % 13809739252051llu; + } + static uint64_t mod17399181177241(uint64_t hash) { + return hash % 17399181177241llu; + } + static uint64_t mod21921594616111(uint64_t hash) { + return hash % 21921594616111llu; + } + static uint64_t mod27619478504183(uint64_t hash) { + return hash % 27619478504183llu; + } + static uint64_t mod34798362354533(uint64_t hash) { + return hash % 34798362354533llu; + } + static uint64_t mod43843189232363(uint64_t hash) { + return hash % 43843189232363llu; + } + static uint64_t mod55238957008387(uint64_t hash) { + return hash % 55238957008387llu; + } + static uint64_t mod69596724709081(uint64_t hash) { + return hash % 69596724709081llu; + } + static uint64_t mod87686378464759(uint64_t hash) { + return hash % 87686378464759llu; + } + static uint64_t mod110477914016779(uint64_t hash) { + return hash % 110477914016779llu; + } + static uint64_t mod139193449418173(uint64_t hash) { + return hash % 139193449418173llu; + } + static uint64_t mod175372756929481(uint64_t hash) { + return hash % 175372756929481llu; + } + static uint64_t mod220955828033581(uint64_t hash) { + return hash % 220955828033581llu; + } + static uint64_t mod278386898836457(uint64_t hash) { + return hash % 278386898836457llu; + } + static uint64_t mod350745513859007(uint64_t hash) { + return hash % 350745513859007llu; + } + static uint64_t mod441911656067171(uint64_t hash) { + return hash % 441911656067171llu; + } + static uint64_t mod556773797672909(uint64_t hash) { + return hash % 556773797672909llu; + } + static uint64_t mod701491027718027(uint64_t hash) { + return hash % 701491027718027llu; + } + static uint64_t mod883823312134381(uint64_t hash) { + return hash % 883823312134381llu; + } + static uint64_t mod1113547595345903(uint64_t hash) { + return hash % 1113547595345903llu; + } + static uint64_t mod1402982055436147(uint64_t hash) { + return hash % 1402982055436147llu; + } + 
static uint64_t mod1767646624268779(uint64_t hash) { + return hash % 1767646624268779llu; + } + static uint64_t mod2227095190691797(uint64_t hash) { + return hash % 2227095190691797llu; + } + static uint64_t mod2805964110872297(uint64_t hash) { + return hash % 2805964110872297llu; + } + static uint64_t mod3535293248537579(uint64_t hash) { + return hash % 3535293248537579llu; + } + static uint64_t mod4454190381383713(uint64_t hash) { + return hash % 4454190381383713llu; + } + static uint64_t mod5611928221744609(uint64_t hash) { + return hash % 5611928221744609llu; + } + static uint64_t mod7070586497075177(uint64_t hash) { + return hash % 7070586497075177llu; + } + static uint64_t mod8908380762767489(uint64_t hash) { + return hash % 8908380762767489llu; + } + static uint64_t mod11223856443489329(uint64_t hash) { + return hash % 11223856443489329llu; + } + static uint64_t mod14141172994150357(uint64_t hash) { + return hash % 14141172994150357llu; + } + static uint64_t mod17816761525534927(uint64_t hash) { + return hash % 17816761525534927llu; + } + static uint64_t mod22447712886978529(uint64_t hash) { + return hash % 22447712886978529llu; + } + static uint64_t mod28282345988300791(uint64_t hash) { + return hash % 28282345988300791llu; + } + static uint64_t mod35633523051069991(uint64_t hash) { + return hash % 35633523051069991llu; + } + static uint64_t mod44895425773957261(uint64_t hash) { + return hash % 44895425773957261llu; + } + static uint64_t mod56564691976601587(uint64_t hash) { + return hash % 56564691976601587llu; + } + static uint64_t mod71267046102139967(uint64_t hash) { + return hash % 71267046102139967llu; + } + static uint64_t mod89790851547914507(uint64_t hash) { + return hash % 89790851547914507llu; + } + static uint64_t mod113129383953203213(uint64_t hash) { + return hash % 113129383953203213llu; + } + static uint64_t mod142534092204280003(uint64_t hash) { + return hash % 142534092204280003llu; + } + static uint64_t mod179581703095829107(uint64_t hash) { + return hash % 179581703095829107llu; + } + static uint64_t mod226258767906406483(uint64_t hash) { + return hash % 226258767906406483llu; + } + static uint64_t mod285068184408560057(uint64_t hash) { + return hash % 285068184408560057llu; + } + static uint64_t mod359163406191658253(uint64_t hash) { + return hash % 359163406191658253llu; + } + static uint64_t mod452517535812813007(uint64_t hash) { + return hash % 452517535812813007llu; + } + static uint64_t mod570136368817120201(uint64_t hash) { + return hash % 570136368817120201llu; + } + static uint64_t mod718326812383316683(uint64_t hash) { + return hash % 718326812383316683llu; + } + static uint64_t mod905035071625626043(uint64_t hash) { + return hash % 905035071625626043llu; + } + static uint64_t mod1140272737634240411(uint64_t hash) { + return hash % 1140272737634240411llu; + } + static uint64_t mod1436653624766633509(uint64_t hash) { + return hash % 1436653624766633509llu; + } + static uint64_t mod1810070143251252131(uint64_t hash) { + return hash % 1810070143251252131llu; + } + static uint64_t mod2280545475268481167(uint64_t hash) { + return hash % 2280545475268481167llu; + } + static uint64_t mod2873307249533267101(uint64_t hash) { + return hash % 2873307249533267101llu; + } + static uint64_t mod3620140286502504283(uint64_t hash) { + return hash % 3620140286502504283llu; + } + static uint64_t mod4561090950536962147(uint64_t hash) { + return hash % 4561090950536962147llu; + } + static uint64_t mod5746614499066534157(uint64_t hash) { + return hash % 
5746614499066534157llu; + } + static uint64_t mod7240280573005008577(uint64_t hash) { + return hash % 7240280573005008577llu; + } + static uint64_t mod9122181901073924329(uint64_t hash) { + return hash % 9122181901073924329llu; + } + static uint64_t mod11493228998133068689(uint64_t hash) { + return hash % 11493228998133068689llu; + } + static uint64_t mod14480561146010017169(uint64_t hash) { + return hash % 14480561146010017169llu; + } + static uint64_t mod18446744073709551557(uint64_t hash) { + return hash % 18446744073709551557llu; + } + + using mod_function = uint64_t (*)(uint64_t); + + mod_function next_size_over(uint64_t& size) const { + // prime numbers generated by the following method: + // 1. start with a prime p = 2 + // 2. go to wolfram alpha and get p = NextPrime(2 * p) + // 3. repeat 2. until you overflow 64 bits + // you now have large gaps which you would hit if somebody called reserve() + // with an unlucky number. + // 4. to fill the gaps for every prime p go to wolfram alpha and get + // ClosestPrime(p * 2^(1/3)) and ClosestPrime(p * 2^(2/3)) and put those in + // the gaps + // 5. get PrevPrime(2^64) and put it at the end + // NOLINTNEXTLINE(*c-arrays*) + static constexpr const uint64_t prime_list[] = { + 2llu, + 3llu, + 5llu, + 7llu, + 11llu, + 13llu, + 17llu, + 23llu, + 29llu, + 37llu, + 47llu, + 59llu, + 73llu, + 97llu, + 127llu, + 151llu, + 197llu, + 251llu, + 313llu, + 397llu, + 499llu, + 631llu, + 797llu, + 1009llu, + 1259llu, + 1597llu, + 2011llu, + 2539llu, + 3203llu, + 4027llu, + 5087llu, + 6421llu, + 8089llu, + 10193llu, + 12853llu, + 16193llu, + 20399llu, + 25717llu, + 32401llu, + 40823llu, + 51437llu, + 64811llu, + 81649llu, + 102877llu, + 129607llu, + 163307llu, + 205759llu, + 259229llu, + 326617llu, + 411527llu, + 518509llu, + 653267llu, + 823117llu, + 1037059llu, + 1306601llu, + 1646237llu, + 2074129llu, + 2613229llu, + 3292489llu, + 4148279llu, + 5226491llu, + 6584983llu, + 8296553llu, + 10453007llu, + 13169977llu, + 16593127llu, + 20906033llu, + 26339969llu, + 33186281llu, + 41812097llu, + 52679969llu, + 66372617llu, + 83624237llu, + 105359939llu, + 132745199llu, + 167248483llu, + 210719881llu, + 265490441llu, + 334496971llu, + 421439783llu, + 530980861llu, + 668993977llu, + 842879579llu, + 1061961721llu, + 1337987929llu, + 1685759167llu, + 2123923447llu, + 2675975881llu, + 3371518343llu, + 4247846927llu, + 5351951779llu, + 6743036717llu, + 8495693897llu, + 10703903591llu, + 13486073473llu, + 16991387857llu, + 21407807219llu, + 26972146961llu, + 33982775741llu, + 42815614441llu, + 53944293929llu, + 67965551447llu, + 85631228929llu, + 107888587883llu, + 135931102921llu, + 171262457903llu, + 215777175787llu, + 271862205833llu, + 342524915839llu, + 431554351609llu, + 543724411781llu, + 685049831731llu, + 863108703229llu, + 1087448823553llu, + 1370099663459llu, + 1726217406467llu, + 2174897647073llu, + 2740199326961llu, + 3452434812973llu, + 4349795294267llu, + 5480398654009llu, + 6904869625999llu, + 8699590588571llu, + 10960797308051llu, + 13809739252051llu, + 17399181177241llu, + 21921594616111llu, + 27619478504183llu, + 34798362354533llu, + 43843189232363llu, + 55238957008387llu, + 69596724709081llu, + 87686378464759llu, + 110477914016779llu, + 139193449418173llu, + 175372756929481llu, + 220955828033581llu, + 278386898836457llu, + 350745513859007llu, + 441911656067171llu, + 556773797672909llu, + 701491027718027llu, + 883823312134381llu, + 1113547595345903llu, + 1402982055436147llu, + 1767646624268779llu, + 2227095190691797llu, + 2805964110872297llu, + 
3535293248537579llu, + 4454190381383713llu, + 5611928221744609llu, + 7070586497075177llu, + 8908380762767489llu, + 11223856443489329llu, + 14141172994150357llu, + 17816761525534927llu, + 22447712886978529llu, + 28282345988300791llu, + 35633523051069991llu, + 44895425773957261llu, + 56564691976601587llu, + 71267046102139967llu, + 89790851547914507llu, + 113129383953203213llu, + 142534092204280003llu, + 179581703095829107llu, + 226258767906406483llu, + 285068184408560057llu, + 359163406191658253llu, + 452517535812813007llu, + 570136368817120201llu, + 718326812383316683llu, + 905035071625626043llu, + 1140272737634240411llu, + 1436653624766633509llu, + 1810070143251252131llu, + 2280545475268481167llu, + 2873307249533267101llu, + 3620140286502504283llu, + 4561090950536962147llu, + 5746614499066534157llu, + 7240280573005008577llu, + 9122181901073924329llu, + 11493228998133068689llu, + 14480561146010017169llu, + 18446744073709551557llu}; + // NOLINTNEXTLINE(*c-arrays*) + static constexpr uint64_t (*const mod_functions[])(uint64_t) = { + &mod0, + &mod2, + &mod3, + &mod5, + &mod7, + &mod11, + &mod13, + &mod17, + &mod23, + &mod29, + &mod37, + &mod47, + &mod59, + &mod73, + &mod97, + &mod127, + &mod151, + &mod197, + &mod251, + &mod313, + &mod397, + &mod499, + &mod631, + &mod797, + &mod1009, + &mod1259, + &mod1597, + &mod2011, + &mod2539, + &mod3203, + &mod4027, + &mod5087, + &mod6421, + &mod8089, + &mod10193, + &mod12853, + &mod16193, + &mod20399, + &mod25717, + &mod32401, + &mod40823, + &mod51437, + &mod64811, + &mod81649, + &mod102877, + &mod129607, + &mod163307, + &mod205759, + &mod259229, + &mod326617, + &mod411527, + &mod518509, + &mod653267, + &mod823117, + &mod1037059, + &mod1306601, + &mod1646237, + &mod2074129, + &mod2613229, + &mod3292489, + &mod4148279, + &mod5226491, + &mod6584983, + &mod8296553, + &mod10453007, + &mod13169977, + &mod16593127, + &mod20906033, + &mod26339969, + &mod33186281, + &mod41812097, + &mod52679969, + &mod66372617, + &mod83624237, + &mod105359939, + &mod132745199, + &mod167248483, + &mod210719881, + &mod265490441, + &mod334496971, + &mod421439783, + &mod530980861, + &mod668993977, + &mod842879579, + &mod1061961721, + &mod1337987929, + &mod1685759167, + &mod2123923447, + &mod2675975881, + &mod3371518343, + &mod4247846927, + &mod5351951779, + &mod6743036717, + &mod8495693897, + &mod10703903591, + &mod13486073473, + &mod16991387857, + &mod21407807219, + &mod26972146961, + &mod33982775741, + &mod42815614441, + &mod53944293929, + &mod67965551447, + &mod85631228929, + &mod107888587883, + &mod135931102921, + &mod171262457903, + &mod215777175787, + &mod271862205833, + &mod342524915839, + &mod431554351609, + &mod543724411781, + &mod685049831731, + &mod863108703229, + &mod1087448823553, + &mod1370099663459, + &mod1726217406467, + &mod2174897647073, + &mod2740199326961, + &mod3452434812973, + &mod4349795294267, + &mod5480398654009, + &mod6904869625999, + &mod8699590588571, + &mod10960797308051, + &mod13809739252051, + &mod17399181177241, + &mod21921594616111, + &mod27619478504183, + &mod34798362354533, + &mod43843189232363, + &mod55238957008387, + &mod69596724709081, + &mod87686378464759, + &mod110477914016779, + &mod139193449418173, + &mod175372756929481, + &mod220955828033581, + &mod278386898836457, + &mod350745513859007, + &mod441911656067171, + &mod556773797672909, + &mod701491027718027, + &mod883823312134381, + &mod1113547595345903, + &mod1402982055436147, + &mod1767646624268779, + &mod2227095190691797, + &mod2805964110872297, + &mod3535293248537579, + 
&mod4454190381383713, + &mod5611928221744609, + &mod7070586497075177, + &mod8908380762767489, + &mod11223856443489329, + &mod14141172994150357, + &mod17816761525534927, + &mod22447712886978529, + &mod28282345988300791, + &mod35633523051069991, + &mod44895425773957261, + &mod56564691976601587, + &mod71267046102139967, + &mod89790851547914507, + &mod113129383953203213, + &mod142534092204280003, + &mod179581703095829107, + &mod226258767906406483, + &mod285068184408560057, + &mod359163406191658253, + &mod452517535812813007, + &mod570136368817120201, + &mod718326812383316683, + &mod905035071625626043, + &mod1140272737634240411, + &mod1436653624766633509, + &mod1810070143251252131, + &mod2280545475268481167, + &mod2873307249533267101, + &mod3620140286502504283, + &mod4561090950536962147, + &mod5746614499066534157, + &mod7240280573005008577, + &mod9122181901073924329, + &mod11493228998133068689, + &mod14480561146010017169, + &mod18446744073709551557}; + const uint64_t* found = std::lower_bound( + std::begin(prime_list), std::end(prime_list) - 1, size); + size = *found; + return mod_functions[1 + found - prime_list]; + } + void commit(mod_function new_mod_function) { + current_mod_function = new_mod_function; + } + void reset() { + current_mod_function = &mod0; + } + + uint64_t index_for_hash(uint64_t hash, uint64_t /*num_slots_minus_one*/) + const { + return current_mod_function(hash); + } + uint64_t keep_in_range(uint64_t index, uint64_t num_slots_minus_one) const { + return index > num_slots_minus_one ? current_mod_function(index) : index; + } + + private: + mod_function current_mod_function = &mod0; +}; + +struct power_of_two_hash_policy { + uint64_t index_for_hash(uint64_t hash, uint64_t num_slots_minus_one) const { + return hash & num_slots_minus_one; + } + uint64_t keep_in_range(uint64_t index, uint64_t num_slots_minus_one) const { + return index_for_hash(index, num_slots_minus_one); + } + int8_t next_size_over(uint64_t& size) const { + size = detailv3::next_power_of_two(size); + return 0; + } + void commit(int8_t) {} + void reset() {} +}; + +struct fibonacci_hash_policy { + uint64_t index_for_hash(uint64_t hash, uint64_t /*num_slots_minus_one*/) + const { + return (11400714819323198485ull * hash) >> shift; + } + uint64_t keep_in_range(uint64_t index, uint64_t num_slots_minus_one) const { + return index & num_slots_minus_one; + } + + int8_t next_size_over(uint64_t& size) const { + size = std::max(uint64_t(2), detailv3::next_power_of_two(size)); + return static_cast(64 - detailv3::log2(size)); + } + void commit(int8_t shift_) { + shift = shift_; + } + void reset() { + shift = 63; + } + + private: + int8_t shift = 63; +}; + +template < + typename K, + typename V, + typename H = std::hash, + typename E = std::equal_to, + typename A = std::allocator>> +class flat_hash_map + : public detailv3::sherwood_v3_table< + std::pair, + K, + H, + detailv3::KeyOrValueHasher, H>, + E, + detailv3::KeyOrValueEquality, E>, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>>> { + using Table = detailv3::sherwood_v3_table< + std::pair, + K, + H, + detailv3::KeyOrValueHasher, H>, + E, + detailv3::KeyOrValueEquality, E>, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>>>; + + public: + using key_type = K; + using mapped_type = V; + + using Table::Table; + flat_hash_map() = default; + + inline V& operator[](const K& key) { + return emplace(key, convertible_to_value()).first->second; + } + inline V& operator[](K&& key) { + return 
emplace(std::move(key), convertible_to_value()).first->second; + } + V& at(const K& key) { + auto found = this->find(key); + if (found == this->end()) + throw std::out_of_range("Argument passed to at() was not in the map."); + return found->second; + } + const V& at(const K& key) const { + auto found = this->find(key); + if (found == this->end()) + throw std::out_of_range("Argument passed to at() was not in the map."); + return found->second; + } + + using Table::emplace; + std::pair emplace() { + return emplace(key_type(), convertible_to_value()); + } + template + std::pair insert_or_assign( + const key_type& key, + M&& m) { + auto emplace_result = emplace(key, std::forward(m)); + if (!emplace_result.second) + emplace_result.first->second = std::forward(m); + return emplace_result; + } + template + std::pair insert_or_assign( + key_type&& key, + M&& m) { + auto emplace_result = emplace(std::move(key), std::forward(m)); + if (!emplace_result.second) + emplace_result.first->second = std::forward(m); + return emplace_result; + } + template + typename Table::iterator insert_or_assign( + typename Table::const_iterator, + const key_type& key, + M&& m) { + return insert_or_assign(key, std::forward(m)).first; + } + template + typename Table::iterator insert_or_assign( + typename Table::const_iterator, + key_type&& key, + M&& m) { + return insert_or_assign(std::move(key), std::forward(m)).first; + } + + friend bool operator==(const flat_hash_map& lhs, const flat_hash_map& rhs) { + if (lhs.size() != rhs.size()) + return false; + for (const typename Table::value_type& value : lhs) { + auto found = rhs.find(value.first); + if (found == rhs.end() || value.second != found->second) + return false; + } + return true; + } + friend bool operator!=(const flat_hash_map& lhs, const flat_hash_map& rhs) { + return !(lhs == rhs); + } + + private: + struct convertible_to_value { + operator V() const { + return V(); + } + }; +}; + +template < + typename T, + typename H = std::hash, + typename E = std::equal_to, + typename A = std::allocator> +class flat_hash_set + : public detailv3::sherwood_v3_table< + T, + T, + H, + detailv3::functor_storage, + E, + detailv3::functor_storage, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>> { + using Table = detailv3::sherwood_v3_table< + T, + T, + H, + detailv3::functor_storage, + E, + detailv3::functor_storage, + A, + typename std::allocator_traits::template rebind_alloc< + detailv3::sherwood_v3_entry>>; + + public: + using key_type = T; + + using Table::Table; + flat_hash_set() = default; + + template + std::pair emplace(Args&&... 
args) { + return Table::emplace(T(std::forward(args)...)); + } + std::pair emplace(const key_type& arg) { + return Table::emplace(arg); + } + std::pair emplace(key_type& arg) { + return Table::emplace(arg); + } + std::pair emplace(const key_type&& arg) { + return Table::emplace(std::move(arg)); + } + std::pair emplace(key_type&& arg) { + return Table::emplace(std::move(arg)); + } + + friend bool operator==(const flat_hash_set& lhs, const flat_hash_set& rhs) { + if (lhs.size() != rhs.size()) + return false; + for (const T& value : lhs) { + if (rhs.find(value) == rhs.end()) + return false; + } + return true; + } + friend bool operator!=(const flat_hash_set& lhs, const flat_hash_set& rhs) { + return !(lhs == rhs); + } +}; + +template +struct power_of_two_std_hash : std::hash { + typedef ska::power_of_two_hash_policy hash_policy; +}; + +} // end namespace ska + +C10_CLANG_DIAGNOSTIC_POP() + +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/intrusive_ptr.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/intrusive_ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..035f22e3c1867bdfc676c34302adea086ce37640 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/intrusive_ptr.h @@ -0,0 +1,1074 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace pybind11 { +template +class class_; +} + +namespace c10 { +class intrusive_ptr_target; +namespace raw { +namespace weak_intrusive_ptr { +inline void incref(intrusive_ptr_target* self); +} +namespace intrusive_ptr { +inline void incref(intrusive_ptr_target* self); +} + +// constructor tag used by intrusive_ptr constructors +struct DontIncreaseRefcount {}; +} // namespace raw + +namespace detail { +constexpr uint32_t kImpracticallyHugeReferenceCount = 0x0FFFFFFF; +} // namespace detail + +/** + * intrusive_ptr is an alternative to shared_ptr that has better + * performance because it does the refcounting intrusively + * (i.e. in a member of the object itself). + * Your class T needs to inherit from intrusive_ptr_target to allow it to be + * used in an intrusive_ptr. Your class's constructor should not allow + *`this` to escape to other threads or create an intrusive_ptr from `this`. + */ + +// Note [Stack allocated intrusive_ptr_target safety] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// A well known problem with std::enable_shared_from_this is that it +// allows you to create a std::shared_ptr from a stack allocated object, +// which is totally bogus because the object will die once you return +// from the stack. In intrusive_ptr, we can detect that this has occurred, +// because we set the refcount/weakcount of objects which inherit from +// intrusive_ptr_target to zero, *unless* we can prove that the object +// was dynamically allocated (e.g., via make_intrusive). +// +// Thus, whenever you transmute a T* into a intrusive_ptr, we check +// and make sure that the refcount isn't zero (or, a more subtle +// test for weak_intrusive_ptr, for which the refcount may validly +// be zero, but the weak refcount better not be zero), because that +// tells us if the object was allocated by us. If it wasn't, no +// intrusive_ptr for you! 
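To make the two notes above concrete, here is a small illustrative sketch. It is not part of the header added by this diff: `Widget` is a hypothetical type, and the snippet only exercises APIs declared elsewhere in this file (`make_intrusive`, `use_count`, `reset`).

#include <c10/util/intrusive_ptr.h>

// Hypothetical refcounted type, used for illustration only.
struct Widget : public c10::intrusive_ptr_target {
  int payload;
  explicit Widget(int p) : payload(p) {}
};

void intrusive_ptr_example() {
  // Heap-allocate through make_intrusive; the private raw-pointer constructor
  // it uses initializes refcount_ and weakcount_ to 1 with relaxed stores.
  c10::intrusive_ptr<Widget> a = c10::make_intrusive<Widget>(42);

  // Copying bumps the intrusive refcount; there is no separate control block.
  c10::intrusive_ptr<Widget> b = a; // a.use_count() == 2 here

  b->payload = 7; // operator-> gives access to the managed object
  b.reset();      // drops one strong reference; `a` still owns the Widget

  // A stack-allocated Widget keeps refcount_ == 0, which is how the checks
  // described above detect attempts to wrap such an object (e.g. via
  // unsafe_reclaim_from_nonowning) in debug builds.
}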
+ +// NOLINTNEXTLINE(cppcoreguidelines-virtual-class-destructor) +class C10_API intrusive_ptr_target { + // Note [Weak references for intrusive refcounting] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Here's the scheme: + // + // - refcount == number of strong references to the object + // weakcount == number of weak references to the object, + // plus one more if refcount > 0 + // An invariant: refcount > 0 => weakcount > 0 + // + // - c10::StorageImpl stays live as long as there are any strong + // or weak pointers to it (weakcount > 0, since strong + // references count as a +1 to weakcount) + // + // - finalizers are called and data_ptr is deallocated when refcount == 0 + // + // - Once refcount == 0, it can never again be > 0 (the transition + // from > 0 to == 0 is monotonic) + // + // - When you access c10::StorageImpl via a weak pointer, you must + // atomically increment the use count, if it is greater than 0. + // If it is not, you must report that the storage is dead. + // + mutable std::atomic refcount_; + mutable std::atomic weakcount_; + + template + friend class intrusive_ptr; + friend inline void raw::intrusive_ptr::incref(intrusive_ptr_target* self); + + template + friend class weak_intrusive_ptr; + friend inline void raw::weak_intrusive_ptr::incref( + intrusive_ptr_target* self); + + template + friend struct ExclusivelyOwnedTensorTraits; + + protected: + // protected destructor. We never want to destruct intrusive_ptr_target* + // directly. + virtual ~intrusive_ptr_target() { +// Disable -Wterminate and -Wexceptions so we're allowed to use assertions +// (i.e. throw exceptions) in a destructor. +// We also have to disable -Wunknown-warning-option and -Wpragmas, because +// some other compilers don't know about -Wterminate or -Wexceptions and +// will show a warning about unknown warning options otherwise. +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning( \ + disable : 4297) // function assumed not to throw an exception but does +#else +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wunknown-warning-option" +#pragma GCC diagnostic ignored "-Wterminate" +#pragma GCC diagnostic ignored "-Wexceptions" +#endif + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + // Second condition is there to accommodate + // unsafe_adapt_non_heap_allocated: since we are doing our own + // deallocation in that case, it is correct for each + // expected_decref to have happened (some user code tried to + // decref and thus free the object, but it didn't happen right + // away) or not (no user code tried to free the object, and + // now it's getting destroyed through whatever mechanism the + // caller of unsafe_adapt_non_heap_allocated wanted to + // use). We choose our reference count such that the count + // will not dip below kImpracticallyHugeReferenceCount regardless. + refcount_.load() == 0 || + refcount_.load() >= detail::kImpracticallyHugeReferenceCount, + "Tried to destruct an intrusive_ptr_target that still has intrusive_ptr to it; refcount was ", + refcount_.load()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + // See ~intrusive_ptr for optimization that will frequently result in 1 + // at destruction time. 
+        weakcount_.load() == 1 || weakcount_.load() == 0 ||
+            weakcount_.load() == detail::kImpracticallyHugeReferenceCount - 1 ||
+            weakcount_.load() == detail::kImpracticallyHugeReferenceCount,
+        "Tried to destruct an intrusive_ptr_target that still has weak_intrusive_ptr to it");
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma warning(pop)
+#else
+#pragma GCC diagnostic pop
+#endif
+  }
+
+  constexpr intrusive_ptr_target() noexcept : refcount_(0), weakcount_(0) {}
+
+  // intrusive_ptr_target supports copy and move: but refcount and weakcount
+  // don't participate (since they are intrinsic properties of the memory
+  // location)
+  intrusive_ptr_target(intrusive_ptr_target&& /*other*/) noexcept
+      : intrusive_ptr_target() {}
+
+  intrusive_ptr_target& operator=(intrusive_ptr_target&& /*other*/) noexcept {
+    return *this;
+  }
+
+  intrusive_ptr_target(const intrusive_ptr_target& /*other*/) noexcept
+      : intrusive_ptr_target() {}
+
+  intrusive_ptr_target& operator=(
+      const intrusive_ptr_target& /*other*/) noexcept {
+    return *this;
+  }
+
+ private:
+  /**
+   * This is called when refcount reaches zero.
+   * You can override this to release expensive resources.
+   * There might still be weak references, so your object might not get
+   * destructed yet, but you can assume the object isn't used anymore,
+   * i.e. no more calls to methods or accesses to members (we just can't
+   * destruct it yet because we need the weakcount accessible).
+   *
+   * If there are no weak references (i.e. your class is about to be
+   * destructed), this function WILL NOT be called.
+   */
+  virtual void release_resources() {}
+};
+
+namespace detail {
+template <class TTarget>
+struct intrusive_target_default_null_type final {
+  static constexpr TTarget* singleton() noexcept {
+    return nullptr;
+  }
+};
+
+template <class TTarget, class ToNullType, class FromNullType>
+TTarget* assign_ptr_(TTarget* rhs) {
+  if (FromNullType::singleton() == rhs) {
+    return ToNullType::singleton();
+  } else {
+    return rhs;
+  }
+}
+
+// Increment needs to be acquire-release to make use_count() and
+// unique() reliable.
+inline uint32_t atomic_refcount_increment(std::atomic<uint32_t>& refcount) {
+  return refcount.fetch_add(1, std::memory_order_acq_rel) + 1;
+}
+
+// weak_use_count() is only used for testing, so we don't need it to
+// be reliable. Relaxed should be fine.
+inline uint32_t atomic_weakcount_increment(std::atomic<uint32_t>& weakcount) {
+  return weakcount.fetch_add(1, std::memory_order_relaxed) + 1;
+}
+
+// Both decrements need to be acquire-release for correctness. See
+// e.g. std::shared_ptr implementation.
+inline uint32_t atomic_refcount_decrement(std::atomic& refcount) { + return refcount.fetch_sub(1, std::memory_order_acq_rel) - 1; +} + +inline uint32_t atomic_weakcount_decrement(std::atomic& weakcount) { + return weakcount.fetch_sub(1, std::memory_order_acq_rel) - 1; +} + +} // namespace detail + +template +class weak_intrusive_ptr; + +template < + class TTarget, + class NullType = detail::intrusive_target_default_null_type> +class intrusive_ptr final { + private: +// the following static assert would be nice to have but it requires +// the target class T to be fully defined when intrusive_ptr is instantiated +// this is a problem for classes that contain pointers to themselves +// static_assert( +// std::is_base_of::value, +// "intrusive_ptr can only be used for classes that inherit from +// intrusive_ptr_target."); +#ifndef _WIN32 + // This static_assert triggers on MSVC + // error C2131: expression did not evaluate to a constant + static_assert( + // NOLINTNEXTLINE(misc-redundant-expression) + NullType::singleton() == NullType::singleton(), + "NullType must have a constexpr singleton() method"); +#endif + static_assert( + std::is_base_of_v< + TTarget, + std::remove_pointer_t>, + "NullType::singleton() must return a element_type* pointer"); + + TTarget* target_; + + template + friend struct ExclusivelyOwnedTensorTraits; + template + friend class intrusive_ptr; + friend class weak_intrusive_ptr; + + // Make pybind11::class_ be a friend class of intrusive_ptr, so that custom + // smart holder in pybind11 could access the private constructor of + // intrusive_ptr(T*) which took the ownership of the object. This is required + // by customer holder macro PYBIND11_DECLARE_HOLDER_TYPE, where it uses + // intrusive_ptr(TTarget*) to initialize and take ownership of the object. For + // details, see + // https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers + template + friend class pybind11::class_; + + void retain_() { + if (target_ != NullType::singleton()) { + uint32_t new_refcount = + detail::atomic_refcount_increment(target_->refcount_); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + new_refcount != 1, + "intrusive_ptr: Cannot increase refcount after it reached zero."); + } + } + + void reset_() noexcept { + if (target_ != NullType::singleton() && + detail::atomic_refcount_decrement(target_->refcount_) == 0) { + // See comment above about weakcount. As long as refcount>0, + // weakcount is one larger than the actual number of weak references. + // So we need to decrement it here. + bool should_delete = + target_->weakcount_.load(std::memory_order_acquire) == 1; + if (!should_delete) { + // justification for const_cast: release_resources is basically a + // destructor and a destructor always mutates the object, even for const + // objects. NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + const_cast*>(target_)->release_resources(); + should_delete = + detail::atomic_weakcount_decrement(target_->weakcount_) == 0; + } + if (should_delete) { + delete target_; + } + } + } + + // raw pointer constructors are not public because we shouldn't make + // intrusive_ptr out of raw pointers except from inside the make_intrusive(), + // reclaim() and weak_intrusive_ptr::lock() implementations. + + // This constructor will increase the ref counter for you. 
+ // This constructor will be used by the make_intrusive(), and also pybind11, + // which wrap the intrusive_ptr holder around the raw pointer and incref + // correspondingly (pybind11 requires raw pointer constructor to incref by + // default). + explicit intrusive_ptr(TTarget* target) + : intrusive_ptr(target, raw::DontIncreaseRefcount{}) { + if (target_ != NullType::singleton()) { + // We just created result.target_, so we know no other thread has + // access to it, so we know we needn't care about memory ordering. + // (On x86_64, a store with memory_order_relaxed generates a plain old + // `mov`, whereas an atomic increment does a lock-prefixed `add`, which is + // much more expensive: https://godbolt.org/z/eKPzj8.) + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + target_->refcount_ == 0 && target_->weakcount_ == 0, + "intrusive_ptr: Newly-created target had non-zero refcounts. Does its " + "constructor do something strange like incref or create an " + "intrusive_ptr from `this`?"); + target_->refcount_.store(1, std::memory_order_relaxed); + target_->weakcount_.store(1, std::memory_order_relaxed); + } + } + + public: + using element_type = TTarget; + + intrusive_ptr() noexcept + : intrusive_ptr(NullType::singleton(), raw::DontIncreaseRefcount{}) {} + + intrusive_ptr(std::nullptr_t) noexcept + : intrusive_ptr(NullType::singleton(), raw::DontIncreaseRefcount{}) {} + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + explicit intrusive_ptr(TTarget* target, raw::DontIncreaseRefcount) noexcept + : target_(target) {} + + explicit intrusive_ptr(std::unique_ptr rhs) noexcept + : intrusive_ptr(rhs.release()) {} + + intrusive_ptr(intrusive_ptr&& rhs) noexcept : target_(rhs.target_) { + rhs.target_ = NullType::singleton(); + } + + template + // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved) + /* implicit */ intrusive_ptr(intrusive_ptr&& rhs) noexcept + : target_( + detail::assign_ptr_(rhs.target_)) { + static_assert( + std::is_convertible::value, + "Type mismatch. intrusive_ptr move constructor got pointer of wrong type."); + rhs.target_ = FromNullType::singleton(); + } + + intrusive_ptr(const intrusive_ptr& rhs) : target_(rhs.target_) { + retain_(); + } + + template + /* implicit */ intrusive_ptr(const intrusive_ptr& rhs) + : target_( + detail::assign_ptr_(rhs.target_)) { + static_assert( + std::is_convertible::value, + "Type mismatch. intrusive_ptr copy constructor got pointer of wrong type."); + retain_(); + } + + ~intrusive_ptr() noexcept { + reset_(); + } + + intrusive_ptr& operator=(intrusive_ptr&& rhs) & noexcept { + // NOLINTNEXTLINE(*assign*) + return operator= (std::move(rhs)); + } + + template + intrusive_ptr& operator=(intrusive_ptr&& rhs) & noexcept { + static_assert( + std::is_convertible::value, + "Type mismatch. intrusive_ptr move assignment got pointer of wrong type."); + intrusive_ptr tmp = std::move(rhs); + swap(tmp); + return *this; + } + + // Assignment is implemented using copy and swap. That's safe for self + // assignment. + // NOLINTNEXTLINE(bugprone-unhandled-self-assignment) + intrusive_ptr& operator=(const intrusive_ptr& rhs) & noexcept { + // NOLINTNEXTLINE(*assign-operator, *assignment-signature) + return operator= (rhs); + } + + template + intrusive_ptr& operator=( + const intrusive_ptr& rhs) & noexcept { + static_assert( + std::is_convertible::value, + "Type mismatch. 
intrusive_ptr copy assignment got pointer of wrong type."); + intrusive_ptr tmp = rhs; + swap(tmp); + return *this; + } + + TTarget* get() const noexcept { + return target_; + } + + TTarget& operator*() const noexcept { + return *target_; + } + + TTarget* operator->() const noexcept { + return target_; + } + + operator bool() const noexcept { + return target_ != NullType::singleton(); + } + + void reset() noexcept { + reset_(); + target_ = NullType::singleton(); + } + + void swap(intrusive_ptr& rhs) noexcept { + std::swap(target_, rhs.target_); + } + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + bool defined() const noexcept { + return target_ != NullType::singleton(); + } + + uint32_t use_count() const noexcept { + if (target_ == NullType::singleton()) { + return 0; + } + return target_->refcount_.load(std::memory_order_acquire); + } + + uint32_t weak_use_count() const noexcept { + if (target_ == NullType::singleton()) { + return 0; + } + return target_->weakcount_.load(std::memory_order_acquire); + } + + bool unique() const noexcept { + return use_count() == 1; + } + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + TTarget* release() noexcept { + // NOLINTNEXTLINE(clang-analyzer-core.uninitialized.Assign) + TTarget* result = target_; + target_ = NullType::singleton(); + return result; + } + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + static intrusive_ptr reclaim(TTarget* owning_ptr) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + owning_ptr == NullType::singleton() || + owning_ptr->refcount_.load() == 0 || owning_ptr->weakcount_.load(), + "TTarget violates the invariant that refcount > 0 => weakcount > 0"); + return intrusive_ptr(owning_ptr, raw::DontIncreaseRefcount{}); + } + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + static intrusive_ptr reclaim_copy(TTarget* owning_ptr) { + auto ret = reclaim(owning_ptr); + ret.retain_(); + return ret; + } + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + template + static intrusive_ptr make(Args&&... args) { + return intrusive_ptr(new TTarget(std::forward(args)...)); + } + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). 
+ */ + static intrusive_ptr unsafe_steal_from_new(TTarget* raw_ptr) { + return intrusive_ptr(raw_ptr); + } + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * `expected_decrefs` is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + static intrusive_ptr unsafe_adapt_non_heap_allocated( + TTarget* raw_ptr, + uint32_t expected_decrefs) { + intrusive_ptr result(raw_ptr, raw::DontIncreaseRefcount{}); + // kImpracticallyHugeReferenceCount is impractically huge for a reference + // count, while being in no danger of overflowing uint32_t. We actually only + // need to initialize the refcount to 2 -- we are just doing an unbalanced + // incref to prevent the non-heap-allocated target from being + // freed, and we are optimizing that incref by directly + // initializing the refcounts rather than doing an expensive + // atomic increment. The reason to use kImpracticallyHugeReferenceCount is + // to accommodate the debug assertions in ~intrusive_ptr_target. +#ifdef NDEBUG + expected_decrefs = 0; +#endif + result.target_->refcount_.store( + detail::kImpracticallyHugeReferenceCount + expected_decrefs, + std::memory_order_relaxed); + result.target_->weakcount_.store( + detail::kImpracticallyHugeReferenceCount, std::memory_order_relaxed); + return result; + } + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + static intrusive_ptr unsafe_reclaim_from_nonowning(TTarget* raw_ptr) { + // See Note [Stack allocated intrusive_ptr_target safety] + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + raw_ptr == NullType::singleton() || raw_ptr->refcount_.load() > 0, + "intrusive_ptr: Can only reclaim pointers that are owned by someone"); + auto ptr = reclaim(raw_ptr); // doesn't increase refcount + ptr.retain_(); + return ptr; + } +}; + +template < + class TTarget, + class NullType = detail::intrusive_target_default_null_type, + class... Args> +inline intrusive_ptr make_intrusive(Args&&... 
args) { + return intrusive_ptr::make(std::forward(args)...); +} + +template +inline void swap( + intrusive_ptr& lhs, + intrusive_ptr& rhs) noexcept { + lhs.swap(rhs); +} + +// To allow intrusive_ptr inside std::map or std::set, we need operator< +template +inline bool operator<( + const intrusive_ptr& lhs, + const intrusive_ptr& rhs) noexcept { + return lhs.get() < rhs.get(); +} + +template +inline bool operator==( + const intrusive_ptr& lhs, + const intrusive_ptr& rhs) noexcept { + return lhs.get() == rhs.get(); +} + +template +inline bool operator==( + const intrusive_ptr& lhs, + std::nullptr_t) noexcept { + return lhs.get() == nullptr; +} + +template +inline bool operator==( + std::nullptr_t, + const intrusive_ptr& rhs) noexcept { + return nullptr == rhs.get(); +} + +template +inline bool operator!=( + const intrusive_ptr& lhs, + const intrusive_ptr& rhs) noexcept { + return !operator==(lhs, rhs); +} + +template +inline bool operator!=( + const intrusive_ptr& lhs, + std::nullptr_t) noexcept { + return !operator==(lhs, nullptr); +} + +template +inline bool operator!=( + std::nullptr_t, + const intrusive_ptr& rhs) noexcept { + return !operator==(nullptr, rhs); +} +template +struct MaybeOwnedTraits> { + using owned_type = c10::intrusive_ptr; + using borrow_type = c10::intrusive_ptr; + + static borrow_type createBorrow(const owned_type& from) { + return borrow_type::reclaim(from.get()); + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.release(); + lhs = borrow_type::reclaim(rhs.get()); + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.release(); + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type& /*borrow*/) { + return true; + } +}; + +template < + typename TTarget, + class NullType = detail::intrusive_target_default_null_type> +class weak_intrusive_ptr final { + private: + static_assert( + std::is_base_of_v, + "intrusive_ptr can only be used for classes that inherit from intrusive_ptr_target."); +#ifndef _WIN32 + // This static_assert triggers on MSVC + // error C2131: expression did not evaluate to a constant + static_assert( + NullType::singleton() == NullType::singleton(), + "NullType must have a constexpr singleton() method"); +#endif + static_assert( + std::is_base_of_v< + TTarget, + std::remove_pointer_t>, + "NullType::singleton() must return a element_type* pointer"); + + TTarget* target_; + + template + friend class weak_intrusive_ptr; + + void retain_() { + if (target_ != NullType::singleton()) { + uint32_t new_weakcount = + detail::atomic_weakcount_increment(target_->weakcount_); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + new_weakcount != 1, + "weak_intrusive_ptr: Cannot increase weakcount after it reached zero."); + } + } + + void reset_() noexcept { + if (target_ != NullType::singleton() && + detail::atomic_weakcount_decrement(target_->weakcount_) == 0) { + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDelete) + delete target_; + } + target_ = NullType::singleton(); + } + + constexpr explicit weak_intrusive_ptr(TTarget* target) : target_(target) {} + + public: + using element_type = TTarget; + + explicit weak_intrusive_ptr(const intrusive_ptr& ptr) + : weak_intrusive_ptr(ptr.get()) { + retain_(); + } + + weak_intrusive_ptr(weak_intrusive_ptr&& rhs) noexcept : target_(rhs.target_) { + rhs.target_ = NullType::singleton(); + } + + 
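  // Illustrative note (not part of the original header): a weak_intrusive_ptr
  // is normally obtained from a live intrusive_ptr and later upgraded with
  // lock(), which returns an empty intrusive_ptr once the last strong
  // reference is gone. Sketch, assuming a hypothetical Widget that inherits
  // from intrusive_ptr_target:
  //
  //   auto strong = c10::make_intrusive<Widget>(42);
  //   c10::weak_intrusive_ptr<Widget> weak(strong);
  //   if (auto locked = weak.lock()) {
  //     // object still alive; `locked` holds a fresh strong reference
  //   }
  //   strong.reset();  // drop the last strong reference
  //   // weak.expired() is now true and weak.lock() returns a null intrusive_ptr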
template + /* implicit */ weak_intrusive_ptr( + // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved) + weak_intrusive_ptr&& rhs) noexcept + : target_( + detail::assign_ptr_(rhs.target_)) { + static_assert( + std::is_convertible::value, + "Type mismatch. weak_intrusive_ptr move constructor got pointer of wrong type."); + rhs.target_ = FromNullType::singleton(); + } + + weak_intrusive_ptr(const weak_intrusive_ptr& rhs) : target_(rhs.target_) { + retain_(); + } + + template + /* implicit */ weak_intrusive_ptr( + const weak_intrusive_ptr& rhs) + : target_( + detail::assign_ptr_(rhs.target_)) { + static_assert( + std::is_convertible::value, + "Type mismatch. weak_intrusive_ptr copy constructor got pointer of wrong type."); + retain_(); + } + + ~weak_intrusive_ptr() noexcept { + reset_(); + } + + weak_intrusive_ptr& operator=(weak_intrusive_ptr&& rhs) & noexcept { + // NOLINTNEXTLINE(*assign*) + return operator= (std::move(rhs)); + } + + template + weak_intrusive_ptr& operator=( + weak_intrusive_ptr&& rhs) & noexcept { + static_assert( + std::is_convertible::value, + "Type mismatch. weak_intrusive_ptr move assignment got pointer of wrong type."); + weak_intrusive_ptr tmp = std::move(rhs); + swap(tmp); + return *this; + } + + weak_intrusive_ptr& operator=(const weak_intrusive_ptr& rhs) & noexcept { + if (this == &rhs) { + return *this; + } + // NOLINTNEXTLINE(*assign*) + return operator= (rhs); + } + + weak_intrusive_ptr& operator=( + const intrusive_ptr& rhs) & noexcept { + weak_intrusive_ptr tmp(rhs); + swap(tmp); + return *this; + } + + template + weak_intrusive_ptr& operator=( + const weak_intrusive_ptr& rhs) & noexcept { + static_assert( + std::is_convertible::value, + "Type mismatch. weak_intrusive_ptr copy assignment got pointer of wrong type."); + weak_intrusive_ptr tmp = rhs; + swap(tmp); + return *this; + } + + void reset() noexcept { + reset_(); + } + + void swap(weak_intrusive_ptr& rhs) noexcept { + TTarget* tmp = target_; + target_ = rhs.target_; + rhs.target_ = tmp; + } + + // NB: This should ONLY be used by the std::hash implementation + // for weak_intrusive_ptr. Another way you could do this is + // friend std::hash, but this triggers two + // bugs: + // + // (1) It triggers an nvcc bug, where std::hash in a friend class + // declaration gets preprocessed into hash, which then cannot + // actually be found. The error in this case looks like: + // + // error: no template named 'hash'; did you mean 'std::hash'? + // + // (2) On OS X, std::hash is declared as a struct, not a class. + // This twings: + // + // error: class 'hash' was previously declared as a struct + // [-Werror,-Wmismatched-tags] + // + // Both of these are work-aroundable, but on the whole, I decided + // it would be simpler and easier to make work if we just expose + // an unsafe getter for target_ + // + TTarget* _unsafe_get_target() const noexcept { + return target_; + } + + uint32_t use_count() const noexcept { + if (target_ == NullType::singleton()) { + return 0; + } + return target_->refcount_.load( + std::memory_order_acquire); // refcount, not weakcount! 
+ } + + uint32_t weak_use_count() const noexcept { + if (target_ == NullType::singleton()) { + return 0; + } + return target_->weakcount_.load(std::memory_order_acquire); + } + + bool expired() const noexcept { + return use_count() == 0; + } + + intrusive_ptr lock() const noexcept { + if (expired()) { + return intrusive_ptr(); + } else { + auto refcount = target_->refcount_.load(std::memory_order_seq_cst); + do { + if (refcount == 0) { + // Object already destructed, no strong references left anymore. + // Return nullptr. + return intrusive_ptr(); + } + } while ( + !target_->refcount_.compare_exchange_weak(refcount, refcount + 1)); + return intrusive_ptr( + target_, raw::DontIncreaseRefcount{}); + } + } + + /** + * Returns an owning (but still only weakly referenced) pointer to the + * underlying object and makes the weak_intrusive_ptr instance invalid. + * That means the weakcount is not decreased. + * You *must* put the returned pointer back into a weak_intrusive_ptr using + * weak_intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + TTarget* release() noexcept { + TTarget* result = target_; + target_ = NullType::singleton(); + return result; + } + + /** + * Takes an owning (but must be weakly referenced) pointer to TTarget* and + * creates a weak_intrusive_ptr that takes over ownership. + * This means that the weakcount is not increased. + * This is the counter-part to weak_intrusive_ptr::release() and the pointer + * passed in *must* have been created using weak_intrusive_ptr::release(). + */ + static weak_intrusive_ptr reclaim(TTarget* owning_weak_ptr) { + // See Note [Stack allocated intrusive_ptr_target safety] + // if refcount > 0, weakcount must be >1 for weak references to exist. + // see weak counting explanation at top of this file. + // if refcount == 0, weakcount only must be >0. + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + owning_weak_ptr == NullType::singleton() || + owning_weak_ptr->weakcount_.load() > 1 || + (owning_weak_ptr->refcount_.load() == 0 && + owning_weak_ptr->weakcount_.load() > 0), + "weak_intrusive_ptr: Can only weak_intrusive_ptr::reclaim() owning pointers that were created using weak_intrusive_ptr::release()."); + return weak_intrusive_ptr(owning_weak_ptr); + } + + /** + * Takes a pointer to TTarget* (may be weak or strong) and creates a + * new weak_intrusive_ptr representing a new weak reference, i.e. + * the raw pointer retains ownership. 
+ */ + static weak_intrusive_ptr reclaim_copy(TTarget* owning_ptr) { + auto ret = reclaim(owning_ptr); + ret.retain_(); + return ret; + } + + template + friend bool operator<( + const weak_intrusive_ptr& lhs, + const weak_intrusive_ptr& rhs) noexcept; + template + friend bool operator==( + const weak_intrusive_ptr& lhs, + const weak_intrusive_ptr& rhs) noexcept; +}; + +template +inline void swap( + weak_intrusive_ptr& lhs, + weak_intrusive_ptr& rhs) noexcept { + lhs.swap(rhs); +} + +// To allow weak_intrusive_ptr inside std::map or std::set, we need operator< +template +inline bool operator<( + const weak_intrusive_ptr& lhs, + const weak_intrusive_ptr& rhs) noexcept { + return lhs.target_ < rhs.target_; +} + +template +inline bool operator==( + const weak_intrusive_ptr& lhs, + const weak_intrusive_ptr& rhs) noexcept { + return lhs.target_ == rhs.target_; +} + +template +inline bool operator!=( + const weak_intrusive_ptr& lhs, + const weak_intrusive_ptr& rhs) noexcept { + return !operator==(lhs, rhs); +} + +// Alias for documentary purposes, to more easily distinguish +// weak raw intrusive pointers from intrusive pointers. +using weak_intrusive_ptr_target = intrusive_ptr_target; + +// This namespace provides some methods for working with +// raw pointers that subclass intrusive_ptr_target. They are not provided +// as methods on intrusive_ptr_target, because ideally you would not need these +// methods at all (use smart pointers), but if you are dealing with legacy code +// that still needs to pass around raw pointers, you may find these quite +// useful. +// +// An important usage note: some functions are only valid if you have a +// strong raw pointer to the object, while others are only valid if you +// have a weak raw pointer to the object. ONLY call intrusive_ptr namespace +// functions on strong pointers, and weak_intrusive_ptr namespace functions +// on weak pointers. If you mix it up, you may get an assert failure. +namespace raw { + +namespace intrusive_ptr { + +// WARNING: Unlike the reclaim() API, it is NOT valid to pass +// NullType::singleton to this function +inline void incref(intrusive_ptr_target* self) { + if (self) { + detail::atomic_refcount_increment(self->refcount_); + } +} + +// WARNING: Unlike the reclaim() API, it is NOT valid to pass +// NullType::singleton to this function +inline void decref(intrusive_ptr_target* self) { + // Let it die + c10::intrusive_ptr::reclaim(self); + // NB: Caller still has 'self' pointer, but it's now invalid. + // If you want more safety, used the actual c10::intrusive_ptr class +} + +template +inline T* make_weak(T* self) { + // NB: 'this' is a strong pointer, but we return a weak pointer + auto ptr = c10::intrusive_ptr::reclaim(self); + c10::weak_intrusive_ptr wptr(ptr); + ptr.release(); + return wptr.release(); +} + +inline uint32_t use_count(intrusive_ptr_target* self) { + auto ptr = c10::intrusive_ptr::reclaim(self); + auto r = ptr.use_count(); + ptr.release(); + return r; +} + +} // namespace intrusive_ptr + +namespace weak_intrusive_ptr { + +inline void incref(weak_intrusive_ptr_target* self) { + detail::atomic_weakcount_increment(self->weakcount_); +} + +inline void decref(weak_intrusive_ptr_target* self) { + // Let it die + c10::weak_intrusive_ptr::reclaim(self); + // NB: You still "have" the 'self' pointer, but it's now invalid. 
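// Sketch of the raw-pointer helpers above for code that must hand a strong
// raw pointer across a C-style boundary (illustrative; MyClass is an assumed
// subclass of c10::intrusive_ptr_target):
//
//   auto owner = c10::make_intrusive<MyClass>();
//   MyClass* raw = owner.get();              // non-owning view
//   c10::raw::intrusive_ptr::incref(raw);    // now also a strong raw pointer
//   // ... hand `raw` to legacy code; once that code is done with it:
//   c10::raw::intrusive_ptr::decref(raw);    // drops the extra strong ref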
+ // If you want more safety, used the actual c10::weak_intrusive_ptr class +} + +template +inline T* lock(T* self) { + auto wptr = c10::weak_intrusive_ptr::reclaim(self); + auto ptr = wptr.lock(); + wptr.release(); + return ptr.release(); +} + +// This gives the STRONG refcount of a WEAK pointer +inline uint32_t use_count(weak_intrusive_ptr_target* self) { + auto wptr = c10::weak_intrusive_ptr::reclaim(self); + auto r = wptr.use_count(); + wptr.release(); + return r; +} + +} // namespace weak_intrusive_ptr + +} // namespace raw + +} // namespace c10 + +namespace std { +// To allow intrusive_ptr and weak_intrusive_ptr inside std::unordered_map or +// std::unordered_set, we need std::hash +template +struct hash> { + size_t operator()(const c10::intrusive_ptr& x) const { + return std::hash()(x.get()); + } +}; +template +struct hash> { + size_t operator()(const c10::weak_intrusive_ptr& x) const { + return std::hash()(x._unsafe_get_target()); + } +}; +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/llvmMathExtras.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/llvmMathExtras.h new file mode 100644 index 0000000000000000000000000000000000000000..82651f7858c773fe4592dc915a3c1fc967038ddb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/llvmMathExtras.h @@ -0,0 +1,906 @@ +//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains some functions that are useful for math stuff. +// +//===----------------------------------------------------------------------===// + +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __ANDROID_NDK__ +#include +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#ifndef LLVM_GNUC_PREREQ +#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) +#define LLVM_GNUC_PREREQ(maj, min, patch) \ + ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \ + ((maj) << 20) + ((min) << 10) + (patch)) +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) +#define LLVM_GNUC_PREREQ(maj, min, patch) \ + ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10)) +#else +#define LLVM_GNUC_PREREQ(maj, min, patch) 0 +#endif +#endif + +#ifdef _MSC_VER +// Declare these intrinsics manually rather including intrin.h. It's very +// expensive, and MathExtras.h is popular. +// #include +extern "C" { +unsigned char _BitScanForward(unsigned long* _Index, unsigned long _Mask); +unsigned char _BitScanForward64(unsigned long* _Index, unsigned __int64 _Mask); +unsigned char _BitScanReverse(unsigned long* _Index, unsigned long _Mask); +unsigned char _BitScanReverse64(unsigned long* _Index, unsigned __int64 _Mask); +} +#endif + +namespace c10::llvm { +/// The behavior an operation has on an input of 0. +enum ZeroBehavior { + /// The returned value is undefined. 
+ ZB_Undefined, + /// The returned value is numeric_limits::max() + ZB_Max, + /// The returned value is numeric_limits::digits + ZB_Width +}; + +namespace detail { +template +struct TrailingZerosCounter { + static std::size_t count(T Val, ZeroBehavior) { + if (!Val) + return std::numeric_limits::digits; + if (Val & 0x1) + return 0; + + // Bisection method. + std::size_t ZeroBits = 0; + T Shift = std::numeric_limits::digits >> 1; + T Mask = std::numeric_limits::max() >> Shift; + while (Shift) { + if ((Val & Mask) == 0) { + Val >>= Shift; + ZeroBits |= Shift; + } + Shift >>= 1; + Mask >>= Shift; + } + return ZeroBits; + } +}; + +#if (defined(__GNUC__) && __GNUC__ >= 4) || defined(_MSC_VER) +template +struct TrailingZerosCounter { + static std::size_t count(T Val, ZeroBehavior ZB) { + if (ZB != ZB_Undefined && Val == 0) + return 32; + +#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0) + return __builtin_ctz(Val); +#elif defined(_MSC_VER) + unsigned long Index; + _BitScanForward(&Index, Val); + return Index; +#endif + } +}; + +#if !defined(_MSC_VER) || defined(_M_X64) +template +struct TrailingZerosCounter { + static std::size_t count(T Val, ZeroBehavior ZB) { + if (ZB != ZB_Undefined && Val == 0) + return 64; + +#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0) + return __builtin_ctzll(Val); +#elif defined(_MSC_VER) + unsigned long Index; + _BitScanForward64(&Index, Val); + return Index; +#endif + } +}; +#endif +#endif +} // namespace detail + +/// Count number of 0's from the least significant bit to the most +/// stopping at the first 1. +/// +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are +/// valid arguments. +template +std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) { + static_assert( + std::numeric_limits::is_integer && !std::numeric_limits::is_signed, + "Only unsigned integral types are allowed."); + return llvm::detail::TrailingZerosCounter::count(Val, ZB); +} + +namespace detail { +template +struct LeadingZerosCounter { + static std::size_t count(T Val, ZeroBehavior) { + if (!Val) + return std::numeric_limits::digits; + + // Bisection method. + std::size_t ZeroBits = 0; + for (T Shift = std::numeric_limits::digits >> 1; Shift; Shift >>= 1) { + T Tmp = Val >> Shift; + if (Tmp) + Val = Tmp; + else + ZeroBits |= Shift; + } + return ZeroBits; + } +}; + +#if (defined(__GNUC__) && __GNUC__ >= 4) || defined(_MSC_VER) +template +struct LeadingZerosCounter { + static std::size_t count(T Val, ZeroBehavior ZB) { + if (ZB != ZB_Undefined && Val == 0) + return 32; + +#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0) + return __builtin_clz(Val); +#elif defined(_MSC_VER) + unsigned long Index; + _BitScanReverse(&Index, Val); + return Index ^ 31; +#endif + } +}; + +#if !defined(_MSC_VER) || defined(_M_X64) +template +struct LeadingZerosCounter { + static std::size_t count(T Val, ZeroBehavior ZB) { + if (ZB != ZB_Undefined && Val == 0) + return 64; + +#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0) + return __builtin_clzll(Val); +#elif defined(_MSC_VER) + unsigned long Index; + _BitScanReverse64(&Index, Val); + return Index ^ 63; +#endif + } +}; +#endif +#endif +} // namespace detail + +/// Count number of 0's from the most significant bit to the least +/// stopping at the first 1. +/// +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are +/// valid arguments. 
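// A few concrete values for countTrailingZeros (defined above) and
// countLeadingZeros (defined just below), shown here for orientation:
//
//   c10::llvm::countTrailingZeros<uint32_t>(0x8u);   // == 3
//   c10::llvm::countTrailingZeros<uint32_t>(0u);     // == 32 (ZB_Width default)
//   c10::llvm::countLeadingZeros<uint64_t>(1u);      // == 63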
+template +std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { + static_assert( + std::numeric_limits::is_integer && !std::numeric_limits::is_signed, + "Only unsigned integral types are allowed."); + return llvm::detail::LeadingZerosCounter::count(Val, ZB); +} + +/// Get the index of the first set bit starting from the least +/// significant bit. +/// +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are +/// valid arguments. +template +T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { + if (ZB == ZB_Max && Val == 0) + return std::numeric_limits::max(); + + return countTrailingZeros(Val, ZB_Undefined); +} + +/// Create a bitmask with the N right-most bits set to 1, and all other +/// bits set to 0. Only unsigned types are allowed. +template +T maskTrailingOnes(unsigned N) { + static_assert(std::is_unsigned_v, "Invalid type!"); + const unsigned Bits = CHAR_BIT * sizeof(T); + assert(N <= Bits && "Invalid bit index"); + return N == 0 ? 0 : (T(-1) >> (Bits - N)); +} + +/// Create a bitmask with the N left-most bits set to 1, and all other +/// bits set to 0. Only unsigned types are allowed. +template +T maskLeadingOnes(unsigned N) { + return ~maskTrailingOnes(CHAR_BIT * sizeof(T) - N); +} + +/// Create a bitmask with the N right-most bits set to 0, and all other +/// bits set to 1. Only unsigned types are allowed. +template +T maskTrailingZeros(unsigned N) { + return maskLeadingOnes(CHAR_BIT * sizeof(T) - N); +} + +/// Create a bitmask with the N left-most bits set to 0, and all other +/// bits set to 1. Only unsigned types are allowed. +template +T maskLeadingZeros(unsigned N) { + return maskTrailingOnes(CHAR_BIT * sizeof(T) - N); +} + +/// Get the index of the last set bit starting from the least +/// significant bit. +/// +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are +/// valid arguments. +template +T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) { + if (ZB == ZB_Max && Val == 0) + return std::numeric_limits::max(); + + // Use ^ instead of - because both gcc and llvm can remove the associated ^ + // in the __builtin_clz intrinsic on x86. + return countLeadingZeros(Val, ZB_Undefined) ^ + (std::numeric_limits::digits - 1); +} + +/// Macro compressed bit reversal table for 256 bits. +/// +/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable +/// NOLINTNEXTLINE(*c-arrays*) +static constexpr unsigned char BitReverseTable256[256] = { +#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64 +#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16) +#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4) + R6(0), + R6(2), + R6(1), + R6(3) +#undef R2 +#undef R4 +#undef R6 +}; + +/// Reverse the bits in \p Val. +template +T reverseBits(T Val) { + // NOLINTNEXTLINE(*c-arrays*) + unsigned char in[sizeof(Val)]; + // NOLINTNEXTLINE(*c-arrays*) + unsigned char out[sizeof(Val)]; + std::memcpy(in, &Val, sizeof(Val)); + for (unsigned i = 0; i < sizeof(Val); ++i) + out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]]; + std::memcpy(&Val, out, sizeof(Val)); + return Val; +} + +// NOTE: The following support functions use the _32/_64 extensions instead of +// type overloading so that signed and unsigned integers can be used without +// ambiguity. + +/// Return the high 32 bits of a 64 bit value. 
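// Spot checks for the mask and bit-reversal helpers above:
//
//   c10::llvm::maskTrailingOnes<uint32_t>(8);   // == 0x000000FF
//   c10::llvm::maskLeadingOnes<uint32_t>(8);    // == 0xFF000000
//   c10::llvm::findLastSet(0x10u);              // == 4 (index of the set bit)
//   c10::llvm::reverseBits<uint8_t>(0x01);      // == 0x80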
+constexpr inline uint32_t Hi_32(uint64_t Value) { + return static_cast(Value >> 32); +} + +/// Return the low 32 bits of a 64 bit value. +constexpr inline uint32_t Lo_32(uint64_t Value) { + return static_cast(Value); +} + +/// Make a 64-bit integer from a high / low pair of 32-bit integers. +constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { + return ((uint64_t)High << 32) | (uint64_t)Low; +} + +/// Checks if an integer fits into the given bit width. +template +constexpr inline bool isInt(int64_t x) { + return N >= 64 || + (-(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1))); +} +// Template specializations to get better code for common cases. +template <> +constexpr inline bool isInt<8>(int64_t x) { + return static_cast(x) == x; +} +template <> +constexpr inline bool isInt<16>(int64_t x) { + return static_cast(x) == x; +} +template <> +constexpr inline bool isInt<32>(int64_t x) { + return static_cast(x) == x; +} + +/// Checks if a signed integer is an N bit number shifted left by S. +template +constexpr inline bool isShiftedInt(int64_t x) { + static_assert( + N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); + static_assert(N + S <= 64, "isShiftedInt with N + S > 64 is too wide."); + return isInt(x) && (x % (UINT64_C(1) << S) == 0); +} + +/// Checks if an unsigned integer fits into the given bit width. +/// +/// This is written as two functions rather than as simply +/// +/// return N >= 64 || X < (UINT64_C(1) << N); +/// +/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting +/// left too many places. +template +constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) { + static_assert(N > 0, "isUInt<0> doesn't make sense"); + return X < (UINT64_C(1) << (N)); +} +template +constexpr inline std::enable_if_t= 64, bool> isUInt(uint64_t /*X*/) { + return true; +} + +// Template specializations to get better code for common cases. +template <> +constexpr inline bool isUInt<8>(uint64_t x) { + return static_cast(x) == x; +} +template <> +constexpr inline bool isUInt<16>(uint64_t x) { + return static_cast(x) == x; +} +template <> +constexpr inline bool isUInt<32>(uint64_t x) { + return static_cast(x) == x; +} + +/// Checks if a unsigned integer is an N bit number shifted left by S. +template +constexpr inline bool isShiftedUInt(uint64_t x) { + static_assert( + N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); + static_assert( + N + S <= 64, "isShiftedUInt with N + S > 64 is too wide."); + // Per the two static_asserts above, S must be strictly less than 64. So + // 1 << S is not undefined behavior. + return isUInt(x) && (x % (UINT64_C(1) << S) == 0); +} + +/// Gets the maximum value for a N-bit unsigned integer. +inline uint64_t maxUIntN(uint64_t N) { + assert(N > 0 && N <= 64 && "integer width out of range"); + + // uint64_t(1) << 64 is undefined behavior, so we can't do + // (uint64_t(1) << N) - 1 + // without checking first that N != 64. But this works and doesn't have a + // branch. + return UINT64_MAX >> (64 - N); +} + +// Ignore the false warning "Arithmetic overflow" for MSVC +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4146) +#endif + +/// Gets the minimum value for a N-bit signed integer. 
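// Examples for the 32/64-bit splitting and width-checking helpers above:
//
//   c10::llvm::Make_64(0x1u, 0x2u);   // == 0x0000000100000002
//   c10::llvm::isInt<8>(127);         // true
//   c10::llvm::isInt<8>(128);         // false (does not fit in int8_t)
//   c10::llvm::isUInt<8>(255);        // true
//   c10::llvm::isUInt<8>(256);        // false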
+inline int64_t minIntN(int64_t N) { + assert(N > 0 && N <= 64 && "integer width out of range"); + // NOLINTNEXTLINE(*-narrowing-conversions) + return -(UINT64_C(1) << (N - 1)); +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +/// Gets the maximum value for a N-bit signed integer. +inline int64_t maxIntN(int64_t N) { + assert(N > 0 && N <= 64 && "integer width out of range"); + + // This relies on two's complement wraparound when N == 64, so we convert to + // int64_t only at the very end to avoid UB. + // NOLINTNEXTLINE(*-narrowing-conversions) + return (UINT64_C(1) << (N - 1)) - 1; +} + +/// Checks if an unsigned integer fits into the given (dynamic) bit width. +inline bool isUIntN(unsigned N, uint64_t x) { + return N >= 64 || x <= maxUIntN(N); +} + +/// Checks if an signed integer fits into the given (dynamic) bit width. +inline bool isIntN(unsigned N, int64_t x) { + return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N)); +} + +/// Return true if the argument is a non-empty sequence of ones starting at the +/// least significant bit with the remainder zero (32 bit version). +/// Ex. isMask_32(0x0000FFFFU) == true. +constexpr inline bool isMask_32(uint32_t Value) { + return Value && ((Value + 1) & Value) == 0; +} + +/// Return true if the argument is a non-empty sequence of ones starting at the +/// least significant bit with the remainder zero (64 bit version). +constexpr inline bool isMask_64(uint64_t Value) { + return Value && ((Value + 1) & Value) == 0; +} + +/// Return true if the argument contains a non-empty sequence of ones with the +/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. +constexpr inline bool isShiftedMask_32(uint32_t Value) { + return Value && isMask_32((Value - 1) | Value); +} + +/// Return true if the argument contains a non-empty sequence of ones with the +/// remainder zero (64 bit version.) +constexpr inline bool isShiftedMask_64(uint64_t Value) { + return Value && isMask_64((Value - 1) | Value); +} + +/// Return true if the argument is a power of two > 0. +/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) +constexpr inline bool isPowerOf2_32(uint32_t Value) { + return Value && !(Value & (Value - 1)); +} + +/// Return true if the argument is a power of two > 0 (64 bit edition.) +constexpr inline bool isPowerOf2_64(uint64_t Value) { + return Value && !(Value & (Value - 1)); +} + +/// Count the number of ones from the most significant bit to the first +/// zero bit. +/// +/// Ex. countLeadingOnes(0xFF0FFF00) == 8. +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of all ones. Only ZB_Width and +/// ZB_Undefined are valid arguments. +template +std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) { + static_assert( + std::numeric_limits::is_integer && !std::numeric_limits::is_signed, + "Only unsigned integral types are allowed."); + return countLeadingZeros(~Value, ZB); +} + +/// Count the number of ones from the least significant bit to the first +/// zero bit. +/// +/// Ex. countTrailingOnes(0x00FF00FF) == 8. +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of all ones. Only ZB_Width and +/// ZB_Undefined are valid arguments. 
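// Concrete values for the range and mask predicates above:
//
//   c10::llvm::minIntN(8);                     // == -128
//   c10::llvm::maxIntN(8);                     // == 127
//   c10::llvm::isMask_32(0x0000FFFFu);         // true
//   c10::llvm::isShiftedMask_32(0x0000FF00u);  // true
//   c10::llvm::isPowerOf2_32(0x00100000u);     // true
//   c10::llvm::isPowerOf2_32(6u);              // false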
+template +std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) { + static_assert( + std::numeric_limits::is_integer && !std::numeric_limits::is_signed, + "Only unsigned integral types are allowed."); + return countTrailingZeros(~Value, ZB); +} + +namespace detail { +template +struct PopulationCounter { + static unsigned count(T Value) { + // Generic version, forward to 32 bits. + static_assert(SizeOfT <= 4, "Not implemented!"); +#if defined(__GNUC__) && __GNUC__ >= 4 + return __builtin_popcount(Value); +#else + uint32_t v = Value; + v = v - ((v >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; +#endif + } +}; + +template +struct PopulationCounter { + static unsigned count(T Value) { +#if defined(__GNUC__) && __GNUC__ >= 4 + return __builtin_popcountll(Value); +#else + uint64_t v = Value; + v = v - ((v >> 1) & 0x5555555555555555ULL); + v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); + v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); +#endif + } +}; +} // namespace detail + +/// Count the number of set bits in a value. +/// Ex. countPopulation(0xF000F000) = 8 +/// Returns 0 if the word is zero. +template +inline unsigned countPopulation(T Value) { + static_assert( + std::numeric_limits::is_integer && !std::numeric_limits::is_signed, + "Only unsigned integral types are allowed."); + return detail::PopulationCounter::count(Value); +} + +/// Return the log base 2 of the specified value. +inline double Log2(double Value) { +#if defined(__ANDROID_API__) && __ANDROID_API__ < 18 + return __builtin_log(Value) / __builtin_log(2.0); +#else + return log2(Value); +#endif +} + +/// Return the floor log base 2 of the specified value, -1 if the value is zero. +/// (32 bit edition.) +/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 +inline unsigned Log2_32(uint32_t Value) { + return static_cast(31 - countLeadingZeros(Value)); +} + +/// Return the floor log base 2 of the specified value, -1 if the value is zero. +/// (64 bit edition.) +inline unsigned Log2_64(uint64_t Value) { + return static_cast(63 - countLeadingZeros(Value)); +} + +/// Return the ceil log base 2 of the specified value, 32 if the value is zero. +/// (32 bit edition). +/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3 +inline unsigned Log2_32_Ceil(uint32_t Value) { + return static_cast(32 - countLeadingZeros(Value - 1)); +} + +/// Return the ceil log base 2 of the specified value, 64 if the value is zero. +/// (64 bit edition.) +inline unsigned Log2_64_Ceil(uint64_t Value) { + return static_cast(64 - countLeadingZeros(Value - 1)); +} + +/// Return the greatest common divisor of the values using Euclid's algorithm. +inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) { + while (B) { + uint64_t T = B; + B = A % B; + A = T; + } + return A; +} + +/// This function takes a 64-bit integer and returns the bit equivalent double. +inline double BitsToDouble(uint64_t Bits) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + double D; + static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); + memcpy(&D, &Bits, sizeof(Bits)); + return D; +} + +/// This function takes a 32-bit integer and returns the bit equivalent float. +inline float BitsToFloat(uint32_t Bits) { + // TODO: Use std::bit_cast once C++20 becomes available. 
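// Spot checks for the popcount, log, and gcd helpers above:
//
//   c10::llvm::countPopulation(0xF000F000u);     // == 8
//   c10::llvm::Log2_32(32u);                     // == 5
//   c10::llvm::Log2_32(6u);                      // == 2 (floor)
//   c10::llvm::Log2_32_Ceil(6u);                 // == 3
//   c10::llvm::GreatestCommonDivisor64(12, 18);  // == 6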
+ return c10::bit_cast(Bits); +} + +/// This function takes a double and returns the bit equivalent 64-bit integer. +/// Note that copying doubles around changes the bits of NaNs on some hosts, +/// notably x86, so this routine cannot be used if these bits are needed. +inline uint64_t DoubleToBits(double Double) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + uint64_t Bits; + static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); + memcpy(&Bits, &Double, sizeof(Double)); + return Bits; +} + +/// This function takes a float and returns the bit equivalent 32-bit integer. +/// Note that copying floats around changes the bits of NaNs on some hosts, +/// notably x86, so this routine cannot be used if these bits are needed. +inline uint32_t FloatToBits(float Float) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + uint32_t Bits; + static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); + memcpy(&Bits, &Float, sizeof(Float)); + return Bits; +} + +/// A and B are either alignments or offsets. Return the minimum alignment that +/// may be assumed after adding the two together. +constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) { + // The largest power of 2 that divides both A and B. + // + // Replace "-Value" by "1+~Value" in the following commented code to avoid + // MSVC warning C4146 + // return (A | B) & -(A | B); + return (A | B) & (1 + ~(A | B)); +} + +/// Aligns \c Addr to \c Alignment bytes, rounding up. +/// +/// Alignment should be a power of two. This method rounds up, so +/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8. +inline uintptr_t alignAddr(const void* Addr, size_t Alignment) { + assert( + Alignment && isPowerOf2_64((uint64_t)Alignment) && + "Alignment is not a power of two!"); + + assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr); + + return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1)); +} + +/// Returns the necessary adjustment for aligning \c Ptr to \c Alignment +/// bytes, rounding up. +inline size_t alignmentAdjustment(const void* Ptr, size_t Alignment) { + return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr; +} + +/// Returns the next power of two (in 64-bits) that is strictly greater than A. +/// Returns zero on overflow. +inline uint64_t NextPowerOf2(uint64_t A) { + A |= (A >> 1); + A |= (A >> 2); + A |= (A >> 4); + A |= (A >> 8); + A |= (A >> 16); + A |= (A >> 32); + return A + 1; +} + +/// Returns the power of two which is less than or equal to the given value. +/// Essentially, it is a floor operation across the domain of powers of two. +inline uint64_t PowerOf2Floor(uint64_t A) { + if (!A) + return 0; + return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); +} + +/// Returns the power of two which is greater than or equal to the given value. +/// Essentially, it is a ceil operation across the domain of powers of two. +inline uint64_t PowerOf2Ceil(uint64_t A) { + if (!A) + return 0; + return NextPowerOf2(A - 1); +} + +/// Returns the next integer (mod 2**64) that is greater than or equal to +/// \p Value and is a multiple of \p Align. \p Align must be non-zero. +/// +/// If non-zero \p Skew is specified, the return value will be a minimal +/// integer that is greater than or equal to \p Value and equal to +/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than +/// \p Align, its value is adjusted to '\p Skew mod \p Align'. 
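// Quick examples for the alignment and power-of-two helpers above:
//
//   c10::llvm::MinAlign(8, 12);        // == 4
//   c10::llvm::NextPowerOf2(5);        // == 8 (strictly greater than the input)
//   c10::llvm::NextPowerOf2(8);        // == 16
//   c10::llvm::PowerOf2Floor(20);      // == 16
//   c10::llvm::PowerOf2Ceil(20);       // == 32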
+/// +/// Examples: +/// \code +/// alignTo(5, 8) = 8 +/// alignTo(17, 8) = 24 +/// alignTo(~0LL, 8) = 0 +/// alignTo(321, 255) = 510 +/// +/// alignTo(5, 8, 7) = 7 +/// alignTo(17, 8, 1) = 17 +/// alignTo(~0LL, 8, 3) = 3 +/// alignTo(321, 255, 42) = 552 +/// \endcode +inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { + assert(Align != 0u && "Align can't be 0."); + Skew %= Align; + return (Value + Align - 1 - Skew) / Align * Align + Skew; +} + +/// Returns the next integer (mod 2**64) that is greater than or equal to +/// \p Value and is a multiple of \c Align. \c Align must be non-zero. +template +constexpr inline uint64_t alignTo(uint64_t Value) { + static_assert(Align != 0u, "Align must be non-zero"); + return (Value + Align - 1) / Align * Align; +} + +/// Returns the integer ceil(Numerator / Denominator). +inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) { + return alignTo(Numerator, Denominator) / Denominator; +} + +/// \c alignTo for contexts where a constant expression is required. +/// \sa alignTo +/// +/// \todo FIXME: remove when \c constexpr becomes really \c constexpr +template +struct AlignTo { + static_assert(Align != 0u, "Align must be non-zero"); + template + struct from_value { + static const uint64_t value = (Value + Align - 1) / Align * Align; + }; +}; + +/// Returns the largest uint64_t less than or equal to \p Value and is +/// \p Skew mod \p Align. \p Align must be non-zero +inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { + assert(Align != 0u && "Align can't be 0."); + Skew %= Align; + return (Value - Skew) / Align * Align + Skew; +} + +/// Returns the offset to the next integer (mod 2**64) that is greater than +/// or equal to \p Value and is a multiple of \p Align. \p Align must be +/// non-zero. +inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { + return alignTo(Value, Align) - Value; +} + +/// Sign-extend the number in the bottom B bits of X to a 32-bit integer. +/// Requires 0 < B <= 32. +template +constexpr inline int32_t SignExtend32(uint32_t X) { + static_assert(B > 0, "Bit width can't be 0."); + static_assert(B <= 32, "Bit width out of range."); + return int32_t(X << (32 - B)) >> (32 - B); +} + +/// Sign-extend the number in the bottom B bits of X to a 32-bit integer. +/// Requires 0 < B < 32. +inline int32_t SignExtend32(uint32_t X, unsigned B) { + assert(B > 0 && "Bit width can't be 0."); + assert(B <= 32 && "Bit width out of range."); + return int32_t(X << (32 - B)) >> (32 - B); +} + +/// Sign-extend the number in the bottom B bits of X to a 64-bit integer. +/// Requires 0 < B < 64. +template +constexpr inline int64_t SignExtend64(uint64_t x) { + static_assert(B > 0, "Bit width can't be 0."); + static_assert(B <= 64, "Bit width out of range."); + return int64_t(x << (64 - B)) >> (64 - B); +} + +/// Sign-extend the number in the bottom B bits of X to a 64-bit integer. +/// Requires 0 < B < 64. +inline int64_t SignExtend64(uint64_t X, unsigned B) { + assert(B > 0 && "Bit width can't be 0."); + assert(B <= 64 && "Bit width out of range."); + return int64_t(X << (64 - B)) >> (64 - B); +} + +/// Subtract two unsigned integers, X and Y, of type T and return the absolute +/// value of the result. +template +std::enable_if_t, T> AbsoluteDifference(T X, T Y) { + return std::max(X, Y) - std::min(X, Y); +} + +/// Add two unsigned integers, X and Y, of type T. Clamp the result to the +/// maximum representable value of T on overflow. 
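// Examples for the sign-extension and rounding helpers above:
//
//   c10::llvm::SignExtend32<8>(0xFFu);       // == -1
//   c10::llvm::SignExtend32<8>(0x7Fu);       // == 127
//   c10::llvm::SignExtend64<16>(0x8000u);    // == -32768
//   c10::llvm::alignDown(17, 8);             // == 16
//   c10::llvm::AbsoluteDifference(3u, 10u);  // == 7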
ResultOverflowed indicates if +/// the result is larger than the maximum representable value of type T. +template +std::enable_if_t, T> SaturatingAdd( + T X, + T Y, + bool* ResultOverflowed = nullptr) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool Dummy; + bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; + // Hacker's Delight, p. 29 + T Z = X + Y; + Overflowed = (Z < X || Z < Y); + if (Overflowed) + return std::numeric_limits::max(); + else + return Z; +} + +/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the +/// maximum representable value of T on overflow. ResultOverflowed indicates if +/// the result is larger than the maximum representable value of type T. +template +std::enable_if_t, T> SaturatingMultiply( + T X, + T Y, + bool* ResultOverflowed = nullptr) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool Dummy; + bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; + + // Hacker's Delight, p. 30 has a different algorithm, but we don't use that + // because it fails for uint16_t (where multiplication can have undefined + // behavior due to promotion to int), and requires a division in addition + // to the multiplication. + + Overflowed = false; + + // Log2(Z) would be either Log2Z or Log2Z + 1. + // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z + // will necessarily be less than Log2Max as desired. + int Log2Z = Log2_64(X) + Log2_64(Y); + const T Max = std::numeric_limits::max(); + int Log2Max = Log2_64(Max); + if (Log2Z < Log2Max) { + return X * Y; + } + if (Log2Z > Log2Max) { + Overflowed = true; + return Max; + } + + // We're going to use the top bit, and maybe overflow one + // bit past it. Multiply all but the bottom bit then add + // that on at the end. + T Z = (X >> 1) * Y; + if (Z & ~(Max >> 1)) { + Overflowed = true; + return Max; + } + Z <<= 1; + if (X & 1) + return SaturatingAdd(Z, Y, ResultOverflowed); + + return Z; +} + +/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to +/// the product. Clamp the result to the maximum representable value of T on +/// overflow. ResultOverflowed indicates if the result is larger than the +/// maximum representable value of type T. +template +std::enable_if_t, T> SaturatingMultiplyAdd( + T X, + T Y, + T A, + bool* ResultOverflowed = nullptr) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool Dummy; + bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; + + T Product = SaturatingMultiply(X, Y, &Overflowed); + if (Overflowed) + return Product; + + return SaturatingAdd(A, Product, &Overflowed); +} + +/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC. +extern const float huge_valf; +} // namespace c10::llvm diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/qint8.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/qint8.h new file mode 100644 index 0000000000000000000000000000000000000000..439aeec8aafb8ed989709d50e497466077c7fb46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/qint8.h @@ -0,0 +1,20 @@ +#pragma once +#include + +#include + +namespace c10 { + +/** + * This is the data type for quantized Tensors. Right now we only have + * qint8 which is for 8 bit Tensors, and qint32 for 32 bit int Tensors, + * we might have 4 bit, 2 bit or 1 bit data types in the future. 
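// Behaviour sketch for the saturating arithmetic helpers defined above in
// llvmMathExtras.h:
//
//   bool ov = false;
//   c10::llvm::SaturatingAdd<uint8_t>(200, 100, &ov);       // == 255,   ov == true
//   c10::llvm::SaturatingMultiply<uint16_t>(300, 300, &ov); // == 65535, ov == true
//   c10::llvm::SaturatingMultiplyAdd<uint32_t>(10, 10, 5);  // == 105, no overflow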
+ */ +struct alignas(1) qint8 { + using underlying = int8_t; + int8_t val_; + qint8() = default; + C10_HOST_DEVICE explicit qint8(int8_t val) : val_(val) {} +}; + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/static_tracepoint_elfx86.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/static_tracepoint_elfx86.h new file mode 100644 index 0000000000000000000000000000000000000000..da87fb7149b38e0a03403ad67e0f67c99c1de502 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/static_tracepoint_elfx86.h @@ -0,0 +1,132 @@ +#pragma once + +// clang-format off + +// Default constraint for the probe arguments as operands. +#ifndef TORCH_SDT_ARG_CONSTRAINT +#define TORCH_SDT_ARG_CONSTRAINT "nor" +#endif + +// Instruction to emit for the probe. +#define TORCH_SDT_NOP nop + +// Note section properties. +#define TORCH_SDT_NOTE_NAME "stapsdt" +#define TORCH_SDT_NOTE_TYPE 3 + +// Semaphore variables are put in this section +#define TORCH_SDT_SEMAPHORE_SECTION ".probes" + +// Size of address depending on platform. +#ifdef __LP64__ +#define TORCH_SDT_ASM_ADDR .8byte +#else +#define TORCH_SDT_ASM_ADDR .4byte +#endif + +// Assembler helper Macros. +#define TORCH_SDT_S(x) #x +#define TORCH_SDT_ASM_1(x) TORCH_SDT_S(x) "\n" +#define TORCH_SDT_ASM_2(a, b) TORCH_SDT_S(a) "," TORCH_SDT_S(b) "\n" +#define TORCH_SDT_ASM_3(a, b, c) TORCH_SDT_S(a) "," TORCH_SDT_S(b) "," \ + TORCH_SDT_S(c) "\n" +#define TORCH_SDT_ASM_STRING(x) TORCH_SDT_ASM_1(.asciz TORCH_SDT_S(x)) + +// Helper to determine the size of an argument. +#define TORCH_SDT_IS_ARRAY_POINTER(x) ((__builtin_classify_type(x) == 14) || \ + (__builtin_classify_type(x) == 5)) +#define TORCH_SDT_ARGSIZE(x) (TORCH_SDT_IS_ARRAY_POINTER(x) \ + ? sizeof(void*) \ + : sizeof(x)) + +// Format of each probe arguments as operand. +// Size of the argument tagged with TORCH_SDT_Sn, with "n" constraint. +// Value of the argument tagged with TORCH_SDT_An, with configured constraint. +#define TORCH_SDT_ARG(n, x) \ + [TORCH_SDT_S##n] "n" ((size_t)TORCH_SDT_ARGSIZE(x)), \ + [TORCH_SDT_A##n] TORCH_SDT_ARG_CONSTRAINT (x) + +// Templates to append arguments as operands. +#define TORCH_SDT_OPERANDS_0() [__sdt_dummy] "g" (0) +#define TORCH_SDT_OPERANDS_1(_1) TORCH_SDT_ARG(1, _1) +#define TORCH_SDT_OPERANDS_2(_1, _2) \ + TORCH_SDT_OPERANDS_1(_1), TORCH_SDT_ARG(2, _2) +#define TORCH_SDT_OPERANDS_3(_1, _2, _3) \ + TORCH_SDT_OPERANDS_2(_1, _2), TORCH_SDT_ARG(3, _3) +#define TORCH_SDT_OPERANDS_4(_1, _2, _3, _4) \ + TORCH_SDT_OPERANDS_3(_1, _2, _3), TORCH_SDT_ARG(4, _4) +#define TORCH_SDT_OPERANDS_5(_1, _2, _3, _4, _5) \ + TORCH_SDT_OPERANDS_4(_1, _2, _3, _4), TORCH_SDT_ARG(5, _5) +#define TORCH_SDT_OPERANDS_6(_1, _2, _3, _4, _5, _6) \ + TORCH_SDT_OPERANDS_5(_1, _2, _3, _4, _5), TORCH_SDT_ARG(6, _6) +#define TORCH_SDT_OPERANDS_7(_1, _2, _3, _4, _5, _6, _7) \ + TORCH_SDT_OPERANDS_6(_1, _2, _3, _4, _5, _6), TORCH_SDT_ARG(7, _7) +#define TORCH_SDT_OPERANDS_8(_1, _2, _3, _4, _5, _6, _7, _8) \ + TORCH_SDT_OPERANDS_7(_1, _2, _3, _4, _5, _6, _7), TORCH_SDT_ARG(8, _8) +#define TORCH_SDT_OPERANDS_9(_1, _2, _3, _4, _5, _6, _7, _8, _9) \ + TORCH_SDT_OPERANDS_8(_1, _2, _3, _4, _5, _6, _7, _8), TORCH_SDT_ARG(9, _9) + +// Templates to reference the arguments from operands in note section. 
+#define TORCH_SDT_ARGFMT(no) %n[TORCH_SDT_S##no]@%[TORCH_SDT_A##no] +#define TORCH_SDT_ARG_TEMPLATE_0 /*No arguments*/ +#define TORCH_SDT_ARG_TEMPLATE_1 TORCH_SDT_ARGFMT(1) +#define TORCH_SDT_ARG_TEMPLATE_2 TORCH_SDT_ARG_TEMPLATE_1 TORCH_SDT_ARGFMT(2) +#define TORCH_SDT_ARG_TEMPLATE_3 TORCH_SDT_ARG_TEMPLATE_2 TORCH_SDT_ARGFMT(3) +#define TORCH_SDT_ARG_TEMPLATE_4 TORCH_SDT_ARG_TEMPLATE_3 TORCH_SDT_ARGFMT(4) +#define TORCH_SDT_ARG_TEMPLATE_5 TORCH_SDT_ARG_TEMPLATE_4 TORCH_SDT_ARGFMT(5) +#define TORCH_SDT_ARG_TEMPLATE_6 TORCH_SDT_ARG_TEMPLATE_5 TORCH_SDT_ARGFMT(6) +#define TORCH_SDT_ARG_TEMPLATE_7 TORCH_SDT_ARG_TEMPLATE_6 TORCH_SDT_ARGFMT(7) +#define TORCH_SDT_ARG_TEMPLATE_8 TORCH_SDT_ARG_TEMPLATE_7 TORCH_SDT_ARGFMT(8) +#define TORCH_SDT_ARG_TEMPLATE_9 TORCH_SDT_ARG_TEMPLATE_8 TORCH_SDT_ARGFMT(9) + +// Semaphore define, declare and probe note format + +#define TORCH_SDT_SEMAPHORE(provider, name) \ + torch_sdt_semaphore_##provider##_##name + +#define TORCH_SDT_DEFINE_SEMAPHORE(name) \ + extern "C" { \ + volatile unsigned short TORCH_SDT_SEMAPHORE(pytorch, name) \ + __attribute__((section(TORCH_SDT_SEMAPHORE_SECTION), used)) = 0; \ + } + +#define TORCH_SDT_DECLARE_SEMAPHORE(name) \ + extern "C" volatile unsigned short TORCH_SDT_SEMAPHORE(pytorch, name) + +#define TORCH_SDT_SEMAPHORE_NOTE_0(provider, name) \ + TORCH_SDT_ASM_1( TORCH_SDT_ASM_ADDR 0) /*No Semaphore*/ \ + +#define TORCH_SDT_SEMAPHORE_NOTE_1(provider, name) \ + TORCH_SDT_ASM_1(TORCH_SDT_ASM_ADDR TORCH_SDT_SEMAPHORE(provider, name)) + +// Structure of note section for the probe. +#define TORCH_SDT_NOTE_CONTENT(provider, name, has_semaphore, arg_template) \ + TORCH_SDT_ASM_1(990: TORCH_SDT_NOP) \ + TORCH_SDT_ASM_3( .pushsection .note.stapsdt,"","note") \ + TORCH_SDT_ASM_1( .balign 4) \ + TORCH_SDT_ASM_3( .4byte 992f-991f, 994f-993f, TORCH_SDT_NOTE_TYPE) \ + TORCH_SDT_ASM_1(991: .asciz TORCH_SDT_NOTE_NAME) \ + TORCH_SDT_ASM_1(992: .balign 4) \ + TORCH_SDT_ASM_1(993: TORCH_SDT_ASM_ADDR 990b) \ + TORCH_SDT_ASM_1( TORCH_SDT_ASM_ADDR 0) /*Reserved for Base Address*/ \ + TORCH_SDT_SEMAPHORE_NOTE_##has_semaphore(provider, name) \ + TORCH_SDT_ASM_STRING(provider) \ + TORCH_SDT_ASM_STRING(name) \ + TORCH_SDT_ASM_STRING(arg_template) \ + TORCH_SDT_ASM_1(994: .balign 4) \ + TORCH_SDT_ASM_1( .popsection) + +// Main probe Macro. +#define TORCH_SDT_PROBE(provider, name, has_semaphore, n, arglist) \ + __asm__ __volatile__ ( \ + TORCH_SDT_NOTE_CONTENT( \ + provider, name, has_semaphore, TORCH_SDT_ARG_TEMPLATE_##n) \ + :: TORCH_SDT_OPERANDS_##n arglist \ + ) \ + +// Helper Macros to handle variadic arguments. +#define TORCH_SDT_NARG_(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, N, ...) N +#define TORCH_SDT_NARG(...) \ + TORCH_SDT_NARG_(__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define TORCH_SDT_PROBE_N(provider, name, has_semaphore, N, ...) \ + TORCH_SDT_PROBE(provider, name, has_semaphore, N, (__VA_ARGS__)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/strides.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/strides.h new file mode 100644 index 0000000000000000000000000000000000000000..980540b5b97a4d2ad8fae0c0ea5364078eab860f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/strides.h @@ -0,0 +1,24 @@ +#pragma once +#include +#include +#include + +namespace c10 { + +// Computes the contiguous strides of a tensor, given its sizes. 
+static inline DimVector contiguous_strides(const IntArrayRef sizes) { + using Int = IntArrayRef::value_type; + const Int dims = static_cast(sizes.size()); + + // With this initialisation we get the case dim == 0 or 1 right + DimVector strides(dims, 1); + + for (auto i = dims - 2; i >= 0; --i) { + // Strides can't be 0 even if sizes are 0. + strides[i] = strides[i + 1] * std::max(sizes[i + 1], Int{1}); + } + + return strides; +} + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/strong_type.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/strong_type.h new file mode 100644 index 0000000000000000000000000000000000000000..da17e2da89345cf2885c623245176ec3b6ee2906 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/strong_type.h @@ -0,0 +1,1682 @@ +/* + * strong_type C++14/17/20 strong typedef library + * + * Copyright (C) Björn Fahller + * + * Use, modification and distribution is subject to the + * Boost Software License, Version 1.0. (See accompanying + * file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * Project home: https://github.com/rollbear/strong_type + */ + +#ifndef ROLLBEAR_STRONG_TYPE_HPP_INCLUDED +#define ROLLBEAR_STRONG_TYPE_HPP_INCLUDED + +#include +#include +#include +#include +#include + +#if __cplusplus >= 201703L +#define STRONG_NODISCARD [[nodiscard]] +#else +#define STRONG_NODISCARD +#endif + +#if defined(_MSC_VER) && !defined(__clang__) && __MSC_VER < 1922 +#define STRONG_CONSTEXPR +#else +#define STRONG_CONSTEXPR constexpr +#endif + +#ifndef STRONG_HAS_STD_FORMAT +#define STRONG_HAS_STD_FORMAT 0 +#endif + +#ifndef STRONG_HAS_FMT_FORMAT +#define STRONG_HAS_FMT_FORMAT 0 +#endif + +#if STRONG_HAS_STD_FORMAT +#include +#if !defined(__cpp_lib_format) || __cpp_lib_format < 201907 +#undef STRONG_HAS_STD_FORMAT +#define STRONG_HAS_STD_FORMAT 0 +#endif +#endif + +#if STRONG_HAS_FMT_FORMAT +#include +#endif + +namespace strong +{ + +namespace impl +{ + template + using WhenConstructible = std::enable_if_t::value>; +} + +template +using modifier = typename M::template modifier; + +struct uninitialized_t {}; +static constexpr uninitialized_t uninitialized{}; + +struct default_constructible +{ + template + class modifier + { + }; +}; + +namespace impl { + template + constexpr bool supports_default_construction(const ::strong::default_constructible::modifier*) + { + return true; + } +} + +template +class type : public modifier>... +{ +public: + template {}>> + explicit type(uninitialized_t) + noexcept + { + } + template (nullptr))> + constexpr + type() + noexcept(noexcept(T{})) + : val{} + { + } + + template >> + constexpr + explicit + type( + std::initializer_list us + ) + noexcept(noexcept(T{us})) + : val{us} + { + } + template ::value && (sizeof...(U) > 0)>> + constexpr + explicit + type( + U&& ... u) + noexcept(std::is_nothrow_constructible::value) + : val(std::forward(u)...) 
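// Worked example for contiguous_strides() above: for sizes {2, 3, 4} the
// strides come out as {12, 4, 1}, and a size-0 dimension still yields a
// stride of at least 1, e.g. sizes {2, 0, 4} -> strides {4, 4, 1}.
//
//   c10::DimVector s = c10::contiguous_strides({2, 3, 4});  // {12, 4, 1}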
+ {} + + friend STRONG_CONSTEXPR void swap(type& a, type& b) noexcept( + std::is_nothrow_move_constructible::value && + std::is_nothrow_move_assignable::value + ) + { + using std::swap; + swap(a.val, b.val); + } + + STRONG_NODISCARD + constexpr T& value_of() & noexcept { return val;} + STRONG_NODISCARD + constexpr const T& value_of() const & noexcept { return val;} + STRONG_NODISCARD + constexpr T&& value_of() && noexcept { return std::move(val);} + + STRONG_NODISCARD + friend constexpr T& value_of(type& t) noexcept { return t.val;} + STRONG_NODISCARD + friend constexpr const T& value_of(const type& t) noexcept { return t.val;} + STRONG_NODISCARD + friend constexpr T&& value_of(type&& t) noexcept { return std::move(t).val;} +private: + T val; +}; + +namespace impl { + template + constexpr bool is_strong_type_func(const strong::type*) { return true;} + constexpr bool is_strong_type_func(...) { return false;} + template + constexpr T underlying_type(strong::type*); + +} + +template +struct is_strong_type : std::integral_constant(nullptr))> {}; + +namespace impl { + template + using WhenStrongType = std::enable_if_t>::value>; + template + using WhenNotStrongType = std::enable_if_t>::value>; +} + +template ::value> +struct underlying_type +{ + using type = decltype(impl::underlying_type(static_cast(nullptr))); +}; + +template +struct underlying_type +{ + using type = T; +}; + +template +using underlying_type_t = typename underlying_type::type; + + +namespace impl { + template< + typename T, + typename = impl::WhenNotStrongType> + constexpr + T && + access(T &&t) + noexcept { + return std::forward(t); + } + template < + typename T, + typename = impl::WhenStrongType> + STRONG_NODISCARD + constexpr + auto + access(T&& t) + noexcept + -> decltype(value_of(std::forward(t))) + { + return value_of(std::forward(t)); + } + +} +struct equality +{ + template + class modifier; +}; + + +template +class equality::modifier<::strong::type> +{ + using type = ::strong::type; +public: + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator==( + const type& lh, + const type& rh) + noexcept(noexcept(std::declval() == std::declval())) + -> decltype(std::declval() == std::declval()) + { + return value_of(lh) == value_of(rh); + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator!=( + const type& lh, + const type& rh) + noexcept(noexcept(std::declval() != std::declval())) + -> decltype(std::declval() != std::declval()) + { + return value_of(lh) != value_of(rh); + } +}; + +namespace impl +{ + template + class typed_equality + { + private: + using TT = underlying_type_t; + using OT = underlying_type_t; + public: + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator==(const T& lh, const Other& rh) + noexcept(noexcept(std::declval() == std::declval())) + -> decltype(std::declval() == std::declval()) + { + return value_of(lh) == impl::access(rh); + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator==(const Other& lh, const T& rh) + noexcept(noexcept(std::declval() == std::declval())) + -> decltype(std::declval() == std::declval()) + { + return impl::access(lh) == value_of(rh) ; + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator!=(const T& lh, const Other rh) + noexcept(noexcept(std::declval() != std::declval())) + -> decltype(std::declval() != std::declval()) + { + return value_of(lh) != impl::access(rh); + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator!=(const Other& lh, const T& rh) + noexcept(noexcept(std::declval() != std::declval())) + 
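// Minimal usage sketch for strong::type and the equality modifier above
// (Index and the tag struct are illustrative names):
//
//   using Index = strong::type<int, struct index_tag, strong::equality>;
//   Index a{3};
//   Index b{3};
//   bool same = (a == b);    // true; a plain int == Index does not compile
//   int raw   = value_of(a); // access the underlying value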
-> decltype(std::declval() != std::declval()) + { + return impl::access(lh) != value_of(rh) ; + } + }; +} +template +struct equality_with +{ + template + class modifier : public impl::typed_equality... + { + }; +}; + +namespace impl +{ + template + class typed_ordering + { + private: + using TT = underlying_type_t; + using OT = underlying_type_t; + public: + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator<(const T& lh, const Other& rh) + noexcept(noexcept(std::declval() < std::declval())) + -> decltype(std::declval() < std::declval()) + { + return value_of(lh) < impl::access(rh); + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator<(const Other& lh, const T& rh) + noexcept(noexcept(std::declval() < std::declval())) + -> decltype(std::declval() < std::declval()) + { + return impl::access(lh) < value_of(rh) ; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator<=(const T& lh, const Other& rh) + noexcept(noexcept(std::declval() <= std::declval())) + -> decltype(std::declval() <= std::declval()) + { + return value_of(lh) <= impl::access(rh); + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator<=(const Other& lh, const T& rh) + noexcept(noexcept(std::declval() <= std::declval())) + -> decltype(std::declval() <= std::declval()) + { + return impl::access(lh) <= value_of(rh) ; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator>(const T& lh, const Other& rh) + noexcept(noexcept(std::declval() > std::declval())) + -> decltype(std::declval() > std::declval()) + { + return value_of(lh) > impl::access(rh); + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator>(const Other& lh, const T& rh) + noexcept(noexcept(std::declval() > std::declval())) + -> decltype(std::declval() > std::declval()) + { + return impl::access(lh) > value_of(rh) ; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator>=(const T& lh, const Other& rh) + noexcept(noexcept(std::declval() >= std::declval())) + -> decltype(std::declval() >= std::declval()) + { + return value_of(lh) >= impl::access(rh); + } + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto operator>=(const Other& lh, const T& rh) + noexcept(noexcept(std::declval() >= std::declval())) + -> decltype(std::declval() >= std::declval()) + { + return impl::access(lh) >= value_of(rh) ; + } + }; +} + +template +struct ordered_with +{ + template + class modifier : public impl::typed_ordering... 
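// Sketch for the heterogeneous comparison modifiers defined here:
// equality_with and ordered_with allow comparisons against the listed types
// only (Meters is an illustrative name):
//
//   using Meters =
//       strong::type<double, struct meters_tag,
//                    strong::equality_with<double>,
//                    strong::ordered_with<double>>;
//   Meters m{2.5};
//   bool eq = (m == 2.5);   // ok: double is listed in equality_with
//   bool lt = (m < 3.0);    // ok: double is listed in ordered_with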
+ { + }; +}; + +namespace impl +{ + template + struct require_copy_constructible + { + static constexpr bool value = std::is_copy_constructible>::value; + static_assert(value, "underlying type must be copy constructible"); + }; + template + struct require_move_constructible + { + static constexpr bool value = std::is_move_constructible>::value; + static_assert(value, "underlying type must be move constructible"); + }; + template + struct require_copy_assignable + { + static constexpr bool value = std::is_copy_assignable>::value; + static_assert(value, "underlying type must be copy assignable"); + }; + template + struct require_move_assignable + { + static constexpr bool value = std::is_move_assignable>::value; + static_assert(value, "underlying type must be move assignable"); + }; + + template struct valid_type; + template <> + struct valid_type {}; + + template + struct require_semiregular + : valid_type::value && + require_move_constructible::value && + require_copy_assignable::value && + require_move_assignable::value> + { + }; + +} +struct semiregular +{ + template + class modifier; +}; + +template +class semiregular::modifier<::strong::type> + : public default_constructible::modifier + , private impl::require_semiregular +{ +}; + +struct regular +{ + template + class modifier + : public semiregular::modifier + , public equality::modifier + { + }; +}; + +struct unique +{ + template + class modifier + : private impl::valid_type< + impl::require_move_constructible::value && + impl::require_move_assignable::value + > + { + public: + constexpr modifier() = default; + modifier(const modifier&) = delete; + constexpr modifier(modifier&&) = default; + modifier& operator=(const modifier&) = delete; + constexpr modifier& operator=(modifier&&) = default; + }; +}; +struct ordered +{ + template + class modifier; +}; + + +template +class ordered::modifier<::strong::type> +{ + using type = ::strong::type; +public: + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator<( + const type& lh, + const type& rh) + noexcept(noexcept(std::declval() < std::declval())) + -> decltype(std::declval() < std::declval()) + { + return value_of(lh) < value_of(rh); + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator<=( + const type& lh, + const type& rh) + noexcept(noexcept(std::declval() <= std::declval())) + -> decltype(std::declval() <= std::declval()) + { + return value_of(lh) <= value_of(rh); + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator>( + const type& lh, + const type& rh) + noexcept(noexcept(std::declval() > std::declval())) + -> decltype(std::declval() > std::declval()) + { + return value_of(lh) > value_of(rh); + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + + auto + operator>=( + const type& lh, + const type& rh) + noexcept(noexcept(std::declval() >= std::declval())) + -> decltype(std::declval() >= std::declval()) + { + return value_of(lh) >= value_of(rh); + } +}; + +struct ostreamable +{ + template + class modifier + { + public: + friend + std::ostream& + operator<<( + std::ostream &os, + const T &t) + { + return os << value_of(t); + } + }; +}; + +struct istreamable +{ + template + class modifier + { + public: + friend + std::istream& + operator>>( + std::istream &is, + T &t) + { + return is >> value_of(t); + } + }; +}; + +struct iostreamable +{ + template + class modifier + : public ostreamable::modifier + , public istreamable::modifier + { + }; +}; + +struct incrementable +{ + template + class modifier + { + public: + friend + STRONG_CONSTEXPR + 
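// Common modifier bundles from this header, sketched with illustrative names:
//
//   // regular: default constructible, copyable, movable, and equality
//   // comparable -- a convenient default for value-like wrappers.
//   using Id = strong::type<int, struct id_tag, strong::regular>;
//
//   // unique: move-only; copy construction and copy assignment are deleted.
//   using Token = strong::type<std::unique_ptr<int>, struct token_tag,
//                              strong::unique>;
//
//   // ordered: adds <, <=, >, >= between values of the same strong type.
//   using Rank = strong::type<int, struct rank_tag, strong::ordered>;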
T& + operator++(T& t) + noexcept(noexcept(++std::declval().value_of())) + { + ++value_of(t); + return t; + } + + friend + STRONG_CONSTEXPR + T + operator++(T& t, int) + { + auto copy = t; + ++t; + return copy; + } + }; +}; + +struct decrementable +{ + template + class modifier + { + public: + friend + STRONG_CONSTEXPR + T& + operator--(T& t) + noexcept(noexcept(--std::declval().value_of())) + { + --value_of(t); + return t; + } + + friend + STRONG_CONSTEXPR + T + operator--(T& t, int) + { + auto copy = t; + --t; + return copy; + } + }; +}; + +struct bicrementable +{ + template + class modifier + : public incrementable::modifier + , public decrementable::modifier + { + }; +}; + +struct boolean +{ + template + class modifier + { + public: + explicit STRONG_CONSTEXPR operator bool() const + noexcept(noexcept(static_cast(value_of(std::declval())))) + { + const auto& self = static_cast(*this); + return static_cast(value_of(self)); + } + }; +}; + +struct hashable +{ + template + class modifier{}; +}; + +struct difference +{ + template + class modifier; +}; + +template +class difference::modifier<::strong::type> +: public ordered::modifier<::strong::type> +, public equality::modifier<::strong::type> +{ + using type = ::strong::type; +public: + friend + STRONG_CONSTEXPR + type& operator+=(type& lh, const type& rh) + noexcept(noexcept(value_of(lh) += value_of(rh))) + { + value_of(lh) += value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + type& operator-=(type& lh, const type& rh) + noexcept(noexcept(value_of(lh) -= value_of(rh))) + { + value_of(lh) -= value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + type& operator*=(type& lh, const T& rh) + noexcept(noexcept(value_of(lh) *= rh)) + { + value_of(lh) *= rh; + return lh; + } + + friend + STRONG_CONSTEXPR + type& operator/=(type& lh, const T& rh) + noexcept(noexcept(value_of(lh) /= rh)) + { + value_of(lh) /= rh; + return lh; + } + + template ()%= std::declval())> + friend + STRONG_CONSTEXPR + type& operator%=(type& lh, const T& rh) + noexcept(noexcept(value_of(lh) %= rh)) + { + value_of(lh)%= rh; + return lh; + } + + friend + STRONG_CONSTEXPR + type operator+(type lh, const type& rh) + { + lh += rh; + return lh; + } + + friend + STRONG_CONSTEXPR + type operator-(type lh, const type& rh) + { + lh -= rh; + return lh; + } + + friend + STRONG_CONSTEXPR + type operator*(type lh, const T& rh) + { + lh *= rh; + return lh; + } + + friend + STRONG_CONSTEXPR + type operator*(const T& lh, type rh) + { + rh *= lh; + return rh; + } + + friend + STRONG_CONSTEXPR + type operator/(type lh, const T& rh) + { + lh /= rh; + return lh; + } + + friend + STRONG_CONSTEXPR + T operator/(const type& lh, const type& rh) + { + return value_of(lh) / value_of(rh); + } + + template () %= std::declval())> + friend + STRONG_CONSTEXPR + type operator%(type lh, const T& rh) + noexcept(noexcept(lh%= rh)) + { + lh %= rh; + return lh; + } + + template () % std::declval())> + friend + STRONG_CONSTEXPR + T operator%(type lh, type rh) + noexcept(noexcept(value_of(lh) % value_of(rh))) + { + return value_of(lh) % value_of(rh); + } +}; + +template +struct affine_point +{ + template + class modifier; +}; + +namespace impl +{ + template + using void_t = void; + + template + struct subtractable : std::false_type {}; + + template + struct subtractable() - std::declval())>> + : std::true_type {}; +} + + +template +template +class affine_point::modifier<::strong::type> +{ + using type = ::strong::type; + static_assert(impl::subtractable::value, "it must be possible to subtract 
instances of your underlying type"); + using base_diff_type = decltype(std::declval() - std::declval()); +public: + using difference = std::conditional_t{}, strong::type, D>; + static_assert(std::is_constructible::value, ""); + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + difference + operator-( + const type& lh, + const type& rh) + { + return difference(value_of(lh) - value_of(rh)); + } + + friend + STRONG_CONSTEXPR + type& + operator+=( + type& lh, + const difference& d) + noexcept(noexcept(value_of(lh) += impl::access(d))) + { + value_of(lh) += impl::access(d); + return lh; + } + + friend + STRONG_CONSTEXPR + type& + operator-=( + type& lh, + const difference& d) + noexcept(noexcept(value_of(lh) -= impl::access(d))) + { + value_of(lh) -= impl::access(d); + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + type + operator+( + type lh, + const difference& d) + { + return lh += d; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + type + operator+( + const difference& d, + type rh) + { + return rh+= d; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + type + operator-( + type lh, + const difference& d) + { + return lh -= d; + } +}; + + +struct pointer +{ + template + class modifier; +}; + +template +class pointer::modifier<::strong::type> +{ + using type = strong::type; +public: + template + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator==( + const type& t, + std::nullptr_t) + noexcept(noexcept(std::declval() == nullptr)) + -> decltype(std::declval() == nullptr) + { + return value_of(t) == nullptr; + } + + template + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator==( + std::nullptr_t, + const type& t) + noexcept(noexcept(nullptr == std::declval())) + -> decltype(nullptr == std::declval()) + { + return value_of(t) == nullptr; + } + + template + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator!=( + const type& t, + std::nullptr_t) + noexcept(noexcept(std::declval() != nullptr)) + -> decltype(std::declval() != nullptr) + { + return value_of(t) != nullptr; + } + + template + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + auto + operator!=( + std::nullptr_t, + const type& t) + noexcept(noexcept(nullptr != std::declval())) + -> decltype(nullptr != std::declval()) + { + return value_of(t) != nullptr; + } + + STRONG_NODISCARD + STRONG_CONSTEXPR + decltype(*std::declval()) + operator*() + const + { + auto& self = static_cast(*this); + return *value_of(self); + } + + STRONG_NODISCARD + STRONG_CONSTEXPR + decltype(&(*std::declval())) operator->() const { return &operator*();} +}; + +struct arithmetic +{ + template + class modifier + { + public: + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator-( + const T &lh) + { + return T{-value_of(lh)}; + } + + friend + STRONG_CONSTEXPR + T& + operator+=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) += value_of(rh))) + { + value_of(lh) += value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + T& + operator-=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) -= value_of(rh))) + { + value_of(lh) -= value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + T& + operator*=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) *= value_of(rh))) + { + value_of(lh) *= value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + T& + operator/=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) /= value_of(rh))) + { + value_of(lh) /= value_of(rh); + return lh; + } + + template ()) % value_of(std::declval()))> + friend + STRONG_CONSTEXPR + T& + 
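// Sketch for the affine_point and arithmetic modifiers above (names are
// illustrative):
//
//   // affine_point<D>: subtracting two points yields a D; a D can be added.
//   using Position = strong::type<int, struct pos_tag, strong::affine_point<int>>;
//   Position p{10}, q{4};
//   int delta  = p - q;        // == 6
//   Position r = q + delta;    // back to 10
//
//   // arithmetic: the strong type itself supports +, -, *, /.
//   using Distance = strong::type<double, struct dist_tag, strong::arithmetic>;
//   Distance total = Distance{1.5} + Distance{2.0};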
operator%=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) %= value_of(rh))) + { + value_of(lh) %= value_of(rh); + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator+( + T lh, + const T &rh) + { + lh += rh; + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator-( + T lh, + const T &rh) + { + lh -= rh; + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator*( + T lh, + const T &rh) + { + lh *= rh; + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator/( + T lh, + const T &rh) + { + lh /= rh; + return lh; + } + + template ()) % value_of(std::declval()))> + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator%( + T lh, + const T &rh) + { + lh %= rh; + return lh; + } + + }; +}; + + +struct bitarithmetic +{ + template + class modifier + { + public: + friend + STRONG_CONSTEXPR + T& + operator&=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) &= value_of(rh))) + { + value_of(lh) &= value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + T& + operator|=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) |= value_of(rh))) + { + value_of(lh) |= value_of(rh); + return lh; + } + + friend + STRONG_CONSTEXPR + T& + operator^=( + T &lh, + const T &rh) + noexcept(noexcept(value_of(lh) ^= value_of(rh))) + { + value_of(lh) ^= value_of(rh); + return lh; + } + + template + friend + STRONG_CONSTEXPR + T& + operator<<=( + T &lh, + C c) + noexcept(noexcept(value_of(lh) <<= c)) + { + value_of(lh) <<= c; + return lh; + } + + template + friend + STRONG_CONSTEXPR + T& + operator>>=( + T &lh, + C c) + noexcept(noexcept(value_of(lh) >>= c)) + { + value_of(lh) >>= c; + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator~( + const T &lh) + { + auto v = value_of(lh); + v = ~v; + return T(v); + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator&( + T lh, + const T &rh) + { + lh &= rh; + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator|( + T lh, + const T &rh) + { + lh |= rh; + return lh; + } + + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator^( + T lh, + const T &rh) + { + lh ^= rh; + return lh; + } + + template + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator<<( + T lh, + C c) + { + lh <<= c; + return lh; + } + + template + STRONG_NODISCARD + friend + STRONG_CONSTEXPR + T + operator>>( + T lh, + C c) + { + lh >>= c; + return lh; + } + }; +}; +template +struct indexed +{ + template + class modifier; +}; + +template <> +struct indexed { + template + class modifier; + + template + class modifier> { + using ref = T&; + using cref = const T&; + using rref = T&&; + using type = strong::type; + public: + template + STRONG_NODISCARD + auto + operator[]( + const I &i) + const & + noexcept(noexcept(std::declval()[impl::access(i)])) + -> decltype(std::declval()[impl::access(i)]) { + auto& self = static_cast(*this); + return value_of(self)[impl::access(i)]; + } + + template + STRONG_NODISCARD + auto + operator[]( + const I &i) + & + noexcept(noexcept(std::declval()[impl::access(i)])) + -> decltype(std::declval()[impl::access(i)]) { + auto& self = static_cast(*this); + return value_of(self)[impl::access(i)]; + } + + template + STRONG_NODISCARD + auto + operator[]( + const I &i) + && + noexcept(noexcept(std::declval()[impl::access(i)])) + -> decltype(std::declval()[impl::access(i)]) { + auto& self = static_cast(*this); + return value_of(std::move(self))[impl::access(i)]; + } + + template + 
STRONG_NODISCARD + auto + at( + const I &i) + const & + -> decltype(std::declval().at(impl::access(i))) { + auto& self = static_cast(*this); + return value_of(self).at(impl::access(i)); + } + + template + STRONG_NODISCARD + auto + at( + const I &i) + & + -> decltype(std::declval().at(impl::access(i))) { + auto& self = static_cast(*this); + return value_of(self).at(impl::access(i)); + } + + template + STRONG_NODISCARD + auto + at( + const I &i) + && + -> decltype(std::declval().at(impl::access(i))) { + auto& self = static_cast(*this); + return value_of(std::move(self)).at(impl::access(i)); + } + }; +}; + +template +template +class indexed::modifier> +{ + using type = ::strong::type; +public: + STRONG_NODISCARD + auto + operator[]( + const I& i) + const & + noexcept(noexcept(std::declval()[impl::access(i)])) + -> decltype(std::declval()[impl::access(i)]) + { + auto& self = static_cast(*this); + return value_of(self)[impl::access(i)]; + } + + STRONG_NODISCARD + auto + operator[]( + const I& i) + & + noexcept(noexcept(std::declval()[impl::access(i)])) + -> decltype(std::declval()[impl::access(i)]) + { + auto& self = static_cast(*this); + return value_of(self)[impl::access(i)]; + } + + STRONG_NODISCARD + auto + operator[]( + const I& i) + && + noexcept(noexcept(std::declval()[impl::access(i)])) + -> decltype(std::declval()[impl::access(i)]) + { + auto& self = static_cast(*this); + return value_of(std::move(self))[impl::access(i)]; + } + + template + STRONG_NODISCARD + auto + at( + const I& i) + const & + -> decltype(std::declval().at(impl::access(i))) + { + auto& self = static_cast(*this); + return value_of(self).at(impl::access(i)); + } + + template + STRONG_NODISCARD + auto + at( + const I& i) + & + -> decltype(std::declval().at(impl::access(i))) + { + auto& self = static_cast(*this); + return value_of(self).at(impl::access(i)); + } + + template + STRONG_NODISCARD + auto + at( + const I& i) + && + -> decltype(std::declval().at(impl::access(i))) + { + auto& self = static_cast(*this); + return value_of(std::move(self)).at(impl::access(i)); + } +}; + +class iterator +{ +public: + template >::iterator_category> + class modifier + : public pointer::modifier + , public equality::modifier + , public incrementable::modifier + { + public: + using difference_type = typename std::iterator_traits>::difference_type; + using value_type = typename std::iterator_traits>::value_type; + using pointer = typename std::iterator_traits>::value_type; + using reference = typename std::iterator_traits>::reference; + using iterator_category = typename std::iterator_traits>::iterator_category; + }; + + template + class modifier + : public modifier + , public decrementable::modifier + { + }; + template + class modifier + : public modifier + , public affine_point>::difference_type>::template modifier + , public indexed<>::modifier + , public ordered::modifier + { + }; +}; + +class range +{ +public: + template + class modifier; +}; + +template +class range::modifier> +{ + using type = ::strong::type; + using r_iterator = decltype(std::declval().begin()); + using r_const_iterator = decltype(std::declval().begin()); +public: + using iterator = ::strong::type; + using const_iterator = ::strong::type; + + iterator + begin() + noexcept(noexcept(std::declval().begin())) + { + auto& self = static_cast(*this); + return iterator{value_of(self).begin()}; + } + + iterator + end() + noexcept(noexcept(std::declval().end())) + { + auto& self = static_cast(*this); + return iterator{value_of(self).end()}; + } + + const_iterator + 
cbegin() + const + noexcept(noexcept(std::declval().begin())) + { + auto& self = static_cast(*this); + return const_iterator{value_of(self).begin()}; + } + + const_iterator + cend() + const + noexcept(noexcept(std::declval().end())) + { + auto& self = static_cast(*this); + return const_iterator{value_of(self).end()}; + } + + const_iterator + begin() + const + noexcept(noexcept(std::declval().begin())) + { + auto& self = static_cast(*this); + return const_iterator{value_of(self).begin()}; + } + + const_iterator + end() + const + noexcept(noexcept(std::declval().end())) + { + auto& self = static_cast(*this); + return const_iterator{value_of(self).end()}; + } +}; + +namespace impl { + + template + struct converter + { + STRONG_CONSTEXPR explicit operator D() const + noexcept(noexcept(static_cast(std::declval&>()))) + { + auto& self = static_cast(*this); + return static_cast(value_of(self)); + } + }; + template + struct implicit_converter + { + STRONG_CONSTEXPR operator D() const + noexcept(noexcept(static_cast(std::declval&>()))) + { + auto& self = static_cast(*this); + return static_cast(value_of(self)); + } + }; +} +template +struct convertible_to +{ + template + struct modifier : impl::converter... + { + }; +}; + +template +struct implicitly_convertible_to +{ + template + struct modifier : impl::implicit_converter... + { + }; + +}; + +struct formattable +{ + template + class modifier{}; +}; + +} + +namespace std { +template +struct hash<::strong::type> + : std::conditional_t< + std::is_base_of< + ::strong::hashable::modifier< + ::strong::type + >, + ::strong::type + >::value, + hash, + std::false_type> +{ + using type = ::strong::type; + decltype(auto) + operator()( + const ::strong::hashable::modifier& t) + const + noexcept(noexcept(std::declval>()(value_of(std::declval())))) + { + auto& tt = static_cast(t); + return hash::operator()(value_of(tt)); + } +}; +template +struct is_arithmetic<::strong::type> + : is_base_of<::strong::arithmetic::modifier<::strong::type>, + ::strong::type> +{ +}; + +#if STRONG_HAS_STD_FORMAT +template +struct formatter<::strong::type, Char, + std::enable_if_t< + std::is_base_of< + ::strong::formattable::modifier< + ::strong::type + >, + ::strong::type + >::value + >> + : formatter +{ + using type = ::strong::type; + template + STRONG_CONSTEXPR + decltype(auto) + format(const ::strong::formattable::modifier& t, FormatContext& fc) + noexcept(noexcept(std::declval>().format(value_of(std::declval()), fc))) + { + const auto& tt = static_cast(t); + return formatter::format(value_of(tt), fc); + } +}; +#endif + +} + +#if STRONG_HAS_FMT_FORMAT +namespace fmt +{ +template +struct formatter<::strong::type, Char, + std::enable_if_t< + std::is_base_of< + ::strong::formattable::modifier< + ::strong::type + >, + ::strong::type + >::value + >> + : formatter +{ + using type = ::strong::type; + template + STRONG_CONSTEXPR + decltype(auto) + format(const ::strong::formattable::modifier& t, FormatContext& fc) + noexcept(noexcept(std::declval>().format(value_of(std::declval()), fc))) + { + const auto& tt = static_cast(t); + return formatter::format(value_of(tt), fc); + } +}; +} +#endif +#endif //ROLLBEAR_STRONG_TYPE_HPP_INCLUDED diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/thread_name.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/thread_name.h new file mode 100644 index 0000000000000000000000000000000000000000..2420abf7ad06f726998e94439066fc1040c32455 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/thread_name.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <string>
+
+#include <c10/macros/Export.h>
+
+namespace c10 {
+
+C10_API void setThreadName(std::string name);
+
+} // namespace c10
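// ---------------------------------------------------------------------------
// Illustrative usage (not part of the diff above): a minimal sketch of how the
// c10::setThreadName declaration added in the hunk might be used. The only API
// taken from the header is C10_API void setThreadName(std::string name), which
// labels the *calling* thread; the worker function and the thread label below
// are hypothetical example code. On Linux the kernel limits thread names to 15
// characters, so long labels may be truncated.
// ---------------------------------------------------------------------------
#include <c10/util/thread_name.h>

#include <thread>

namespace {

void example_worker() {
  // Give this worker a readable name so it is identifiable in debuggers and
  // profilers (e.g. gdb's "info threads", htop, py-spy).
  c10::setThreadName("my_worker");
  // ... do work ...
}

} // namespace

int main() {
  std::thread t(example_worker);
  t.join();
  return 0;
}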