diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/basic/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/basic/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..b9c55316d32facf8fcff501d9308aee90b4033a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/basic/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace channel { +namespace basic { + +std::shared_ptr create(); + +} // namespace basic +} // namespace channel +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..eb71e1496af576f443c91f5ff335b83041a9ddaa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/cma/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace channel { +namespace cma { + +std::shared_ptr create(); + +} // namespace cma +} // namespace channel +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/context.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/context.h new file mode 100644 index 0000000000000000000000000000000000000000..cbcef807f6b4c345252504bdbef518405d4584d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/context.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace tensorpipe { +namespace channel { + +enum class Endpoint : bool { kConnect, kListen }; + +class Channel; + +// Abstract base class for channel context classes. +// +// Instances of these classes are expected to be registered with a +// context. All registered instances are assumed to be eligible +// channels for all pairs. +// +class Context { + public: + // Return whether the context is able to operate correctly. + // + // Some channel types may be unable to perform as intended under some + // circumstances (e.g., specialized hardware unavailable, lack of + // permissions). They can report it through this method in order for + // the core context to avoid registering them in the first place. + // + virtual bool isViable() const = 0; + + // Return the number of control connections needed to create an instance of + // this channel. + // + // Most channels require only one, but some require more (cuda_basic), and + // some might require none. 
+ // + virtual size_t numConnectionsNeeded() const = 0; + + // Return a map from supported devices to strings describing the device from + // the channel's perspective. + // + // Two processes with a channel context of the same type can leverage this + // channel to make two devices communicate if one side's device descriptor is + // "accepted" by the other one, using the canCommunicateWithRemote method + // below. That method must be symmetric, and unless overridden defaults to + // string comparison. + // + virtual const std::unordered_map& deviceDescriptors() + const = 0; + + // Compare local and remote device descriptors for compatibility. + // + // Determine whether a channel can be opened between a local device and + // a remote one that has the given device descriptor. This function + // needs to be symmetric: if we called this method on the remote + // context with the local descriptor we should get the same answer. + // Unless overridden it defaults to string comparison. + // + virtual bool canCommunicateWithRemote( + const std::string& localDeviceDescriptor, + const std::string& remoteDeviceDescriptor) const = 0; + + // Return newly created channel using the specified connections. + // + // It is up to the channel to either use these connections for further + // initialization, or use them directly. Either way, the returned + // channel should be immediately usable. If the channel isn't fully + // initialized yet, take care to queue these operations to execute + // as soon as initialization has completed. + // + virtual std::shared_ptr createChannel( + std::vector>, + Endpoint) = 0; + + // Tell the context what its identifier is. + // + // This is only supposed to be called from the high-level context. It will + // only used for logging and debugging purposes. + virtual void setId(std::string id) = 0; + + // Put the channel context in a terminal state, in turn closing all of its + // channels, and release its resources. This may be done asynchronously, in + // background. + virtual void close() = 0; + + // Wait for all resources to be released and all background activity to stop. + virtual void join() = 0; + + virtual ~Context() = default; + + private: + std::string name_; +}; + +} // namespace channel +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/error.h new file mode 100644 index 0000000000000000000000000000000000000000..8d4bbec479f33b122403cc35937e58dbf32ce755 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/error.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace channel { + +class ContextClosedError final : public BaseError { + public: + ContextClosedError() {} + + std::string what() const override; +}; + +class ChannelClosedError final : public BaseError { + public: + ChannelClosedError() {} + + std::string what() const override; +}; + +class ContextNotViableError final : public BaseError { + public: + ContextNotViableError() {} + + std::string what() const override; +}; + +} // namespace channel +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/mpt/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/mpt/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..d1c22a1598ec765ad4768a8393250bbbc73956e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/mpt/factory.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +#include +#include + +namespace tensorpipe { +namespace channel { +namespace mpt { + +std::shared_ptr create( + std::vector> contexts, + std::vector> listeners); + +} // namespace mpt +} // namespace channel +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/xth/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/xth/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..533c7676fdddf9884d891de9e2a009610af5ee09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/channel/xth/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace channel { +namespace xth { + +std::shared_ptr create(); + +} // namespace xth +} // namespace channel +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..f91cea36c4a09d29289cbb698aed343c13ce2373 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/buffer.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace tensorpipe { + +class Buffer { + class AbstractBufferWrapper { + public: + virtual Device device() const = 0; + virtual void copyConstructInto(void* ptr) const = 0; + virtual void moveConstructInto(void* ptr) = 0; + virtual ~AbstractBufferWrapper() = default; + }; + + template + class BufferWrapper : public AbstractBufferWrapper { + static_assert( + std::is_trivially_copyable::value, + "wrapping non-trivially copyable class"); + + public: + TBuffer buffer; + + explicit BufferWrapper(TBuffer buffer) : buffer(std::move(buffer)) {} + + Device device() const override { + return buffer.getDevice(); + } + + void copyConstructInto(void* ptr) const override { + new (ptr) BufferWrapper(*this); + } + + void moveConstructInto(void* ptr) override { + new (ptr) BufferWrapper(std::move(*this)); + } + }; + + public: + template + /* implicit */ Buffer(TBuffer b) { + static_assert( + sizeof(BufferWrapper) <= kStructSize, "kStructSize too small"); + static_assert( + alignof(BufferWrapper) <= kStructAlign, + "kStructAlign too small"); + new (&raw_) BufferWrapper(std::move(b)); + } + + Buffer() : Buffer(CpuBuffer{}) {} + + Buffer(const Buffer& other) { + other.ptr()->copyConstructInto(&raw_); + } + + Buffer& operator=(const Buffer& other) { + if (this != &other) { + ptr()->~AbstractBufferWrapper(); + other.ptr()->copyConstructInto(&raw_); + } + return *this; + } + + Buffer(Buffer&& other) noexcept { + other.ptr()->moveConstructInto(&raw_); + } + + Buffer& operator=(Buffer&& other) { + if (this != &other) { + ptr()->~AbstractBufferWrapper(); + other.ptr()->moveConstructInto(&raw_); + } + return *this; + } + + ~Buffer() { + ptr()->~AbstractBufferWrapper(); + } + + template + TBuffer& unwrap() { + BufferWrapper* wrapperPtr = + dynamic_cast*>(ptr()); + if (wrapperPtr == nullptr) { + throw std::runtime_error("Invalid unwrapping of tensorpipe::Buffer"); + } + return wrapperPtr->buffer; + } + + template + const TBuffer& unwrap() const { + const BufferWrapper* wrapperPtr = + dynamic_cast*>(ptr()); + if (wrapperPtr == nullptr) { + throw std::runtime_error("Invalid unwrapping of tensorpipe::Buffer"); + } + return wrapperPtr->buffer; + } + + Device device() const { + return ptr()->device(); + } + + private: + static constexpr int kStructSize = 32; + static constexpr int kStructAlign = 8; + std::aligned_storage::type raw_{}; + + const AbstractBufferWrapper* ptr() const { + // FIXME: Once we go C++17, use std::launder on the returned pointer. + return reinterpret_cast(&raw_); + } + + AbstractBufferWrapper* ptr() { + // FIXME: Once we go C++17, use std::launder on the returned pointer. + return reinterpret_cast(&raw_); + } +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..2b5fe49235374a40ac0a8a8e2dc6689664d52cb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cpu_buffer.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +namespace tensorpipe { + +struct CpuBuffer { + void* ptr{nullptr}; + + Device getDevice() const { + return Device{kCpuDeviceType, 0}; + } +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..e772304b3ffdd97a39026f018d40ed7877f39e60 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/cuda_buffer.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { + +struct CudaBuffer { + void* ptr{nullptr}; + cudaStream_t stream{cudaStreamDefault}; + + Device getDevice() const; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h new file mode 100644 index 0000000000000000000000000000000000000000..d7ff69bf2ac8b770c6329e4fb1107dabb5d630dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/device.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +namespace tensorpipe { + +const std::string kCpuDeviceType{"cpu"}; +const std::string kCudaDeviceType{"cuda"}; + +struct Device { + std::string type; + int index; + + // This pointless constructor is needed to work around a bug in GCC 5.5 (and + // possibly other versions). It appears to be needed in the nop types that + // are used inside nop::Optional. + Device() {} + + Device(std::string type, int index) : type(std::move(type)), index(index) {} + + std::string toString() const { + std::stringstream ss; + ss << type << ":" << index; + return ss.str(); + } + + bool operator==(const Device& other) const { + return type == other.type && index == other.index; + } +}; + +} // namespace tensorpipe + +namespace std { + +template <> +struct hash<::tensorpipe::Device> { + size_t operator()(const ::tensorpipe::Device& device) const noexcept { + return std::hash{}(device.toString()); + } +}; + +template <> +struct hash> { + size_t operator()(const std::pair<::tensorpipe::Device, ::tensorpipe::Device>& + p) const noexcept { + size_t h1 = std::hash<::tensorpipe::Device>{}(p.first); + size_t h2 = std::hash<::tensorpipe::Device>{}(p.second); + // Shifting one hash to avoid collisions between (a, b) and (b, a). + return h1 ^ (h2 << 1); + } +}; + +} // namespace std diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h new file mode 100644 index 0000000000000000000000000000000000000000..7dc997cb2f594126fcd433169de992b4cb0aef78 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/error.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +namespace tensorpipe { + +// Base class for actual errors. +class BaseError { + public: + virtual ~BaseError() = default; + + // Returns an explanatory string. + // Like `std::exception` but returns a `std::string`. + virtual std::string what() const = 0; +}; + +// Wrapper class for errors. +// +// Background: we wish to not use exceptions yet need an error +// representation that can propagate across function and thread +// boundaries. This representation must be copyable (so we can store +// and return it at a later point in time) and retain downstream type +// information. This implies a heap allocation because it's the +// easiest way to deal with variable size objects (barring a union of +// all downstream error classes and a lot of custom code). Instead of +// passing a shared_ptr around directly, we use this wrapper class to +// keep implementation details hidden from calling code. +// +class Error final { + public: + // Constant instance that indicates success. + static const Error kSuccess; + + // Default constructor for error that is not an error. + Error() {} + + Error(std::shared_ptr error, std::string file, int line) + : error_(std::move(error)), file_(std::move(file)), line_(line) {} + + virtual ~Error() = default; + + // Converting to boolean means checking if there is an error. This + // means we don't need to use an `std::optional` and allows for a + // snippet like the following: + // + // if (error) { + // // Deal with it. + // } + // + operator bool() const { + return static_cast(error_); + } + + template + std::shared_ptr castToType() const { + return std::dynamic_pointer_cast(error_); + } + + template + bool isOfType() const { + return castToType() != nullptr; + } + + // Like `std::exception` but returns a `std::string`. + std::string what() const; + + private: + std::shared_ptr error_; + std::string file_; + int line_; +}; + +class SystemError final : public BaseError { + public: + explicit SystemError(const char* syscall, int error) + : syscall_(syscall), error_(error) {} + + std::string what() const override; + + int errorCode() const; + + private: + const char* syscall_; + const int error_; +}; + +class ShortReadError final : public BaseError { + public: + ShortReadError(ssize_t expected, ssize_t actual) + : expected_(expected), actual_(actual) {} + + std::string what() const override; + + private: + const ssize_t expected_; + const ssize_t actual_; +}; + +class ShortWriteError final : public BaseError { + public: + ShortWriteError(ssize_t expected, ssize_t actual) + : expected_(expected), actual_(actual) {} + + std::string what() const override; + + private: + const ssize_t expected_; + const ssize_t actual_; +}; + +class EOFError final : public BaseError { + public: + EOFError() {} + + std::string what() const override; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h new file mode 100644 index 0000000000000000000000000000000000000000..549849ab2fa621abdd05a945cc302c3d0e460a51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/common/optional.h @@ -0,0 +1,1020 @@ +// Copyright (C) 2011 - 2012 Andrzej Krzemienski. 
+// +// Use, modification, and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The idea and interface is based on Boost.Optional library +// authored by Fernando Luis Cacciola Carballal + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#define TR2_OPTIONAL_REQUIRES(...) \ + typename std::enable_if<__VA_ARGS__::value, bool>::type = false + +#if defined __GNUC__ // NOTE: GNUC is also defined for Clang +#if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8) +#define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___ +#elif (__GNUC__ > 4) +#define TR2_OPTIONAL_GCC_4_8_AND_HIGHER___ +#endif +# +#if (__GNUC__ == 4) && (__GNUC_MINOR__ >= 7) +#define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___ +#elif (__GNUC__ > 4) +#define TR2_OPTIONAL_GCC_4_7_AND_HIGHER___ +#endif +# +#if (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) && (__GNUC_PATCHLEVEL__ >= 1) +#define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#elif (__GNUC__ == 4) && (__GNUC_MINOR__ >= 9) +#define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#elif (__GNUC__ > 4) +#define TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#endif +#endif +# +#if defined __clang_major__ +#if (__clang_major__ == 3 && __clang_minor__ >= 5) +#define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ +#elif (__clang_major__ > 3) +#define TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ +#endif +#if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ +#define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_ +#elif ( \ + __clang_major__ == 3 && __clang_minor__ == 4 && __clang_patchlevel__ >= 2) +#define TR2_OPTIONAL_CLANG_3_4_2_AND_HIGHER_ +#endif +#endif +# +#if defined _MSC_VER +#if (_MSC_VER >= 1900) +#define TR2_OPTIONAL_MSVC_2015_AND_HIGHER___ +#endif +#endif + +#if defined __clang__ +#if (__clang_major__ > 2) || (__clang_major__ == 2) && (__clang_minor__ >= 9) +#define OPTIONAL_HAS_THIS_RVALUE_REFS 1 +#else +#define OPTIONAL_HAS_THIS_RVALUE_REFS 0 +#endif +#elif defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#define OPTIONAL_HAS_THIS_RVALUE_REFS 1 +#elif defined TR2_OPTIONAL_MSVC_2015_AND_HIGHER___ +#define OPTIONAL_HAS_THIS_RVALUE_REFS 1 +#else +#define OPTIONAL_HAS_THIS_RVALUE_REFS 0 +#endif + +#if defined TR2_OPTIONAL_GCC_4_8_1_AND_HIGHER___ +#define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 1 +#define OPTIONAL_CONSTEXPR_INIT_LIST constexpr +#else +#define OPTIONAL_HAS_CONSTEXPR_INIT_LIST 0 +#define OPTIONAL_CONSTEXPR_INIT_LIST +#endif + +#if defined TR2_OPTIONAL_CLANG_3_5_AND_HIGHTER_ && (defined __cplusplus) && \ + (__cplusplus != 201103L) +#define OPTIONAL_HAS_MOVE_ACCESSORS 1 +#else +#define OPTIONAL_HAS_MOVE_ACCESSORS 0 +#endif + +#// In C++11 constexpr implies const, so we need to make non-const members also non-constexpr +#if (defined __cplusplus) && (__cplusplus == 201103L) +#define OPTIONAL_MUTABLE_CONSTEXPR +#else +#define OPTIONAL_MUTABLE_CONSTEXPR constexpr +#endif + +namespace tensorpipe { + +// 20.5.4, optional for object types +template +class optional; + +// 20.5.5, optional for lvalue reference types +template +class optional; + +// workaround: std utility functions aren't constexpr yet +template +inline constexpr T&& constexpr_forward( + typename std::remove_reference::type& t) noexcept { + return static_cast(t); +} + +template +inline constexpr T&& constexpr_forward( + typename std::remove_reference::type&& t) noexcept { + static_assert(!std::is_lvalue_reference::value, "!!"); + return static_cast(t); +} + +template +inline constexpr typename std::remove_reference::type&& constexpr_move( + T&& t) 
noexcept { + return static_cast::type&&>(t); +} + +#if defined NDEBUG +#define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) (EXPR) +#else +#define TR2_OPTIONAL_ASSERTED_EXPRESSION(CHECK, EXPR) \ + ((CHECK) ? (EXPR) : ([] { assert(!#CHECK); }(), (EXPR))) +#endif + +namespace detail_ { + +// static_addressof: a constexpr version of addressof +template +struct has_overloaded_addressof { + template + constexpr static bool has_overload(...) { + return false; + } + + template ().operator&())> + constexpr static bool has_overload(bool) { + return true; + } + + constexpr static bool value = has_overload(true); +}; + +template )> +constexpr T* static_addressof(T& ref) { + return &ref; +} + +template )> +T* static_addressof(T& ref) { + return std::addressof(ref); +} + +// the call to convert(b) has return type A and converts b to type A iff b +// decltype(b) is implicitly convertible to A +template +constexpr U convert(U v) { + return v; +} + +} // namespace detail_ + +constexpr struct trivial_init_t { +} trivial_init{}; + +// 20.5.6, In-place construction +constexpr struct in_place_t { +} in_place{}; + +// 20.5.7, Disengaged state indicator +struct nullopt_t { + struct init {}; + constexpr explicit nullopt_t(init) {} +}; +constexpr nullopt_t nullopt{nullopt_t::init()}; + +// 20.5.8, class bad_optional_access +class bad_optional_access : public std::logic_error { + public: + explicit bad_optional_access(const std::string& what_arg) + : logic_error{what_arg} {} + explicit bad_optional_access(const char* what_arg) : logic_error{what_arg} {} +}; + +template +union storage_t { + unsigned char dummy_; + T value_; + + constexpr storage_t(trivial_init_t) noexcept : dummy_(){}; + + template + constexpr storage_t(Args&&... args) + : value_(constexpr_forward(args)...) {} + + ~storage_t() {} +}; + +template +union constexpr_storage_t { + unsigned char dummy_; + T value_; + + constexpr constexpr_storage_t(trivial_init_t) noexcept : dummy_(){}; + + template + constexpr constexpr_storage_t(Args&&... args) + : value_(constexpr_forward(args)...) {} + + ~constexpr_storage_t() = default; +}; + +template +struct optional_base { + bool init_; + storage_t storage_; + + constexpr optional_base() noexcept : init_(false), storage_(trivial_init){}; + + explicit constexpr optional_base(const T& v) : init_(true), storage_(v) {} + + explicit constexpr optional_base(T&& v) + : init_(true), storage_(constexpr_move(v)) {} + + template + explicit optional_base(in_place_t, Args&&... args) + : init_(true), storage_(constexpr_forward(args)...) {} + + template < + class U, + class... Args, + TR2_OPTIONAL_REQUIRES(std::is_constructible>)> + explicit optional_base( + in_place_t, + std::initializer_list il, + Args&&... args) + : init_(true), storage_(il, std::forward(args)...) {} + + ~optional_base() { + if (init_) + storage_.value_.T::~T(); + } +}; + +template +struct constexpr_optional_base { + bool init_; + constexpr_storage_t storage_; + + constexpr constexpr_optional_base() noexcept + : init_(false), storage_(trivial_init){}; + + explicit constexpr constexpr_optional_base(const T& v) + : init_(true), storage_(v) {} + + explicit constexpr constexpr_optional_base(T&& v) + : init_(true), storage_(constexpr_move(v)) {} + + template + explicit constexpr constexpr_optional_base(in_place_t, Args&&... args) + : init_(true), storage_(constexpr_forward(args)...) {} + + template < + class U, + class... 
Args, + TR2_OPTIONAL_REQUIRES(std::is_constructible>)> + OPTIONAL_CONSTEXPR_INIT_LIST explicit constexpr_optional_base( + in_place_t, + std::initializer_list il, + Args&&... args) + : init_(true), storage_(il, std::forward(args)...) {} + + ~constexpr_optional_base() = default; +}; + +template +using OptionalBase = typename std::conditional< + std::is_trivially_destructible::value, // if possible + constexpr_optional_base::type>, // use base with trivial destructor + optional_base::type>>::type; + +template +class optional : private OptionalBase { + static_assert( + !std::is_same::type, nullopt_t>::value, + "bad T"); + static_assert( + !std::is_same::type, in_place_t>::value, + "bad T"); + + constexpr bool initialized() const noexcept { + return OptionalBase::init_; + } + typename std::remove_const::type* dataptr() { + return std::addressof(OptionalBase::storage_.value_); + } + constexpr const T* dataptr() const { + return detail_::static_addressof(OptionalBase::storage_.value_); + } + +#if OPTIONAL_HAS_THIS_RVALUE_REFS == 1 + constexpr const T& contained_val() const& { + return OptionalBase::storage_.value_; + } +#if OPTIONAL_HAS_MOVE_ACCESSORS == 1 + OPTIONAL_MUTABLE_CONSTEXPR T&& contained_val() && { + return std::move(OptionalBase::storage_.value_); + } + OPTIONAL_MUTABLE_CONSTEXPR T& contained_val() & { + return OptionalBase::storage_.value_; + } +#else + T& contained_val() & { + return OptionalBase::storage_.value_; + } + T&& contained_val() && { + return std::move(OptionalBase::storage_.value_); + } +#endif +#else + constexpr const T& contained_val() const { + return OptionalBase::storage_.value_; + } + T& contained_val() { + return OptionalBase::storage_.value_; + } +#endif + + void clear() noexcept { + if (initialized()) + dataptr()->T::~T(); + OptionalBase::init_ = false; + } + + template + void initialize(Args&&... args) noexcept( + noexcept(T(std::forward(args)...))) { + assert(!OptionalBase::init_); + ::new (static_cast(dataptr())) T(std::forward(args)...); + OptionalBase::init_ = true; + } + + template + void initialize(std::initializer_list il, Args&&... args) noexcept( + noexcept(T(il, std::forward(args)...))) { + assert(!OptionalBase::init_); + ::new (static_cast(dataptr())) T(il, std::forward(args)...); + OptionalBase::init_ = true; + } + + public: + typedef T value_type; + + // 20.5.5.1, constructors + constexpr optional() noexcept : OptionalBase(){}; + constexpr optional(nullopt_t) noexcept : OptionalBase(){}; + + optional(const optional& rhs) : OptionalBase() { + if (rhs.initialized()) { + ::new (static_cast(dataptr())) T(*rhs); + OptionalBase::init_ = true; + } + } + + optional(optional&& rhs) noexcept( + std::is_nothrow_move_constructible::value) + : OptionalBase() { + if (rhs.initialized()) { + ::new (static_cast(dataptr())) T(std::move(*rhs)); + OptionalBase::init_ = true; + } + } + + constexpr optional(const T& v) : OptionalBase(v) {} + + constexpr optional(T&& v) : OptionalBase(constexpr_move(v)) {} + + template + explicit constexpr optional(in_place_t, Args&&... args) + : OptionalBase(in_place_t{}, constexpr_forward(args)...) {} + + template < + class U, + class... Args, + TR2_OPTIONAL_REQUIRES(std::is_constructible>)> + OPTIONAL_CONSTEXPR_INIT_LIST explicit optional( + in_place_t, + std::initializer_list il, + Args&&... args) + : OptionalBase(in_place_t{}, il, constexpr_forward(args)...) 
{} + + // 20.5.4.2, Destructor + ~optional() = default; + + // 20.5.4.3, assignment + optional& operator=(nullopt_t) noexcept { + clear(); + return *this; + } + + optional& operator=(const optional& rhs) { + if (initialized() == true && rhs.initialized() == false) + clear(); + else if (initialized() == false && rhs.initialized() == true) + initialize(*rhs); + else if (initialized() == true && rhs.initialized() == true) + contained_val() = *rhs; + return *this; + } + + optional& operator=(optional&& rhs) noexcept( + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_constructible::value) { + if (initialized() == true && rhs.initialized() == false) + clear(); + else if (initialized() == false && rhs.initialized() == true) + initialize(std::move(*rhs)); + else if (initialized() == true && rhs.initialized() == true) + contained_val() = std::move(*rhs); + return *this; + } + + template + auto operator=(U&& v) -> typename std::enable_if< + std::is_same::type, T>::value, + optional&>::type { + if (initialized()) { + contained_val() = std::forward(v); + } else { + initialize(std::forward(v)); + } + return *this; + } + + template + void emplace(Args&&... args) { + clear(); + initialize(std::forward(args)...); + } + + template + void emplace(std::initializer_list il, Args&&... args) { + clear(); + initialize(il, std::forward(args)...); + } + + // 20.5.4.4, Swap + void swap(optional& rhs) noexcept( + std::is_nothrow_move_constructible::value&& noexcept( + std::swap(std::declval(), std::declval()))) { + if (initialized() == true && rhs.initialized() == false) { + rhs.initialize(std::move(**this)); + clear(); + } else if (initialized() == false && rhs.initialized() == true) { + initialize(std::move(*rhs)); + rhs.clear(); + } else if (initialized() == true && rhs.initialized() == true) { + using std::swap; + swap(**this, *rhs); + } + } + + // 20.5.4.5, Observers + + explicit constexpr operator bool() const noexcept { + return initialized(); + } + constexpr bool has_value() const noexcept { + return initialized(); + } + + constexpr T const* operator->() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), dataptr()); + } + +#if OPTIONAL_HAS_MOVE_ACCESSORS == 1 + + OPTIONAL_MUTABLE_CONSTEXPR T* operator->() { + assert(initialized()); + return dataptr(); + } + + constexpr T const& operator*() const& { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val()); + } + + OPTIONAL_MUTABLE_CONSTEXPR T& operator*() & { + assert(initialized()); + return contained_val(); + } + + OPTIONAL_MUTABLE_CONSTEXPR T&& operator*() && { + assert(initialized()); + return constexpr_move(contained_val()); + } + + constexpr T const& value() const& { + return initialized() + ? contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + + OPTIONAL_MUTABLE_CONSTEXPR T& value() & { + return initialized() + ? contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + + OPTIONAL_MUTABLE_CONSTEXPR T&& value() && { + if (!initialized()) + throw bad_optional_access("bad optional access"); + return std::move(contained_val()); + } + +#else + + T* operator->() { + assert(initialized()); + return dataptr(); + } + + constexpr T const& operator*() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(initialized(), contained_val()); + } + + T& operator*() { + assert(initialized()); + return contained_val(); + } + + constexpr T const& value() const { + return initialized() + ? 
contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + + T& value() { + return initialized() + ? contained_val() + : (throw bad_optional_access("bad optional access"), contained_val()); + } + +#endif + +#if OPTIONAL_HAS_THIS_RVALUE_REFS == 1 + + template + constexpr T value_or(V&& v) const& { + return *this ? **this : detail_::convert(constexpr_forward(v)); + } + +#if OPTIONAL_HAS_MOVE_ACCESSORS == 1 + + template + OPTIONAL_MUTABLE_CONSTEXPR T value_or(V&& v) && { + return *this + ? constexpr_move(const_cast&>(*this).contained_val()) + : detail_::convert(constexpr_forward(v)); + } + +#else + + template + T value_or(V&& v) && { + return *this + ? constexpr_move(const_cast&>(*this).contained_val()) + : detail_::convert(constexpr_forward(v)); + } + +#endif + +#else + + template + constexpr T value_or(V&& v) const { + return *this ? **this : detail_::convert(constexpr_forward(v)); + } + +#endif + + // 20.6.3.6, modifiers + void reset() noexcept { + clear(); + } +}; + +template +class optional { + static_assert(!std::is_same::value, "bad T"); + static_assert(!std::is_same::value, "bad T"); + T* ref; + + public: + // 20.5.5.1, construction/destruction + constexpr optional() noexcept : ref(nullptr) {} + + constexpr optional(nullopt_t) noexcept : ref(nullptr) {} + + constexpr optional(T& v) noexcept : ref(detail_::static_addressof(v)) {} + + optional(T&&) = delete; + + constexpr optional(const optional& rhs) noexcept : ref(rhs.ref) {} + + explicit constexpr optional(in_place_t, T& v) noexcept + : ref(detail_::static_addressof(v)) {} + + explicit optional(in_place_t, T&&) = delete; + + ~optional() = default; + + // 20.5.5.2, mutation + optional& operator=(nullopt_t) noexcept { + ref = nullptr; + return *this; + } + + // optional& operator=(const optional& rhs) noexcept { + // ref = rhs.ref; + // return *this; + // } + + // optional& operator=(optional&& rhs) noexcept { + // ref = rhs.ref; + // return *this; + // } + + template + auto operator=(U&& rhs) noexcept -> typename std::enable_if< + std::is_same::type, optional>::value, + optional&>::type { + ref = rhs.ref; + return *this; + } + + template + auto operator=(U&& rhs) noexcept -> typename std::enable_if< + !std::is_same::type, optional>::value, + optional&>::type = delete; + + void emplace(T& v) noexcept { + ref = detail_::static_addressof(v); + } + + void emplace(T&&) = delete; + + void swap(optional& rhs) noexcept { + std::swap(ref, rhs.ref); + } + + // 20.5.5.3, observers + constexpr T* operator->() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, ref); + } + + constexpr T& operator*() const { + return TR2_OPTIONAL_ASSERTED_EXPRESSION(ref, *ref); + } + + constexpr T& value() const { + return ref ? *ref + : (throw bad_optional_access("bad optional access"), *ref); + } + + explicit constexpr operator bool() const noexcept { + return ref != nullptr; + } + + constexpr bool has_value() const noexcept { + return ref != nullptr; + } + + template + constexpr typename std::decay::type value_or(V&& v) const { + return *this ? **this + : detail_::convert::type>( + constexpr_forward(v)); + } + + // x.x.x.x, modifiers + void reset() noexcept { + ref = nullptr; + } +}; + +template +class optional { + static_assert(sizeof(T) == 0, "optional rvalue references disallowed"); +}; + +// 20.5.8, Relational operators +template +constexpr bool operator==(const optional& x, const optional& y) { + return bool(x) != bool(y) ? false : bool(x) == false ? 
true : *x == *y; +} + +template +constexpr bool operator!=(const optional& x, const optional& y) { + return !(x == y); +} + +template +constexpr bool operator<(const optional& x, const optional& y) { + return (!y) ? false : (!x) ? true : *x < *y; +} + +template +constexpr bool operator>(const optional& x, const optional& y) { + return (y < x); +} + +template +constexpr bool operator<=(const optional& x, const optional& y) { + return !(y < x); +} + +template +constexpr bool operator>=(const optional& x, const optional& y) { + return !(x < y); +} + +// 20.5.9, Comparison with nullopt +template +constexpr bool operator==(const optional& x, nullopt_t) noexcept { + return (!x); +} + +template +constexpr bool operator==(nullopt_t, const optional& x) noexcept { + return (!x); +} + +template +constexpr bool operator!=(const optional& x, nullopt_t) noexcept { + return bool(x); +} + +template +constexpr bool operator!=(nullopt_t, const optional& x) noexcept { + return bool(x); +} + +template +constexpr bool operator<(const optional&, nullopt_t) noexcept { + return false; +} + +template +constexpr bool operator<(nullopt_t, const optional& x) noexcept { + return bool(x); +} + +template +constexpr bool operator<=(const optional& x, nullopt_t) noexcept { + return (!x); +} + +template +constexpr bool operator<=(nullopt_t, const optional&) noexcept { + return true; +} + +template +constexpr bool operator>(const optional& x, nullopt_t) noexcept { + return bool(x); +} + +template +constexpr bool operator>(nullopt_t, const optional&) noexcept { + return false; +} + +template +constexpr bool operator>=(const optional&, nullopt_t) noexcept { + return true; +} + +template +constexpr bool operator>=(nullopt_t, const optional& x) noexcept { + return (!x); +} + +// 20.5.10, Comparison with T +template +constexpr bool operator==(const optional& x, const T& v) { + return bool(x) ? *x == v : false; +} + +template +constexpr bool operator==(const T& v, const optional& x) { + return bool(x) ? v == *x : false; +} + +template +constexpr bool operator!=(const optional& x, const T& v) { + return bool(x) ? *x != v : true; +} + +template +constexpr bool operator!=(const T& v, const optional& x) { + return bool(x) ? v != *x : true; +} + +template +constexpr bool operator<(const optional& x, const T& v) { + return bool(x) ? *x < v : true; +} + +template +constexpr bool operator>(const T& v, const optional& x) { + return bool(x) ? v > *x : true; +} + +template +constexpr bool operator>(const optional& x, const T& v) { + return bool(x) ? *x > v : false; +} + +template +constexpr bool operator<(const T& v, const optional& x) { + return bool(x) ? v < *x : false; +} + +template +constexpr bool operator>=(const optional& x, const T& v) { + return bool(x) ? *x >= v : false; +} + +template +constexpr bool operator<=(const T& v, const optional& x) { + return bool(x) ? v <= *x : false; +} + +template +constexpr bool operator<=(const optional& x, const T& v) { + return bool(x) ? *x <= v : true; +} + +template +constexpr bool operator>=(const T& v, const optional& x) { + return bool(x) ? v >= *x : true; +} + +// Comparison of optional with T +template +constexpr bool operator==(const optional& x, const T& v) { + return bool(x) ? *x == v : false; +} + +template +constexpr bool operator==(const T& v, const optional& x) { + return bool(x) ? v == *x : false; +} + +template +constexpr bool operator!=(const optional& x, const T& v) { + return bool(x) ? 
*x != v : true; +} + +template +constexpr bool operator!=(const T& v, const optional& x) { + return bool(x) ? v != *x : true; +} + +template +constexpr bool operator<(const optional& x, const T& v) { + return bool(x) ? *x < v : true; +} + +template +constexpr bool operator>(const T& v, const optional& x) { + return bool(x) ? v > *x : true; +} + +template +constexpr bool operator>(const optional& x, const T& v) { + return bool(x) ? *x > v : false; +} + +template +constexpr bool operator<(const T& v, const optional& x) { + return bool(x) ? v < *x : false; +} + +template +constexpr bool operator>=(const optional& x, const T& v) { + return bool(x) ? *x >= v : false; +} + +template +constexpr bool operator<=(const T& v, const optional& x) { + return bool(x) ? v <= *x : false; +} + +template +constexpr bool operator<=(const optional& x, const T& v) { + return bool(x) ? *x <= v : true; +} + +template +constexpr bool operator>=(const T& v, const optional& x) { + return bool(x) ? v >= *x : true; +} + +// Comparison of optional with T +template +constexpr bool operator==(const optional& x, const T& v) { + return bool(x) ? *x == v : false; +} + +template +constexpr bool operator==(const T& v, const optional& x) { + return bool(x) ? v == *x : false; +} + +template +constexpr bool operator!=(const optional& x, const T& v) { + return bool(x) ? *x != v : true; +} + +template +constexpr bool operator!=(const T& v, const optional& x) { + return bool(x) ? v != *x : true; +} + +template +constexpr bool operator<(const optional& x, const T& v) { + return bool(x) ? *x < v : true; +} + +template +constexpr bool operator>(const T& v, const optional& x) { + return bool(x) ? v > *x : true; +} + +template +constexpr bool operator>(const optional& x, const T& v) { + return bool(x) ? *x > v : false; +} + +template +constexpr bool operator<(const T& v, const optional& x) { + return bool(x) ? v < *x : false; +} + +template +constexpr bool operator>=(const optional& x, const T& v) { + return bool(x) ? *x >= v : false; +} + +template +constexpr bool operator<=(const T& v, const optional& x) { + return bool(x) ? v <= *x : false; +} + +template +constexpr bool operator<=(const optional& x, const T& v) { + return bool(x) ? *x <= v : true; +} + +template +constexpr bool operator>=(const T& v, const optional& x) { + return bool(x) ? v >= *x : true; +} + +// 20.5.12, Specialized algorithms +template +void swap(optional& x, optional& y) noexcept(noexcept(x.swap(y))) { + x.swap(y); +} + +template +constexpr optional::type> make_optional(T&& v) { + return optional::type>(constexpr_forward(v)); +} + +template +constexpr optional make_optional(std::reference_wrapper v) { + return optional(v.get()); +} + +} // namespace tensorpipe + +namespace std { +template +struct hash> { + typedef typename hash::result_type result_type; + typedef tensorpipe::optional argument_type; + + constexpr result_type operator()(argument_type const& arg) const { + return arg ? std::hash{}(*arg) : result_type{}; + } +}; + +template +struct hash> { + typedef typename hash::result_type result_type; + typedef tensorpipe::optional argument_type; + + constexpr result_type operator()(argument_type const& arg) const { + return arg ? 
std::hash{}(*arg) : result_type{}; + } +}; +} // namespace std + +#undef TR2_OPTIONAL_REQUIRES +#undef TR2_OPTIONAL_ASSERTED_EXPRESSION diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config.h new file mode 100644 index 0000000000000000000000000000000000000000..1274da5b0a3d0066246b8f88e0d0992e98f01021 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config.h @@ -0,0 +1,14 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#define TENSORPIPE_HAS_SHM_TRANSPORT 1 +#define TENSORPIPE_HAS_IBV_TRANSPORT 1 + +#define TENSORPIPE_HAS_CMA_CHANNEL 1 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config_cuda.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..03591662ece703a2cf751927edc8036db964de8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/config_cuda.h @@ -0,0 +1,12 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#define TENSORPIPE_HAS_CUDA_IPC_CHANNEL 1 +#define TENSORPIPE_HAS_CUDA_GDR_CHANNEL 1 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h new file mode 100644 index 0000000000000000000000000000000000000000..16b795b50abc1e20254c4ce5ad2eb0c1da940078 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/context.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include + +namespace tensorpipe { + +class ContextImpl; +class Listener; +class Pipe; + +class ContextOptions { + public: + // The name should be a semantically meaningful description of this context. + // It will only be used for logging and debugging purposes, to identify the + // endpoints of a pipe. + ContextOptions&& name(std::string name) && { + name_ = std::move(name); + return std::move(*this); + } + + private: + std::string name_; + + friend ContextImpl; +}; + +class PipeOptions { + public: + // The name should be a semantically meaningful description of the context + // that the pipe is connecting to. It will only be used for logging and + // debugging purposes, to identify the endpoints of a pipe. 
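+  // For example (a minimal sketch; `context` and `url` are assumptions made
+  // for illustration, with `url` being an address for one of the registered
+  // transports):
+  //
+  //   auto pipe = context.connect(url, PipeOptions().remoteName("trainer-0"));
+  //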
+ PipeOptions&& remoteName(std::string remoteName) && { + remoteName_ = std::move(remoteName); + return std::move(*this); + } + + private: + std::string remoteName_; + + friend ContextImpl; +}; + +class Context final { + public: + explicit Context(ContextOptions opts = ContextOptions()); + + void registerTransport( + int64_t priority, + std::string transport, + std::shared_ptr context); + + void registerChannel( + int64_t priority, + std::string channel, + std::shared_ptr context); + + std::shared_ptr listen(const std::vector& urls); + + std::shared_ptr connect( + const std::string& url, + PipeOptions opts = PipeOptions()); + + // Put the context in a terminal state, in turn closing all of its pipes and + // listeners, and release its resources. This may be done asynchronously, in + // background. + void close(); + + // Wait for all resources to be released and all background activity to stop. + void join(); + + ~Context(); + + private: + // The implementation is managed by a shared_ptr because each child object + // will also hold a shared_ptr to it. However, its lifetime is tied to the one + // of this public object since when the latter is destroyed the implementation + // is closed and joined. + const std::shared_ptr impl_; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h new file mode 100644 index 0000000000000000000000000000000000000000..54842e908ab0db832048e59ae4c9886a66eef469 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/error.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { + +class LogicError final : public BaseError { + public: + explicit LogicError(std::string reason) : reason_(std::move(reason)) {} + + std::string what() const override; + + private: + const std::string reason_; +}; + +class ContextClosedError final : public BaseError { + public: + explicit ContextClosedError() {} + + std::string what() const override; +}; + +class ListenerClosedError final : public BaseError { + public: + explicit ListenerClosedError() {} + + std::string what() const override; +}; + +class PipeClosedError final : public BaseError { + public: + explicit PipeClosedError() {} + + std::string what() const override; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h new file mode 100644 index 0000000000000000000000000000000000000000..d11e4af333e059bfa05e3a4968bd54a608833320 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/listener.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace tensorpipe { + +class ContextImpl; +class ListenerImpl; +class Pipe; + +// The listener. +// +// Listeners are used to produce pipes. 
Depending on the type of the +// context, listeners may use a variety of addresses to listen on. For +// example, for TCP/IP sockets they listen on an IPv4 or IPv6 address, +// for Unix domain sockets they listen on a path, etcetera. +// +// A pipe can only be accepted from this listener after it has been +// fully established. This means that both its connection and all its +// side channels have been established. +// +class Listener final { + // Use the passkey idiom to allow make_shared to call what should be a private + // constructor. See https://abseil.io/tips/134 for more information. + struct ConstructorToken {}; + + public: + Listener( + ConstructorToken token, + std::shared_ptr context, + std::string id, + const std::vector& urls); + + // + // Entry points for user code + // + + using accept_callback_fn = + std::function)>; + + void accept(accept_callback_fn fn); + + // Returns map with the materialized address of listeners by transport. + // + // If you don't bind a transport listener to a specific port or address, it + // may generate its address automatically. Then, in order to connect to the + // listener, the user must use a separate mechanism to communicate the + // materialized address to whoever wants to connect. + // + const std::map& addresses() const; + + // Returns materialized address for specific transport. + // + // See `addresses()` for more information. + // + const std::string& address(const std::string& transport) const; + + // Returns URL with materialized address for specific transport. + // + // See `addresses()` for more information. + // + std::string url(const std::string& transport) const; + + // Put the listener in a terminal state, aborting its pending operations and + // rejecting future ones, and release its resrouces. This may be carried out + // asynchronously, in background. Since the pipes may occasionally use the + // listener to open new connections, closing a listener may trigger errors + // in the pipes. + void close(); + + ~Listener(); + + private: + // Using a shared_ptr allows us to detach the lifetime of the implementation + // from the public object's one and perform the destruction asynchronously. + const std::shared_ptr impl_; + + // Allow context to access constructor token. + friend ContextImpl; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h new file mode 100644 index 0000000000000000000000000000000000000000..364962ebcae3ed521978f438d11d00459b49f8d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/message.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace tensorpipe { + +// Messages consist of a primary buffer and zero or more separate +// buffers. The primary buffer is always a host-side memory region that +// contains a serialized version of the message we're dealing with. This +// serialized message, in turn, may have references to the separate +// buffers that accompany the primary buffer. These separate buffers may +// point to any type of memory, host-side or device-side. 
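+// A minimal sketch of sending a single CPU payload (illustrative only; `pipe`
+// and `buf` are assumptions, not part of this header):
+//
+//   Message message;
+//   Message::Payload payload;
+//   payload.data = buf.data();    // e.g. a std::vector<uint8_t> owned by the caller
+//   payload.length = buf.size();
+//   message.payloads.push_back(std::move(payload));
+//   pipe->write(std::move(message), [](const Error& error) {
+//     // The message's buffers may be reused or freed once this callback
+//     // has fired.
+//   });
+//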
+// +class Message final { + public: + std::string metadata; + + struct Payload { + void* data{nullptr}; + size_t length{0}; + + // Users may include arbitrary metadata in the following fields. + // This may contain allocation hints for the receiver, for example. + std::string metadata; + }; + + // Holds the payloads that are transferred over the primary connection. + std::vector payloads; + + struct Tensor { + tensorpipe::Buffer buffer; + size_t length{0}; + + // Users may optionally specify the target device, on which the receiver + // should allocate memory for this tensor. If left unset, the receiver will + // choose one at their convenience. + optional targetDevice; + + // Users may include arbitrary metadata in the following field. + // This may contain allocation hints for the receiver, for example. + std::string metadata; + }; + + // Holds the tensors that are offered to the side channels. + std::vector tensors; +}; + +// Descriptors consist of metadata required by the receiver to allocate memory +// for an incoming message. +class Descriptor final { + public: + std::string metadata; + + struct Payload { + size_t length{0}; + std::string metadata; + }; + std::vector payloads; + + struct Tensor { + size_t length{0}; + + // This is the sender-side device from which this tensor is being sent. + Device sourceDevice; + + // The sender may optionally specify a target device, in which case the + // receiver must allocate memory for this tensor on the specified device. + optional targetDevice; + + std::string metadata; + }; + std::vector tensors; +}; + +// Allocations consist of actual memory allocations provided by the receiver for +// an incoming message. They must match the length and target devices specified +// in the corresponding Descriptor. +class Allocation final { + public: + struct Payload { + void* data{nullptr}; + }; + std::vector payloads; + + struct Tensor { + tensorpipe::Buffer buffer; + }; + std::vector tensors; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h new file mode 100644 index 0000000000000000000000000000000000000000..be7ca2bfdf6c41a2b4c97854066cb39b73c94de5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/core/pipe.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +namespace tensorpipe { + +class ContextImpl; +class ListenerImpl; +class PipeImpl; + +// The pipe. +// +// Pipes represent a set of connections between a pair of processes. +// Unlike POSIX pipes, they are message oriented instead of byte +// oriented. Messages that are sent through the pipe may use whatever +// channels are at their disposal to make it happen. If the pair of +// processes happen to be colocated on the same machine, they may +// leverage a region of shared memory to communicate the primary +// buffer of a message. Secondary buffers may use shared memory as +// well, if they're located in CPU memory, or use a CUDA device to +// device copy if they're located in NVIDIA GPU memory. If the pair is +// located across the world, they may simply use a set of TCP +// connections to communicate. 
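+// A minimal receiving-side sketch (illustrative only; `pipe` and the buffer
+// handling below are assumptions, not part of this header). The receiver
+// first reads a descriptor, allocates matching memory, then reads the data:
+//
+//   pipe->readDescriptor([&](const Error& error, Descriptor descriptor) {
+//     if (error) {
+//       return;
+//     }
+//     Allocation allocation;
+//     allocation.payloads.resize(descriptor.payloads.size());
+//     // ... point each allocation.payloads[i].data at a buffer of
+//     // descriptor.payloads[i].length bytes ...
+//     pipe->read(std::move(allocation), [](const Error& error) {
+//       // Payloads and tensors have been filled in (or an error occurred).
+//     });
+//   });
+//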
+// +class Pipe final { + // Use the passkey idiom to allow make_shared to call what should be a private + // constructor. See https://abseil.io/tips/134 for more information. + struct ConstructorToken {}; + + public: + // + // Initialization + // + + Pipe( + ConstructorToken token, + std::shared_ptr context, + std::string id, + std::string remoteName, + const std::string& url); + + Pipe(ConstructorToken token, std::shared_ptr impl); + + // + // Entry points for user code + // + + using read_descriptor_callback_fn = + std::function; + + void readDescriptor(read_descriptor_callback_fn fn); + + using read_callback_fn = std::function; + + void read(Allocation allocation, read_callback_fn fn); + + using write_callback_fn = std::function; + + void write(Message message, write_callback_fn fn); + + // Retrieve the user-defined name that was given to the constructor of the + // context on the remote side, if any (if not, this will be the empty string). + // This is intended to help in logging and debugging only. + const std::string& getRemoteName(); + + // Put the pipe in a terminal state, aborting its pending operations and + // rejecting future ones, and release its resrouces. This may be carried out + // asynchronously, in background. + void close(); + + ~Pipe(); + + private: + // Using a shared_ptr allows us to detach the lifetime of the implementation + // from the public object's one and perform the destruction asynchronously. + const std::shared_ptr impl_; + + // Allow context to access constructor token. + friend ContextImpl; + // Allow listener to access constructor token. + friend ListenerImpl; +}; + +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h new file mode 100644 index 0000000000000000000000000000000000000000..9344a2e480b24bd77b8c108b8ef1adb1404af616 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/context.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +namespace tensorpipe { +namespace transport { + +class Connection; +class Listener; + +class Context { + public: + virtual std::shared_ptr connect(std::string addr) = 0; + + virtual std::shared_ptr listen(std::string addr) = 0; + + // Return whether the context is able to operate correctly. + // + // Some transport types may be unable to perform as intended under + // some circumstances (e.g., specialized hardware unavailable, lack + // of permissions). They can report it through this method in order + // for the core context to avoid registering them in the first place. + // + virtual bool isViable() const = 0; + + // Return string to describe the domain for this context. + // + // Two processes with a context of the same type can connect to each + // other if one side's domain descriptor is "accepted" by the other + // one, using the canCommunicateWithRemote method below. That method + // must be symmetric, and unless overridden defaults to string + // comparison. + // + // For example, for a transport that leverages TCP/IP, this may be + // as simple as the address family (assuming we can route between + // any two processes). 
For a transport that leverages shared memory, + // this descriptor must uniquely identify the machine, such that + // only co-located processes generate the same domain descriptor. + // + virtual const std::string& domainDescriptor() const = 0; + + // Compare local and remote domain descriptor for compatibility. + // + // Determine whether a connection can be opened between this context + // and a remote one that has the given domain descriptor. This + // function needs to be symmetric: if we called this method on the + // remote context with the local descriptor we should get the same + // answer. Unless overridden it defaults to string comparison. + // + virtual bool canCommunicateWithRemote( + const std::string& remoteDomainDescriptor) const { + return domainDescriptor() == remoteDomainDescriptor; + } + + // Tell the context what its identifier is. + // + // This is only supposed to be called from the high-level context or from + // channel contexts. It will only used for logging and debugging purposes. + virtual void setId(std::string id) = 0; + + virtual void close() = 0; + + virtual void join() = 0; + + virtual ~Context() = default; +}; + +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h new file mode 100644 index 0000000000000000000000000000000000000000..6ba124bbeca26608fb6983cb1c7bde7cee79d202 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/error.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { + +class ContextClosedError final : public BaseError { + public: + ContextClosedError() {} + + std::string what() const override; +}; + +class ListenerClosedError final : public BaseError { + public: + ListenerClosedError() {} + + std::string what() const override; +}; + +class ConnectionClosedError final : public BaseError { + public: + ConnectionClosedError() {} + + std::string what() const override; +}; + +class ContextNotViableError final : public BaseError { + public: + ContextNotViableError() {} + + std::string what() const override; +}; + +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h new file mode 100644 index 0000000000000000000000000000000000000000..8b690ff6cbd127ca22011bde8672e2392f3ccad4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/error.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace ibv { + +class IbvError final : public BaseError { + public: + explicit IbvError(std::string error) : error_(error) {} + + std::string what() const override; + + private: + std::string error_; +}; + +class GetaddrinfoError final : public BaseError { + public: + explicit GetaddrinfoError(int error) : error_(error) {} + + std::string what() const override; + + private: + int error_; +}; + +class NoAddrFoundError final : public BaseError { + public: + NoAddrFoundError() {} + + std::string what() const override; +}; + +} // namespace ibv +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..17372df56aabb476d48262ad6ad29909eb6e51c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace ibv { + +std::shared_ptr create(); + +} // namespace ibv +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h new file mode 100644 index 0000000000000000000000000000000000000000..3cffa2f42b3f56ebc38677f6d781a38b81cc9704 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/ibv/utility.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace ibv { + +std::tuple lookupAddrForIface(std::string iface); + +std::tuple lookupAddrForHostname(); + +} // namespace ibv +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..ae5e6db0a18adef56f21d747dd2fa28e0f6d47b6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/shm/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace shm { + +std::shared_ptr create(); + +} // namespace shm +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h new file mode 100644 index 0000000000000000000000000000000000000000..19333e2e8647ad9758e92094c63f25ba8ee60177 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/error.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace uv { + +class UVError final : public BaseError { + public: + explicit UVError(int error) : error_(error) {} + + std::string what() const override; + + private: + int error_; +}; + +class NoAddrFoundError final : public BaseError { + public: + NoAddrFoundError() {} + + std::string what() const override; +}; + +} // namespace uv +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..364a5b61015cc1a994078b63461ebcd71a874e21 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/factory.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include + +#include + +namespace tensorpipe { +namespace transport { +namespace uv { + +std::shared_ptr create(); + +} // namespace uv +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h new file mode 100644 index 0000000000000000000000000000000000000000..0a4afba0c995086738446a954dde27c29ea8031d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/tensorpipe/transport/uv/utility.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +#include + +#include +#include + +namespace tensorpipe { +namespace transport { +namespace uv { + +std::tuple lookupAddrForIface(std::string iface); + +std::tuple lookupAddrForHostname(); + +// Try to replicate the same logic used by NCCL to find a node's own address. +// Roughly, it returns the "first" usable address it can find, and prioritizes +// the interfaces with an `ib` prefix and de-prioritizes those with a `docker` +// or `lo` prefix. It can optionally only return only IPv4 or IPv4 addresses. 
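// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored header: the comment above describes
// a NCCL-style interface-selection heuristic. The helper below merely restates
// that ranking rule for illustration; it is not the actual implementation of
// lookupAddrLikeNccl().
#include <string>

// Lower value = preferred. InfiniBand-style names ("ib0", ...) win, ordinary
// interfaces come next, and loopback/docker bridges come last.
int interfacePriority(const std::string& ifaceName) {
  if (ifaceName.rfind("ib", 0) == 0) {
    return 0;
  }
  if (ifaceName.rfind("docker", 0) == 0 || ifaceName.rfind("lo", 0) == 0) {
    return 2;
  }
  return 1;
}
// ---------------------------------------------------------------------------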
+std::tuple lookupAddrLikeNccl( + optional familyFilter = nullopt); + +} // namespace uv +} // namespace transport +} // namespace tensorpipe diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h new file mode 100644 index 0000000000000000000000000000000000000000..58327bae896d61d5ecb287e32bdd91cacaf73c45 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h @@ -0,0 +1,144 @@ +#pragma once +#ifdef USE_CUDA +#include +#include +#include +#include +#include +#include +#include +namespace torch { + +TORCH_CUDA_CU_API bool CudaIPCCollect(); + +struct CudaIPCReceivedData final { + CudaIPCReceivedData() = default; + explicit CudaIPCReceivedData(std::shared_ptr shared_ptr) + : shared_ptr_(std::move(shared_ptr)) {} + std::shared_ptr shared_ptr_; +}; + +struct CudaIPCSentData final { + std::string handle_; + uint64_t offset_; + uint64_t* counter_ptr_; // Reference counter shared memory block + at::DataPtr original_ptr_; // Original mem allocation + cudaEvent_t event_; // Sync cuEventDestroy + bool event_sync_required_; + at::Device device_; + + CudaIPCSentData( + std::string handle, + uint64_t offset, + uint64_t* counter_ptr, + at::Device device); + ~CudaIPCSentData(); + + uint64_t counter_value(); + std::string handle() { + return handle_; + } + uint64_t offset() { + return offset_; + } + void set_original_ptr(at::DataPtr data_ptr) { + original_ptr_ = std::move(data_ptr); + } +}; + +TORCH_CUDA_CU_API at::DataPtr GetNewRefCountedSentData( + void* data, + at::Device device); + +namespace { + +inline constexpr int64_t CUDA_IPC_REF_COUNTER_FILE_SIZE = 10000; +inline constexpr int64_t CUDA_IPC_WARN_AFTER_X_BLOCKS_IN_LIMBO = 1000; +// This was determined empirically that CUDA (v10.1 and below) have the limit +// on the number of recorded blocking interprocess events. It is around ~22,000. +// And to give us leeway, we picked 1000 as it gives us enough events to share +// tensors effectively. 
+inline constexpr int64_t CUDA_IPC_MAXIMUM_EVENTS_TO_USE = 1000; + +// All to be deleted data blocks with non zero reference counter goes there +struct CudaIPCSentDataLimbo final { + ~CudaIPCSentDataLimbo(); + bool collect(); + void add(std::unique_ptr shared_block); + uint64_t size(); + + private: + // TODO: Can be changed to FIFO in order to avoid full traverse on every + // collect() + std::vector> shared_blocks_; + std::mutex limbo_mutex_; +}; + +struct CudaIPCRefCountersFile final { + CudaIPCRefCountersFile( + std::string handle, + uint64_t size, + at::DataPtr data_ptr) + : next_offset_(0), + size_(size), + used_slots_(0), + handle_(std::move(handle)), + refcounted_shared_mem_(std::move(data_ptr)) {} + + uint64_t* counter_ptr() { + return static_cast(refcounted_shared_mem_.get()) + next_offset_; + } + + void set_counter(uint64_t value) { + *counter_ptr() = value; + } + + bool have_offsets() { + return next_offset_ < size_; + } + + bool offsets_in_use() { + return used_slots_; + } + + uint64_t get_offset() { + return next_offset_; + } + + void rotate_offset() { + next_offset_++; + used_slots_++; + } + + void return_offset(uint64_t offset /* unused */) { + used_slots_--; + } + + std::string handle() { + return handle_; + } + + private: + uint64_t next_offset_; + uint64_t size_; + uint64_t used_slots_; + std::string handle_; + at::DataPtr refcounted_shared_mem_; +}; + +} // namespace +} // namespace torch + +namespace c10 { +namespace { +class CudaIPCCollectCallback : public FreeMemoryCallback { + public: + bool Execute() override { + return torch::CudaIPCCollect(); + } +}; +} // namespace + +} // namespace c10 + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h new file mode 100644 index 0000000000000000000000000000000000000000..3bafd470f1f7be827de490aa9c086441c38aa32f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h @@ -0,0 +1,6 @@ +#pragma once + +#include + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays) +extern PyMethodDef DataLoaderMethods[]; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..665c38bf035d45eafc0575f76a49cacfb9169371 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct TORCH_API THPDevice { + PyObject_HEAD at::Device device; +}; + +TORCH_API extern PyTypeObject THPDeviceType; + +inline bool THPDevice_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPDeviceType; +} + +TORCH_API PyObject* THPDevice_New(const at::Device& device); + +TORCH_API void THPDevice_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h new file mode 100644 index 0000000000000000000000000000000000000000..70cee858f9f1076cab2179581d220035fe7005e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +const int DTYPE_NAME_LEN = 64; + +struct TORCH_API 
THPDtype { + PyObject_HEAD at::ScalarType scalar_type; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[DTYPE_NAME_LEN + 1]; +}; + +TORCH_API extern PyTypeObject THPDtypeType; + +inline bool THPDtype_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPDtypeType; +} + +inline bool THPPythonScalarType_Check(PyObject* obj) { + return obj == (PyObject*)(&PyFloat_Type) || + obj == (PyObject*)(&PyBool_Type) || obj == (PyObject*)(&PyLong_Type); +} + +TORCH_API PyObject* THPDtype_New( + at::ScalarType scalar_type, + const std::string& name); + +void THPDtype_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h new file mode 100644 index 0000000000000000000000000000000000000000..1fd0a9d418fb3f7231e02432429eb51749bbed9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h @@ -0,0 +1,36 @@ +#pragma once + +// Provides conversions between Python tensor objects and at::Tensor. + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct THPDtype; +struct THPLayout; + +namespace c10 { +struct Storage; +} + +namespace torch { +void registerDtypeObject(THPDtype* dtype, at::ScalarType scalarType); +void registerLayoutObject(THPLayout* thp_layout, at::Layout layout); + +TORCH_PYTHON_API PyObject* createPyObject(const at::Storage& storage); +at::Storage createStorage(PyObject* obj); +std::tuple createStorageGetType( + PyObject* obj); +bool isStorage(PyObject* obj); + +TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType); +THPLayout* getTHPLayout(at::Layout layout); +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..d2ae5f3f8490589a58f012eb70aec4e39aa2832f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h @@ -0,0 +1,414 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(USE_DISTRIBUTED) && defined(USE_C10D) +#include +#endif + +static inline void PyErr_SetString(PyObject* type, const std::string& message) { + PyErr_SetString(type, message.c_str()); +} +/// NOTE [ Conversion Cpp Python Warning ] +/// The warning handler cannot set python warnings immediately +/// as it requires acquiring the GIL (potential deadlock) +/// and would need to cleanly exit if the warning raised a +/// python error. To solve this, we buffer the warnings and +/// process them when we go back to python. +/// This requires the two try/catch blocks below to handle the +/// following cases: +/// - If there is no Error raised in the inner try/catch, the +/// buffered warnings are processed as python warnings. +/// - If they don't raise an error, the function process with the +/// original return code. +/// - If any of them raise an error, the error is set (PyErr_*) and +/// the destructor will raise a cpp exception python_error() that +/// will be caught by the outer try/catch that will be able to change +/// the return value of the function to reflect the error. 
+/// - If an Error was raised in the inner try/catch, the inner try/catch +/// must set the python error. The buffered warnings are then +/// processed as cpp warnings as we cannot predict before hand +/// whether a python warning will raise an error or not and we +/// cannot handle two errors at the same time. +/// This advanced handler will only be used in the current thread. +/// If any other thread is used, warnings will be processed as +/// cpp warnings. +#define HANDLE_TH_ERRORS \ + try { \ + torch::PyWarningHandler __enforce_warning_buffer; \ + try { +#define _CATCH_GENERIC_ERROR(ErrorType, PythonErrorType, retstmnt) \ + catch (const c10::ErrorType& e) { \ + auto msg = torch::get_cpp_stacktraces_enabled() \ + ? e.what() \ + : e.what_without_backtrace(); \ + PyErr_SetString(PythonErrorType, torch::processErrorMsg(msg)); \ + retstmnt; \ + } + +// Only catch torch-specific exceptions +#define CATCH_CORE_ERRORS(retstmnt) \ + catch (python_error & e) { \ + e.restore(); \ + retstmnt; \ + } \ + catch (py::error_already_set & e) { \ + e.restore(); \ + retstmnt; \ + } \ + _CATCH_GENERIC_ERROR(IndexError, PyExc_IndexError, retstmnt) \ + _CATCH_GENERIC_ERROR(ValueError, PyExc_ValueError, retstmnt) \ + _CATCH_GENERIC_ERROR(TypeError, PyExc_TypeError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + NotImplementedError, PyExc_NotImplementedError, retstmnt) \ + _CATCH_GENERIC_ERROR(LinAlgError, THPException_LinAlgError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + OutOfMemoryError, THPException_OutOfMemoryError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + DistBackendError, THPException_DistBackendError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + DistNetworkError, THPException_DistNetworkError, retstmnt) \ + _CATCH_GENERIC_ERROR(DistStoreError, THPException_DistStoreError, retstmnt) \ + _CATCH_GENERIC_ERROR(DistError, THPException_DistError, retstmnt) \ + _CATCH_GENERIC_ERROR(Error, PyExc_RuntimeError, retstmnt) \ + catch (torch::PyTorchError & e) { \ + auto msg = torch::processErrorMsg(e.what()); \ + PyErr_SetString(e.python_type(), msg); \ + retstmnt; \ + } + +#define CATCH_TH_ERRORS(retstmnt) CATCH_CORE_ERRORS(retstmnt) + +#define CATCH_ALL_ERRORS(retstmnt) \ + CATCH_TH_ERRORS(retstmnt) \ + catch (const std::exception& e) { \ + auto msg = torch::processErrorMsg(e.what()); \ + PyErr_SetString(PyExc_RuntimeError, msg); \ + retstmnt; \ + } + +#define END_HANDLE_TH_ERRORS_PYBIND \ + } \ + catch (...) { \ + __enforce_warning_buffer.set_in_exception(); \ + throw; \ + } \ + } \ + catch (py::error_already_set & e) { \ + throw; \ + } \ + catch (py::builtin_exception & e) { \ + throw; \ + } \ + catch (torch::jit::JITException & e) { \ + throw; \ + } \ + catch (const std::exception& e) { \ + torch::translate_exception_to_python(std::current_exception()); \ + throw py::error_already_set(); \ + } + +#define END_HANDLE_TH_ERRORS_RET(retval) \ + } \ + catch (...) { \ + __enforce_warning_buffer.set_in_exception(); \ + throw; \ + } \ + } \ + catch (const std::exception& e) { \ + torch::translate_exception_to_python(std::current_exception()); \ + return retval; \ + } + +#define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr) + +extern PyObject *THPException_FatalError, *THPException_LinAlgError, + *THPException_OutOfMemoryError, *THPException_DistError, + *THPException_DistBackendError, *THPException_DistNetworkError, + *THPException_DistStoreError; + +// Throwing this exception means that the python error flags have been already +// set and control should be immediately returned to the interpreter. 
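// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored header: the macro pair above is
// meant to bracket the body of a CPython entry point, so that C++ exceptions
// and buffered warnings are converted into Python errors/warnings before
// control returns to the interpreter. A minimal sketch of the pattern;
// THPExample_frobnicate and its check are made up for illustration.
#include <c10/util/Exception.h>
#include <torch/csrc/Exceptions.h>

static PyObject* THPExample_frobnicate(PyObject* self, PyObject* arg) {
  (void)self;
  HANDLE_TH_ERRORS
  TORCH_CHECK(arg != Py_None, "expected a value, got None");
  // ... real work goes here, possibly throwing c10::Error subclasses ...
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS // expands to END_HANDLE_TH_ERRORS_RET(nullptr)
}
// ---------------------------------------------------------------------------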
+struct python_error : public std::exception { + python_error() : type(nullptr), value(nullptr), traceback(nullptr) {} + + python_error(const python_error& other) + : type(other.type), + value(other.value), + traceback(other.traceback), + message(other.message) { + pybind11::gil_scoped_acquire gil; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(traceback); + } + + python_error(python_error&& other) noexcept + : type(other.type), + value(other.value), + traceback(other.traceback), + message(std::move(other.message)) { + other.type = nullptr; + other.value = nullptr; + other.traceback = nullptr; + } + + ~python_error() override { + if (type || value || traceback) { + pybind11::gil_scoped_acquire gil; + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + } + } + + const char* what() const noexcept override { + return message.c_str(); + } + + void build_message() { + // Ensure we have the GIL. + pybind11::gil_scoped_acquire gil; + + // No errors should be set when we enter the function since PyErr_Fetch + // clears the error indicator. + TORCH_INTERNAL_ASSERT(!PyErr_Occurred()); + + // Default message. + message = "python_error"; + + // Try to retrieve the error message from the value. + if (value != nullptr) { + // Reference count should not be zero. + TORCH_INTERNAL_ASSERT(Py_REFCNT(value) > 0); + + PyObject* pyStr = PyObject_Str(value); + if (pyStr != nullptr) { + PyObject* encodedString = + PyUnicode_AsEncodedString(pyStr, "utf-8", "strict"); + if (encodedString != nullptr) { + char* bytes = PyBytes_AS_STRING(encodedString); + if (bytes != nullptr) { + // Set the message. + message = std::string(bytes); + } + Py_XDECREF(encodedString); + } + Py_XDECREF(pyStr); + } + } + + // Clear any errors since we don't want to propagate errors for functions + // that are trying to build a string for the error message. + PyErr_Clear(); + } + + /** Saves the exception so that it can be re-thrown on a different thread */ + inline void persist() { + if (type) + return; // Don't overwrite exceptions + // PyErr_Fetch overwrites the pointers + pybind11::gil_scoped_acquire gil; + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + PyErr_Fetch(&type, &value, &traceback); + build_message(); + } + + /** Sets the current Python error from this exception */ + inline void restore() { + if (!type) + return; + // PyErr_Restore steals references + pybind11::gil_scoped_acquire gil; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(traceback); + PyErr_Restore(type, value, traceback); + } + + PyObject* type; + PyObject* value; + PyObject* traceback; + + // Message to return to the user when 'what()' is invoked. 
+ std::string message; +}; + +bool THPException_init(PyObject* module); + +namespace torch { + +// Set python current exception from a C++ exception +TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&); + +TORCH_PYTHON_API std::string processErrorMsg(std::string str); + +// Abstract base class for exceptions which translate to specific Python types +struct PyTorchError : public std::exception { + PyTorchError() = default; + PyTorchError(std::string msg_) : msg(std::move(msg_)) {} + virtual PyObject* python_type() = 0; + const char* what() const noexcept override { + return msg.c_str(); + } + std::string msg; +}; + +// Declare a printf-like function on gcc & clang +// The compiler can then warn on invalid format specifiers +#ifdef __GNUC__ +#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \ + __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX))) +#else +#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) +#endif + +// Translates to Python IndexError +struct IndexError : public PyTorchError { + using PyTorchError::PyTorchError; + IndexError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_IndexError; + } +}; + +// Translates to Python TypeError +struct TypeError : public PyTorchError { + using PyTorchError::PyTorchError; + TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_TypeError; + } +}; + +// Translates to Python ValueError +struct ValueError : public PyTorchError { + using PyTorchError::PyTorchError; + TORCH_PYTHON_API ValueError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_ValueError; + } +}; + +// Translates to Python NotImplementedError +struct NotImplementedError : public PyTorchError { + NotImplementedError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + NotImplementedError() = default; + PyObject* python_type() override { + return PyExc_NotImplementedError; + } +}; + +// Translates to Python AttributeError +struct AttributeError : public PyTorchError { + AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_AttributeError; + } +}; + +// Translates to Python LinAlgError +struct LinAlgError : public PyTorchError { + LinAlgError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return THPException_LinAlgError; + } +}; + +// ATen warning handler for Python +struct PyWarningHandler { + // Move actual handler into a separate class with a noexcept + // destructor. Otherwise, we need to force all WarningHandler + // subclasses to have a noexcept(false) destructor. + struct InternalHandler : at::WarningHandler { + ~InternalHandler() override = default; + void process(const c10::Warning& warning) override; + + std::vector warning_buffer_; + }; + + public: + /// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification + TORCH_PYTHON_API PyWarningHandler() noexcept(true); + // NOLINTNEXTLINE(bugprone-exception-escape) + TORCH_PYTHON_API ~PyWarningHandler() noexcept(false); + + /** Call if an exception has been thrown + + * Necessary to determine if it is safe to throw from the desctructor since + * std::uncaught_exception is buggy on some platforms and generally + * unreliable across dynamic library calls. 
+ */ + void set_in_exception() { + in_exception_ = true; + } + + private: + InternalHandler internal_handler_; + at::WarningHandler* prev_handler_; + bool in_exception_; +}; + +namespace detail { +template +using Arg = typename invoke_traits::template arg::type; + +template +auto wrap_pybind_function_impl_( + Func&& f, + std::index_sequence, + bool release_gil) { + using result_type = typename invoke_traits::result_type; + namespace py = pybind11; + + // f=f is needed to handle function references on older compilers + return [f = std::forward(f), + release_gil](Arg... args) -> result_type { + HANDLE_TH_ERRORS + if (release_gil) { + py::gil_scoped_release no_gil; + return c10::guts::invoke(f, std::forward>(args)...); + } else { + return c10::guts::invoke(f, std::forward>(args)...); + } + END_HANDLE_TH_ERRORS_PYBIND + }; +} +} // namespace detail + +// Wrap a function with TH error and warning handling. +// Returns a function object suitable for registering with pybind11. +template +auto wrap_pybind_function(Func&& f) { + using traits = invoke_traits; + return torch::detail::wrap_pybind_function_impl_( + std::forward(f), std::make_index_sequence{}, false); +} + +// Wrap a function with TH error, warning handling and releases the GIL. +// Returns a function object suitable for registering with pybind11. +template +auto wrap_pybind_function_no_gil(Func&& f) { + using traits = invoke_traits; + return torch::detail::wrap_pybind_function_impl_( + std::forward(f), std::make_index_sequence{}, true); +} + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h new file mode 100644 index 0000000000000000000000000000000000000000..4bcd910f1279534c63b507beb75063953b6d5de3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +#ifdef THP_BUILD_MAIN_LIB +#define TORCH_PYTHON_API C10_EXPORT +#else +#define TORCH_PYTHON_API C10_IMPORT +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h new file mode 100644 index 0000000000000000000000000000000000000000..f5b7b4661eb5851ac77a6dc25192b65a6e125b0a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPGenerator { + PyObject_HEAD at::Generator cdata; +}; + +// Creates a new Python object wrapping the default at::Generator. The reference +// is borrowed. The caller should ensure that the at::Generator object lifetime +// last at least as long as the Python wrapper. +TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator( + at::Generator cdata); + +#define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass) + +TORCH_PYTHON_API extern PyObject* THPGeneratorClass; + +bool THPGenerator_init(PyObject* module); + +TORCH_PYTHON_API PyObject* THPGenerator_Wrap(at::Generator gen); + +// Creates a new Python object for a Generator. The Generator must not already +// have a PyObject* associated with it. 
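// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored headers: wrap_pybind_function()
// (defined above in Exceptions.h) is how pybind11-bound C++ callables pick up
// HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS_PYBIND handling without spelling
// out the macros. A minimal sketch; the module name and bound function are
// made up for illustration.
#include <pybind11/pybind11.h>
#include <c10/util/Exception.h>
#include <torch/csrc/Exceptions.h>

int addChecked(int a, int b) {
  TORCH_CHECK(a >= 0 && b >= 0, "expected non-negative inputs");
  return a + b;
}

PYBIND11_MODULE(example_ext, m) {
  // Exceptions thrown by addChecked are translated into Python errors, and
  // buffered ATen warnings are flushed, by the wrapper returned here.
  m.def("add_checked", torch::wrap_pybind_function(addChecked));
}
// ---------------------------------------------------------------------------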
+PyObject* THPGenerator_NewWithVar(PyTypeObject* type, at::Generator gen); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h new file mode 100644 index 0000000000000000000000000000000000000000..265582e0ddfaea8f997dc15ccc20fa2800db54b0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include + +#include + +const int LAYOUT_NAME_LEN = 64; + +struct THPLayout { + PyObject_HEAD at::Layout layout; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[LAYOUT_NAME_LEN + 1]; +}; + +extern PyTypeObject THPLayoutType; + +inline bool THPLayout_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPLayoutType; +} + +PyObject* THPLayout_New(at::Layout layout, const std::string& name); + +void THPLayout_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..7f60a0ba0282c39cb8c72876a4288560ec280b93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +#include + +#include + +const int MEMORY_FORMAT_NAME_LEN = 64; + +struct THPMemoryFormat { + PyObject_HEAD at::MemoryFormat memory_format; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[MEMORY_FORMAT_NAME_LEN + 1]; +}; + +extern PyTypeObject THPMemoryFormatType; + +inline bool THPMemoryFormat_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPMemoryFormatType; +} + +PyObject* THPMemoryFormat_New( + at::MemoryFormat memory_format, + const std::string& name); + +void THPMemoryFormat_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h new file mode 100644 index 0000000000000000000000000000000000000000..71ff8c4fcb85e2c9e55fb4c0660ef506b6fda6e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h @@ -0,0 +1,6 @@ +#ifndef THP_MODULE_INC +#define THP_MODULE_INC + +#define THP_STATELESS_ATTRIBUTE_NAME "_torch" + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..30809ff10be90e2d091002ce4c2abb8e731b8d0b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h @@ -0,0 +1,7 @@ +#pragma once + +#include +#include + +TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter(); +TORCH_PYTHON_API bool isMainPyInterpreter(); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h new file mode 100644 index 0000000000000000000000000000000000000000..fcb75304c0ed0bf885a058e6f08d0cc8fe23ec3b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include + +#include + +constexpr int QSCHEME_NAME_LEN = 64; + +struct THPQScheme { + PyObject_HEAD at::QScheme qscheme; + // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[QSCHEME_NAME_LEN + 1]; +}; + +extern PyTypeObject THPQSchemeType; + +inline bool THPQScheme_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPQSchemeType; +} + +PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name); + +void THPQScheme_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h new file mode 100644 index 0000000000000000000000000000000000000000..dd4283f7d77234cbd3dac815456981b22a4dad00 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +extern PyTypeObject THPSizeType; + +#define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType) + +PyObject* THPSize_New(const torch::autograd::Variable& t); +PyObject* THPSize_NewFromSizes(int64_t dim, const int64_t* sizes); +PyObject* THPSize_NewFromSymSizes(const at::Tensor& t); + +void THPSize_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h new file mode 100644 index 0000000000000000000000000000000000000000..fa58768df3203f3efbbc31af739bf21dd061fdc8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h @@ -0,0 +1,56 @@ +#ifndef THP_STORAGE_INC +#define THP_STORAGE_INC + +#include + +#define THPStorageStr "torch.UntypedStorage" + +struct THPStorage { + PyObject_HEAD; + c10::MaybeOwned cdata; + bool is_hermetic; +}; + +TORCH_PYTHON_API PyObject* THPStorage_Wrap(c10::Storage storage); +TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage( + PyTypeObject* type, + c10::Storage _storage, + c10::impl::PyInterpreterStatus status, + bool allow_preexisting_pyobj = false); +extern PyTypeObject* THPStorageClass; + +static inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) { + return tp == THPStorageClass; +} + +static inline bool THPStorage_CheckExact(PyObject* obj) { + return THPStorage_CheckTypeExact(Py_TYPE(obj)); +} + +inline bool THPStorage_Check(PyObject* obj) { + if (!THPStorageClass) + return false; + + const auto result = PyObject_IsInstance(obj, (PyObject*)THPStorageClass); + if (result == -1) + throw python_error(); + return result; +} + +bool THPStorage_init(PyObject* module); +void THPStorage_postInit(PyObject* module); + +void THPStorage_assertNotNull(THPStorage* storage); +void THPStorage_assertNotNull(PyObject* obj); + +extern PyTypeObject THPStorageType; + +inline const c10::Storage& THPStorage_Unpack(THPStorage* storage) { + return *storage->cdata; +} + +inline const c10::Storage& THPStorage_Unpack(PyObject* obj) { + return THPStorage_Unpack(reinterpret_cast(obj)); +} + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h new file mode 100644 index 0000000000000000000000000000000000000000..bd0825fa30142ba7101510765b1b230142ab4f0c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h @@ -0,0 +1,8 @@ +#ifndef THP_STORAGE_METHODS_INC +#define THP_STORAGE_METHODS_INC + +#include + +PyMethodDef* THPStorage_getMethods(); + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h new file mode 100644 index 0000000000000000000000000000000000000000..803abf1832f000084c8e55cf147c51fbc511a0cc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h @@ -0,0 +1,8 @@ +#ifndef THP_STORAGE_SHARING_INC +#define THP_STORAGE_SHARING_INC + +#include + +PyMethodDef* THPStorage_getSharingMethods(); + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..91f1abe0516ce5555a8460e6fca232bd518e8ad0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h @@ -0,0 +1,23 @@ +#ifndef THP_STREAM_INC +#define THP_STREAM_INC + +#include +#include +#include + +struct THPStream { + PyObject_HEAD int64_t stream_id; + int64_t device_type; + int64_t device_index; +}; +extern TORCH_API PyTypeObject* THPStreamClass; + +void THPStream_init(PyObject* module); + +inline bool THPStream_Check(PyObject* obj) { + return THPStreamClass && PyObject_IsInstance(obj, (PyObject*)THPStreamClass); +} + +PyObject* THPStream_Wrap(const c10::Stream& stream); + +#endif // THP_STREAM_INC diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h new file mode 100644 index 0000000000000000000000000000000000000000..23512f1bce424865d82fc47f8af58740845b8ff4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h @@ -0,0 +1,19 @@ +#pragma once + +#define TH_CONCAT_STRING_2(x, y) TH_CONCAT_STRING_2_EXPAND(x, y) +#define TH_CONCAT_STRING_2_EXPAND(x, y) #x #y + +#define TH_CONCAT_STRING_3(x, y, z) TH_CONCAT_STRING_3_EXPAND(x, y, z) +#define TH_CONCAT_STRING_3_EXPAND(x, y, z) #x #y #z + +#define TH_CONCAT_STRING_4(x, y, z, w) TH_CONCAT_STRING_4_EXPAND(x, y, z, w) +#define TH_CONCAT_STRING_4_EXPAND(x, y, z, w) #x #y #z #w + +#define TH_CONCAT_2(x, y) TH_CONCAT_2_EXPAND(x, y) +#define TH_CONCAT_2_EXPAND(x, y) x##y + +#define TH_CONCAT_3(x, y, z) TH_CONCAT_3_EXPAND(x, y, z) +#define TH_CONCAT_3_EXPAND(x, y, z) x##y##z + +#define TH_CONCAT_4_EXPAND(x, y, z, w) x##y##z##w +#define TH_CONCAT_4(x, y, z, w) TH_CONCAT_4_EXPAND(x, y, z, w) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h new file mode 100644 index 0000000000000000000000000000000000000000..88d8489ba7b8f9dac36837cfa0816c6fd0375b4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h @@ -0,0 +1,30 @@ +#ifndef THP_H +#define THP_H + +#include +#include + +// Back-compatibility macros, Thanks to http://cx-oracle.sourceforge.net/ +// define PyInt_* macros for Python 3.x. NB: We must include Python.h first, +// otherwise we'll incorrectly conclude PyInt_Check isn't defined! 
+#ifndef PyInt_Check +#define PyInt_Check PyLong_Check +#define PyInt_FromLong PyLong_FromLong +#define PyInt_AsLong PyLong_AsLong +#define PyInt_Type PyLong_Type +#endif + +#include +#include +#include +#include +#include +#include +#include // This requires defined Storage and Tensor types +#include + +#include + +#include + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h new file mode 100644 index 0000000000000000000000000000000000000000..97d12e4eea5c6bbea483a1e2ebfd1a1ed7065411 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h @@ -0,0 +1,26 @@ +#pragma once + +#include + +#include + +struct THPDTypeInfo { + PyObject_HEAD at::ScalarType type; +}; + +struct THPFInfo : THPDTypeInfo {}; + +struct THPIInfo : THPDTypeInfo {}; + +extern PyTypeObject THPFInfoType; +extern PyTypeObject THPIInfoType; + +inline bool THPFInfo_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPFInfoType; +} + +inline bool THPIInfo_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPIInfoType; +} + +void THPDTypeInfo_init(PyObject* module); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h new file mode 100644 index 0000000000000000000000000000000000000000..01a20cb01dff68de245f3c8b16ca1914beb76a1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h @@ -0,0 +1,13 @@ +#ifndef THP_TYPES_INC +#define THP_TYPES_INC + +#include + +#ifndef INT64_MAX +#include +#endif + +template +struct THPTypeInfo {}; + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..200b46a23f6395c92144f7357faf8de14aeff42a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +typedef std::function THPCopyFunction; +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPCopyInfo { + PyTypeObject* srcType; // Python type of src tensor/storage + THPCopyFunction copy; // copy function + bool non_blocking; // true if copy implements an 'non_blocking' copy + bool broadcast; // true if the copy implements a broadcast copy +}; +typedef std::vector THPCopyList; + +inline bool tryTHPCopy( + const THPCopyList& v, + PyObject* dst, + PyObject* src, + bool non_blocking, + bool broadcast) { + for (auto& i : v) { + if (i.non_blocking == non_blocking && + PyType_IsSubtype(Py_TYPE(src), i.srcType)) { + (i.copy)(dst, src, broadcast); + return true; + } + } + return false; +} + +inline bool THPCopy( + const THPCopyList& v, + PyObject* dst, + PyObject* src, + bool non_blocking, + bool broadcast) { + // NOLINTNEXTLINE(bugprone-branch-clone) + if (tryTHPCopy(v, dst, src, non_blocking, broadcast)) { + return true; + } else if (non_blocking && tryTHPCopy(v, dst, src, false, broadcast)) { + return true; + } + THPUtils_setError( + "copy from %s to %s isn't implemented", + THPUtils_typename(src), + THPUtils_typename(dst)); + return false; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h new file mode 100644 
index 0000000000000000000000000000000000000000..a24fcb1a6991e7405bf1cd935073ba0de84f307b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h @@ -0,0 +1,14 @@ +#ifndef PROFILER_ITT_H +#define PROFILER_ITT_H +#include + +namespace torch { +namespace profiler { +TORCH_API bool itt_is_available(); +TORCH_API void itt_range_push(const char* msg); +TORCH_API void itt_range_pop(); +TORCH_API void itt_mark(const char* msg); +} // namespace profiler +} // namespace torch + +#endif // PROFILER_ITT_H diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h new file mode 100644 index 0000000000000000000000000000000000000000..c3a0fa303c7c566d72a45306d4f063bd52accadd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h @@ -0,0 +1,128 @@ +#pragma once +#include +#include +#include +#include +#include + +// `TorchScript` offers a simple logging facility that can enabled by setting an +// environment variable `PYTORCH_JIT_LOG_LEVEL`. + +// Logging is enabled on a per file basis. To enable logging in +// `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be +// set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination` +// (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`). + +// Multiple files can be logged by separating each file name with a colon `:` as +// in the following example, +// `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination` + +// There are 3 logging levels available for your use ordered by the detail level +// from lowest to highest. + +// * `GRAPH_DUMP` should be used for printing entire graphs after optimization +// passes +// * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e. +// node deletion, constant folding, etc) +// * `GRAPH_DEBUG` should be used for providing information useful for debugging +// the internals of a particular optimization pass or analysis + +// The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP` +// statements will be enabled when one specifies a file(s) in +// `PYTORCH_JIT_LOG_LEVEL`. + +// `GRAPH_UPDATE` can be enabled by prefixing a file name with an `>` as in +// `>alias_analysis`. +// `GRAPH_DEBUG` can be enabled by prefixing a file name with an `>>` as in +// `>>alias_analysis`. +// `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there +// is no logging level that is higher than `GRAPH_DEBUG`. + +namespace torch { +namespace jit { + +struct Node; +struct Graph; + +enum class JitLoggingLevels { + GRAPH_DUMP = 0, + GRAPH_UPDATE, + GRAPH_DEBUG, +}; + +TORCH_API std::string get_jit_logging_levels(); + +TORCH_API void set_jit_logging_levels(std::string level); + +TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream); + +TORCH_API std::ostream& get_jit_logging_output_stream(); + +TORCH_API std::string getHeader(const Node* node); + +TORCH_API std::string log_function(const std::shared_ptr& graph); + +TORCH_API ::torch::jit::JitLoggingLevels jit_log_level(); + +// Prefix every line in a multiline string \p IN_STR with \p PREFIX. 
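// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored header: besides the
// PYTORCH_JIT_LOG_LEVEL environment variable described above, the setters
// declared in this header allow the same configuration from C++. A small
// sketch; the pass names are placeholders (any JIT source file name works),
// and the output stream must outlive any subsequent logging.
#include <ostream>
#include <torch/csrc/jit/jit_log.h>

void enableVerboseJitLogging(std::ostream& sink) {
  // GRAPH_DUMP level for alias_analysis.cpp, GRAPH_DEBUG (">>") for
  // dead_code_elimination.cpp, mirroring the env-var syntax documented above.
  torch::jit::set_jit_logging_levels("alias_analysis:>>dead_code_elimination");
  torch::jit::set_jit_logging_output_stream(sink);
}
// ---------------------------------------------------------------------------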
+TORCH_API std::string jit_log_prefix( + const std::string& prefix, + const std::string& in_str); + +TORCH_API std::string jit_log_prefix( + ::torch::jit::JitLoggingLevels level, + const char* fn, + int l, + const std::string& in_str); + +TORCH_API bool is_enabled( + const char* cfname, + ::torch::jit::JitLoggingLevels level); + +TORCH_API std::ostream& operator<<( + std::ostream& out, + ::torch::jit::JitLoggingLevels level); + +#define JIT_LOG(level, ...) \ + if (is_enabled(__FILE__, level)) { \ + ::torch::jit::get_jit_logging_output_stream() \ + << ::torch::jit::jit_log_prefix( \ + level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \ + } + +// tries to reconstruct original python source +#define SOURCE_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, \ + MSG, \ + "\n", \ + ::torch::jit::log_function(G)); +// use GRAPH_DUMP for dumping graphs after optimization passes +#define GRAPH_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString()); +// use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion, +// constant folding, CSE) +#define GRAPH_UPDATE(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__); +// use GRAPH_DEBUG to provide information useful for debugging a particular opt +// pass +#define GRAPH_DEBUG(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__); +// use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script +#define GRAPH_EXPORT(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \ + MSG, \ + "\n\n", \ + (G)->toString(), \ + ""); + +#define GRAPH_DUMP_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP)) +#define GRAPH_UPDATE_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE)) +#define GRAPH_DEBUG_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG)) +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h new file mode 100644 index 0000000000000000000000000000000000000000..a5bb535c9c6fe708bbbf51625182a725425f1dc8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h @@ -0,0 +1,39 @@ +#pragma once +#include +#include +#include + +// `TorchScript` offers a simple optimization limit checker +// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`. +// The purpose is to limit how many optimization you can make per pass. +// This is useful for debugging any passes. + +// Opt limit checker is enabled on a per file basis (hence per pass). For +// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set +// to `constant_propagation=` or, simply, to +// `constant_propagation=` where is the number of +// optimizations you want to make for the pass. (i.e. +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation="`). + +// Multiple files can be configured by separating each file name with a colon +// `:` as in the following example, +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=:dead_code_elimination="` + +// You can call opt limiter by calling JIT_OPT_ALLOWED. It will return true if +// we haven't reached the optimization limit yet. Otherwise, it will return +// false. 
Typical usage: + +// if (!JIT_OPT_ALLOWED) { +// GRAPH_DUMP(...); //supplied from jit_log +// return; +// } + +namespace torch { +namespace jit { + +TORCH_API bool opt_limit(const char* pass_name); + +#define JIT_OPT_ALLOWED opt_limit(__FILE__) + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h new file mode 100644 index 0000000000000000000000000000000000000000..48c689b6cbfbd7bfb29e352b142e7b7baa8b1484 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace torch { +namespace jit { + +class ResourceGuard { + std::function _destructor; + bool _released; + + public: + ResourceGuard(std::function destructor) + : _destructor(std::move(destructor)), _released(false) {} + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~ResourceGuard() { + if (!_released) + _destructor(); + } + + void release() { + _released = true; + } +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..d45863399f5c3fbf34a518f4530d0f15af4ac1c3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h @@ -0,0 +1,406 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { +class HasRand : public IRVisitor { + public: + HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) { + stmt_->accept(this); + } + + bool has_rand() const { + return has_rand_; + } + + private: + void visit(IntrinsicsPtr v) override { + if (v->op_type() == IntrinsicsOp::kRand) { + has_rand_ = true; + } else { + IRVisitor::visit(std::move(v)); + } + } + StmtPtr stmt_; + bool has_rand_ = false; +}; + +template +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class NodeFinder : public IRVisitor { + public: + void visit(NodePtr v) override { + nodes.push_back((NodePtr)v); + IRVisitor::visit(v); + } + + static std::vector> find(StmtPtr s) { + NodeFinder nf; + s->accept(&nf); + return nf.nodes; + } + + static std::vector> find(ExprPtr e) { + NodeFinder nf; + e->accept(&nf); + return nf.nodes; + } + + std::vector> nodes; +}; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class VarFinder : public IRVisitor { + public: + void visit(VarPtr v) override { + vars_.insert(v); + IRVisitor::visit(std::move(v)); + } + + static std::unordered_set find(StmtPtr s) { + VarFinder nf; + s->accept(&nf); + return nf.vars(); + } + + static std::unordered_set find(ExprPtr e) { + VarFinder nf; + e->accept(&nf); + return nf.vars(); + } + + const std::unordered_set& vars() { + return vars_; + } + + private: + std::unordered_set vars_; +}; + +class BufFinder : public IRVisitor { + public: + void visit(BufPtr v) override { + bufs_.insert(v); + IRVisitor::visit(std::move(v)); + } + + static std::unordered_set find(StmtPtr s) { + BufFinder nf; + s->accept(&nf); + return nf.bufs(); + } + + static std::unordered_set find(ExprPtr e) { + BufFinder nf; + e->accept(&nf); + return nf.bufs(); + } + + const std::unordered_set& bufs() { + return bufs_; + } + + private: + std::unordered_set bufs_; +}; + +// Finds 
all kinds of write operations to the provided Buf. +class WritesToBuf : public IRVisitor { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + WritesToBuf(BufPtr target) : target_(std::move(target)) {} + + std::vector writes() { + return writes_; + } + + static std::vector find(StmtPtr s, BufPtr b) { + WritesToBuf finder(std::move(b)); + s->accept(&finder); + return finder.writes(); + } + + private: + void visit(StorePtr v) override { + if (v->buf() == target_) { + writes_.push_back(v); + } + } + + void visit(AtomicAddPtr v) override { + if (v->buf() == target_) { + writes_.push_back(v); + } + } + + BufPtr target_; + std::vector writes_; +}; + +class StmtsReadingBuf : public IRVisitor { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {} + + std::vector reads() { + return reads_; + } + + static std::vector find(StmtPtr s, BufPtr b) { + StmtsReadingBuf finder(std::move(b)); + s->accept(&finder); + return finder.reads(); + } + + private: + bool readsBuffer(StmtPtr s) { + auto loads = NodeFinder::find(std::move(s)); + for (const auto& l : loads) { + if (l->buf() == target_) { + return true; + } + } + return false; + } + + void visit(StorePtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + void visit(LetPtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + void visit(CondPtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + void visit(AtomicAddPtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + BufPtr target_; + std::vector reads_; +}; + +class ExternalAllocBufFinder : public IRVisitor { + public: + void visit(ExternalCallWithAllocPtr v) override { + const auto& bufs_out = v->buf_out_args(); + bufs_.insert(bufs_out.begin(), bufs_out.end()); + IRVisitor::visit(std::move(v)); + } + + static std::unordered_set find(StmtPtr s) { + ExternalAllocBufFinder f; + s->accept(&f); + return f.bufs(); + } + + static std::unordered_set find(ExprPtr e) { + ExternalAllocBufFinder f; + e->accept(&f); + return f.bufs(); + } + + const std::unordered_set& bufs() { + return bufs_; + } + + private: + std::unordered_set bufs_; +}; + +// Traverses the IR to determine if a particular Var is modified within it. +class ModifiesVarChecker : public IRVisitor { + public: + ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {} + + static bool check(StmtPtr s, VarPtr v) { + ModifiesVarChecker checker(std::move(v)); + s->accept(&checker); + return checker.found(); + } + + bool found() { + return found_; + } + + private: + void visit(StorePtr v) override { + if (v->buf()->base_handle() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + void visit(AtomicAddPtr v) override { + if (v->buf()->base_handle() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + void visit(LetPtr v) override { + if (v->var() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + void visit(ForPtr v) override { + if (v->var() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + VarPtr var_; + bool found_{false}; +}; + +// Traverse the Block stmt to identify the live range of the specified buf. The +// live range, indicated by a pair of integers, specifies the first and last +// stmt in block stmts that access to the buf. 
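+//
+// A hypothetical usage sketch (not part of the original header), assuming a
+// Block statement `block` and a buffer `buf` constructed elsewhere:
+//
+//   auto range = BufLiveRange::liveRange(block, buf);
+//   auto first = std::get<0>(range);  // index of first stmt touching buf
+//   auto last = std::get<1>(range);   // index of last stmt touching buf
+//
+// Both indices stay -1 if no statement in the block reads or writes `buf`,
+// and (0, 0) is returned when the statement passed in is not a Block.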
+class BufLiveRange : public IRVisitor { + public: + BufLiveRange(BufPtr b) : buf_(std::move(b)) {} + + static std::tuple liveRange(StmtPtr s, BufPtr b) { + BlockPtr block = to(std::move(s)); + // We Only analyze buffer live ranges for block stmts. + if (!block) { + return std::make_tuple(0, 0); + } + + BufLiveRange analyzer(std::move(b)); + block->accept(&analyzer); + return analyzer.getLiveRange(); + } + + private: + std::tuple getLiveRange() { + return std::make_tuple(begin_, end_); + } + + bool hasBufReads(StmtPtr s) { + auto loads1 = NodeFinder::find(s); + for (const auto& l : loads1) { + if (l->buf() == buf_) { + return true; + } + } + auto loads2 = NodeFinder::find(s); + for (const auto& l : loads2) { + for (const auto& lb : l->buf_args()) { + if (lb == buf_) { + return true; + } + } + } + auto loads3 = NodeFinder::find(std::move(s)); + for (const auto& l : loads3) { + for (const auto& lb : l->buf_args()) { + if (lb == buf_) { + return true; + } + } + } + return false; + } + + bool hasBufWrites(StmtPtr s) { + auto writes1 = NodeFinder::find(s); + for (const auto& w : writes1) { + if (w->buf() == buf_) { + return true; + } + } + auto writes2 = NodeFinder::find(s); + for (const auto& w : writes2) { + if (w->buf() == buf_) { + return true; + } + } + auto writes3 = NodeFinder::find(std::move(s)); + for (const auto& w : writes3) { + for (const auto& wb : w->buf_out_args()) { + if (wb == buf_) { + return true; + } + } + } + return false; + } + + void findAccAndUpdateLiveRange(StmtPtr s) { + bool has_reads = hasBufReads(s), has_writes = hasBufWrites(std::move(s)); + if (has_reads || has_writes) { + if (begin_ == -1) { + begin_ = curr_index_; + }; + end_ = curr_index_; + } + } + + void visit(BlockPtr v) override { + for (const StmtPtr& s : *v) { + curr_index_ += 1; + findAccAndUpdateLiveRange(s); + } + } + + BufPtr buf_; + int32_t begin_ = -1; + int32_t end_ = -1; + int32_t curr_index_ = -1; +}; + +// A class that analyzes the given program relevant for Block backend +// It creates a map of multi dim buffers and their flat versions +class CreateBufferMap : public IRVisitor { + public: + const std::unordered_map& getBufferMap() const { + return map_input_to_tensor_bufs_; + } + + private: + void visit(StorePtr v) override { + auto load_node = to(v->value()); + if (load_node) { + auto t_buf = load_node->buf(); + map_input_to_tensor_bufs_.emplace(t_buf->name_hint(), v->buf()); + } else { + auto add_node = to(v->value()); + auto mul_node = to(v->value()); + // This means for now, v->value() can be Add or Mul + TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage()); + map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf()); + } + v->value()->accept(this); + } + std::unordered_map map_input_to_tensor_bufs_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h new file mode 100644 index 0000000000000000000000000000000000000000..22de1ce32d00fb896a532e23b9ff6bd014cd646d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h @@ -0,0 +1,295 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// A class that analyzes 
the given program relevant for Cuda backends. +class CudaAnalysis : public IRVisitor { + public: + CudaAnalysis() { + gpu_block_extents_ = {alloc(1), alloc(1), alloc(1)}; + gpu_thread_extents_ = { + alloc(1), alloc(1), alloc(1)}; + } + bool is_buf_store_target(BufPtr buf) const { + return store_targets_.count(buf) > 0; + } + + const std::unordered_set& thread_local_bufs() const { + return thread_local_bufs_; + } + + const std::unordered_set& cross_block_bufs() const { + return cross_block_bufs_; + } + + const std::vector& gpu_block_extents() const { + return gpu_block_extents_; + } + + const std::vector& gpu_thread_extents() const { + return gpu_thread_extents_; + } + + private: + void visit(StorePtr v) override { + store_targets_.insert(v->buf()); + } + + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(PlacementAllocatePtr v) override; + void visit(ForPtr v) override; + + std::unordered_set store_targets_; + std::unordered_set thread_local_bufs_; + std::unordered_set cross_block_bufs_; + + std::vector gpu_block_extents_; + std::vector gpu_thread_extents_; +}; + +// An IRMutator that replaces binding loop options with Cuda metavars, and masks +// statements blocks which should execute with less reach than the launch +// parameter extent. +// +// We do this by segmenting each block into chunks which should have the same +// execution parameters, then if those params differ from the max mask each dim. +class GPUMetaVarRewriter : public IRMutator { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis) + : cuda_analysis_(cuda_analysis) { + gpu_block_vars_ = { + alloc("blockIdx.x", kInt), + alloc("blockIdx.y", kInt), + alloc("blockIdx.z", kInt)}; + gpu_thread_vars_ = { + alloc("threadIdx.x", kInt), + alloc("threadIdx.y", kInt), + alloc("threadIdx.z", kInt)}; + + current_block_reach_ = { + alloc(1), alloc(1), alloc(1)}; + current_thread_reach_ = { + alloc(1), alloc(1), alloc(1)}; + } + + StmtPtr mutate(ForPtr v) override; + StmtPtr mutate(BlockPtr v) override; + + const std::vector& gpu_block_vars() const { + return gpu_block_vars_; + } + + const std::vector& gpu_thread_vars() const { + return gpu_thread_vars_; + } + + const std::vector& gpu_block_extents() const { + return cuda_analysis_->gpu_block_extents(); + } + + const std::vector& gpu_thread_extents() const { + return cuda_analysis_->gpu_thread_extents(); + } + + private: + // When processing a block, stores the contents of each sub-segment. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + class Segment { + public: + void reset(bool mask) { + stmts_.clear(); + mask_ = mask; + } + + bool empty() const { + return stmts_.empty(); + } + + std::vector& stmts() { + return stmts_; + } + bool mask() { + return mask_; + } + + private: + std::vector stmts_; + bool mask_{true}; + }; + + // Returns true if the current execution scope is equivalent to the launch + // parameters. + bool isFullExtent(); + + std::vector gpu_block_vars_; + std::vector gpu_thread_vars_; + + std::vector current_block_reach_; + std::vector current_thread_reach_; + + const CudaAnalysis* cuda_analysis_; +}; + +// A class that overrides the underlying IRPrinter to produce Cuda C. 
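+//
+// A rough sketch of how the pieces above might be driven (illustrative only;
+// the CudaCodeGen class further below performs these steps internally, and
+// `stmt` is an assumed, already-lowered statement):
+//
+//   CudaAnalysis analysis;
+//   stmt->accept(&analysis);            // collect block/thread extents
+//   GPUMetaVarRewriter rewriter(&analysis);
+//   StmtPtr rewritten = stmt->accept_mutator(&rewriter);
+//   std::ostringstream oss;
+//   CudaPrinter printer(&oss, &analysis, /*has_random=*/false);
+//   rewritten->accept(&printer);        // emit Cuda C into oss
+//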
+class CudaPrinter : public IRPrinter { + public: + explicit CudaPrinter( + std::ostream* os, + const CudaAnalysis* cuda_analysis, + bool has_random) + : IRPrinter(*os), cuda_analysis_(cuda_analysis) { + if (has_random) { + rand_func_ = alloc("rand", kHandle); + } + } + + void visit(CastPtr v) override; + void visit(IntrinsicsPtr v) override; + void visit(ForPtr v) override; + + void visit(LoadPtr v) override; + void visit(StorePtr v) override; + void visit(AtomicAddPtr v) override; + void visit(MaxPtr v) override; + void visit(MinPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(BlockPtr v) override; + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(LetPtr v) override; + + void visit(ExternalCallPtr v) override; + + VarPtr rand_func() const { + return rand_func_; + } + + std::string dtypeToCppString(const Dtype& dtype) override; + + using IRPrinter::name_manager; + using IRPrinter::visit; + + private: + VarPtr rand_func_; + const CudaAnalysis* cuda_analysis_; + + void print_flat_alloc(AllocatePtr alloc); +}; + +// Construct Cuda C from the buffer and tensor input, and invoke the kernel +// when real arguments are provided. +class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen { + public: + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CudaCodeGen(StmtPtr stmt, Ts... ts) + : CodeGen( + stmt, + std::vector({BufferArg(ts)...}), + at::Device(at::kCUDA, at::cuda::current_device())) { + Initialize(); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CudaCodeGen( + StmtPtr stmt, + const std::vector& buffer_args, + at::Device device = at::Device(at::kCUDA, at::cuda::current_device()), + const std::string& kernel_func_name = "func") + : CodeGen(stmt, buffer_args, device, kernel_func_name) { + Initialize(); + } + + ~CudaCodeGen() override; + + void call(const std::vector& args) override; + void call_raw(const std::vector& args) override; + void call_with_numel(void** args, int64_t numel) override; + + template + void operator()(const Ts&... 
ts) { + call(std::vector({CallArg(ts)...})); + } + + at::Tensor empty_strided( + c10::IntArrayRef size, + c10::IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt) override; + + const std::vector& gpu_block_extents() const { + return cuda_analysis_->gpu_block_extents(); + } + + const std::vector& gpu_thread_extents() const { + return cuda_analysis_->gpu_thread_extents(); + } + + std::string getCodeText(const std::string& attr = "") override { + return oss_.str(); + } + + private: + void Initialize(); + + void CompileToNVRTC(const std::string& code, const std::string& func_name); + + UniqueNameManager* name_manager() { + if (!printer_) { + throw std::runtime_error("Null IRPrinter is not expected"); + } + return printer_->name_manager(); + } + + std::ostream& os() { + return printer_->os(); + } + + std::ostringstream oss_; + std::unique_ptr printer_; + std::unique_ptr cuda_analysis_; + std::unique_ptr metavar_rewriter_; + std::unordered_set taken_func_names; + std::mutex eval_lock_; + CUfunction function_; + bool has_random_ = false; + int thread_block_size_ = -1; + + std::vector arg_pos_in_extents_; +#ifdef TORCH_ENABLE_LLVM + std::vector> block_extents_eval_; + std::vector> thread_extents_eval_; +#else + std::vector> block_extents_eval_; + std::vector> thread_extents_eval_; +#endif + + std::string GetUniqueFuncName(const std::string& func_prefix); +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h new file mode 100644 index 0000000000000000000000000000000000000000..1a0cc57875d19978b0fe249dab83b8038b9dce9b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h @@ -0,0 +1,499 @@ +/** + * This file implements the core classes for Tensor Expressions. + * + * The structure of the expressions is inspired by Halide/TVM IR. + */ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +enum IRNodeType { + kPrimitive, + kAdd, + kSub, + kMul, + kDiv, + kMod, + kMax, + kMin, + kAnd, + kOr, + kLshift, + kRshift, + kXor, + kCompareSelect, + kCast, + kBitCast, + kOther, +}; + +// The common base between all expression node. +class TORCH_API Expr : public std::enable_shared_from_this { + public: + explicit Expr(Dtype dtype, IRNodeType expr_type = kOther) + : dtype_(dtype), expr_type_(expr_type) {} + virtual ~Expr() = default; + Dtype dtype() const { + return dtype_; + } + virtual void accept(IRVisitor* visitor) = 0; + virtual ExprPtr accept_mutator(IRMutator* mutator) = 0; + + IRNodeType expr_type() const { + return expr_type_; + } + // Is this a fixed (constant) immediate value. + virtual bool isConstant() const { + return false; + } + + void set_dtype(Dtype dtype) { + dtype_ = dtype; + } + + /* + * Make a deep copy of the given expression. + * + * All sub-expressions inside the given expressions are also cloned. Note + * that the variables are not deep-copied since they are immutable. + */ + static ExprPtr clone(ExprPtr s); + + protected: + std::shared_ptr getptr() { + return shared_from_this(); + } + + private: + Dtype dtype_; + IRNodeType expr_type_; +}; + +// A CRTP pattern to accept visitors for children class, +// and dispatch back to the children. 
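+//
+// Illustrative sketch (the node type `Foo` is hypothetical, not part of this
+// header): a concrete IR node is declared as
+//
+//   class Foo : public ExprNode<Foo> { /* ... */ };
+//
+// so that ExprNode's accept() dispatches to the visitor with a pointer of the
+// concrete node type, without Foo having to re-implement accept() itself.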
+template +class ExprNode : public Base { + public: + using ExprNodeBase = ExprNode; + void accept(IRVisitor* visitor) override { + visitor->visit(static_to(Base::getptr())); + } + ExprPtr accept_mutator(IRMutator* mutator) override; + // pass the constructor to the base class + using Base::Base; +}; + +// A wrapper object to the underlying ExprNode. +// Also serves the primary way to build and operate on other expressions. +class TORCH_API ExprHandle { + public: + ExprHandle() = default; + explicit ExprHandle(ExprPtr node) : base_expr_node_(std::move(node)) {} + + ExprPtr node() { + return base_expr_node_; + } + + ExprPtr node() const { + return base_expr_node_; + } + + bool empty() const { + return base_expr_node_ == nullptr; + } + +#define IMM_EXPR_DECLARE(Type, Name) ExprHandle(Type v); + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_EXPR_DECLARE); +#undef IMM_EXPR_DECLARE + + template + NodePtr AsNode() { + return to(this->node()); + } + + template + NodePtr AsNode() const { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + return const_cast(this)->AsNode(); + } + + Dtype dtype() const { + return node()->dtype(); + } + + // Handling the math operators. + ExprHandle operator+(const ExprHandle& other) const; + ExprHandle operator-(const ExprHandle& other) const; + ExprHandle operator*(const ExprHandle& other) const; + ExprHandle operator/(const ExprHandle& other) const; + ExprHandle operator%(const ExprHandle& other) const; + ExprHandle operator==(const ExprHandle& other) const; + ExprHandle operator!=(const ExprHandle& other) const; + ExprHandle operator>(const ExprHandle& other) const; + ExprHandle operator>=(const ExprHandle& other) const; + ExprHandle operator<(const ExprHandle& other) const; + ExprHandle operator<=(const ExprHandle& other) const; + ExprHandle operator&(const ExprHandle& other) const; + ExprHandle operator|(const ExprHandle& other) const; + ExprHandle operator&&(const ExprHandle& other) const; + ExprHandle operator||(const ExprHandle& other) const; + ExprHandle operator^(const ExprHandle& other) const; + ExprHandle operator<<(const ExprHandle& other) const; + ExprHandle operator>>(const ExprHandle& other) const; + + private: + ExprPtr base_expr_node_ = nullptr; +}; + +// The underlying representation node to a Var. +// Currently, each Var object represents a unique variable, even though the +// names might be the same. We should consider add a unique_name as well. 
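+//
+// A small usage sketch (illustrative, not part of the original header):
+//
+//   VarHandle i("i", kInt);             // named integer index variable
+//   ExprHandle anon = Var::make(kInt);  // anonymous var with empty name hint
+//
+// Two Vars created with the same name hint are still distinct variables:
+// identity is by node, not by name.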
+class TORCH_API Var : public ExprNode { + public: + static ExprHandle make(const std::string& name_hint, Dtype dtype) { + return ExprHandle(alloc(name_hint, dtype)); + } + static ExprHandle make(Dtype dtype) { + return ExprHandle(alloc("", dtype)); + } + + // TODO: unique_name + const std::string& name_hint() const { + return name_hint_; + } + + void set_name_hint(const std::string& name) { + name_hint_ = name; + } + + void set_name_hint(std::string&& name) { + name_hint_ = std::move(name); + } + + Var(std::string name_hint, Dtype dtype) + : ExprNodeBase(dtype, kPrimitive), name_hint_(std::move(name_hint)) {} + + private: + std::string name_hint_; +}; + +TORCH_API std::vector make_contiguous_strides( + const std::vector& dims); +TORCH_API std::vector make_channels_last_strides( + const std::vector& dims); + +class TORCH_API Buf : public ExprNode { + public: + static BufHandle make(const std::vector& dims, Dtype dtype); + + static BufHandle make( + const std::string& name_hint, + const std::vector& dims, + const std::vector& strides, + Dtype dtype); + + static BufHandle make( + const std::string& name_hint, + const std::vector& dims, + Dtype dtype, + c10::optional initializer = c10::nullopt, + c10::optional> strides = c10::nullopt, + c10::optional qscale = c10::nullopt, + c10::optional qzero = c10::nullopt); + + // TODO: unique_name + VarPtr base_handle() const { + return base_handle_; + } + void set_base_handle(VarPtr base_handle) { + base_handle_ = std::move(base_handle); + } + + const std::string& name_hint() const { + return base_handle_->name_hint(); + } + void set_name_hint(const std::string& name_hint) { + base_handle_->set_name_hint(name_hint); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Buf(const std::string& name_hint, + const std::vector& dims, + Dtype dtype, + ExprPtr initializer = nullptr, + c10::optional> strides = c10::nullopt, + ExprPtr qscale = nullptr, + ExprPtr qzero = nullptr) + : Buf(alloc(name_hint, kHandle), + dims, + dtype, + std::move(initializer), + std::move(strides), + std::move(qscale), + std::move(qzero)) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Buf(VarPtr var, + std::vector dims, + Dtype dtype, + ExprPtr initializer = nullptr, + c10::optional> strides = c10::nullopt, + ExprPtr qscale = nullptr, + ExprPtr qzero = nullptr); + + size_t ndim() const { + return dims_.size(); + } + ExprPtr dim(size_t index) const { + if (index >= ndim()) { + throw out_of_range_index(); + } + return dims_[index]; + } + std::vector dims() const { + return dims_; + } + void set_dims(std::vector dims) { + dims_ = std::move(dims); + } + + std::vector strides() const { + return strides_; + } + + void set_strides(std::vector strides) { + strides_ = std::move(strides); + } + + ExprPtr initializer() const { + return initializer_; + }; + + ExprPtr qzero() const { + return qzero_; + } + + ExprPtr qscale() const { + return qscale_; + } + + void set_qzero(ExprPtr qzero) { + qzero_ = std::move(qzero); + } + + void set_qscale(ExprPtr qscale) { + qscale_ = std::move(qscale); + } + + bool hasConstantDims() const { + for (const auto& d : dims_) { + if (!d->isConstant()) { + return false; + } + } + return true; + } + + bool is_contiguous( + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const; + + // The channels-last 1d can benefit the performance of some operators like + // conv1d. But the MemoryFormat enum has not covered this layout yet. Hence, + // we abstract a dedicated function to check channels-last 1d contiguous. 
+ // + // Channels-last 1d: + // dims: n c l + // strides(nlc): c*l 1 c + bool is_channels_last_1d_contiguous() const { + if (dims_.size() != 3) { + return false; + } + return is_stride_one(1) && is_cont_with(2, 1) && is_cont_with(0, 2); + } + + private: + bool is_cont_with(int cur_dim, int adjacent_dim) const; + bool is_stride_one(int cur_dim) const; + + VarPtr base_handle_; + std::vector dims_; + std::vector strides_; + ExprPtr initializer_; + // qscale_ and qzero_ are used only for quantized dtypes Bufs: kQUInt8, kQInt8 + ExprPtr qscale_; + ExprPtr qzero_; +}; + +class TORCH_API BufHandle : public ExprHandle { + public: + BufHandle( + const std::string& name_hint, + const std::vector& dims, + Dtype dtype) + : ExprHandle(Buf::make(name_hint, dims, dtype)) {} + + BufHandle( + const std::string& name_hint, + const std::vector& dims, + const std::vector& strides, + Dtype dtype) + : ExprHandle(Buf::make(name_hint, dims, strides, dtype)) {} + + BufHandle(const std::vector& dims, Dtype dtype) + : ExprHandle(Buf::make("_", dims, dtype)) {} + + explicit BufHandle(Dtype dtype) : ExprHandle(Buf::make("_", {}, dtype)) {} + + explicit BufHandle(BufPtr node) : ExprHandle(std::move(node)) {} + BufPtr node() const { + return static_to(ExprHandle::node()); + } + BufPtr node() { + return static_to(ExprHandle::node()); + } + + template + inline ExprHandle load(const Ts&... ts) const; + + template + inline ExprHandle load(const std::vector& args) const; + + inline ExprHandle load(const std::vector& args) const; + + StorePtr store(const std::vector& args, const ExprHandle& val) + const; + + bool operator==(const BufHandle& other) const { + return this->node() == other.node(); + } + bool operator!=(const BufHandle& other) const { + return !(*this == other); + } + + const std::string& name_hint() const { + return this->node()->name_hint(); + } + + bool empty() const { + return (this->node() == nullptr); + } + + size_t ndim() const { + return node()->ndim(); + } + + std::vector dims() const; + + ExprHandle dim(size_t index) const { + return ExprHandle(node()->dim(index)); + } + + bool is_contiguous( + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const { + return node()->is_contiguous(memory_format); + } + + bool is_channels_last_1d_contiguous() const { + return node()->is_channels_last_1d_contiguous(); + } +}; + +// An expression to construct the underlying variable node. +// Note: do not store any info here, since it is often possible to slice this +// object. For example: VarHandle x('x'); ExprHandle x2 = x; +class TORCH_API VarHandle : public ExprHandle { + public: + // Creates an empty VarHandle whose base Var is set to nullptr. 
+ VarHandle() : ExprHandle() {} + + explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {} + + VarHandle(const std::string& name_hint, Dtype dtype) + : ExprHandle(Var::make(name_hint, dtype)) {} + + explicit VarHandle(VarPtr node) : ExprHandle(std::move(node)) {} + + VarPtr node() const { + return static_to(ExprHandle::node()); + } + bool operator==(const VarHandle& other) const { + return this->node() == other.node(); + } + bool operator!=(const VarHandle& other) const { + return !(*this == other); + } + + const std::string& name_hint() const { + return this->node()->name_hint(); + } + bool empty() const { + return (this->node() == nullptr); + } +}; + +template +ExprPtr ExprNode::accept_mutator(IRMutator* mutator) { + return mutator->mutate(static_to(Base::getptr())); +} + +inline bool same_node(const ExprHandle& expr1, const ExprHandle& expr2) { + return expr1.AsNode() == expr2.AsNode(); +} + +TORCH_API ExprHandle sin(const ExprHandle& v); +TORCH_API ExprHandle cos(const ExprHandle& v); +TORCH_API ExprHandle tan(const ExprHandle& v); +TORCH_API ExprHandle asin(const ExprHandle& v); +TORCH_API ExprHandle acos(const ExprHandle& v); +TORCH_API ExprHandle atan(const ExprHandle& v); +TORCH_API ExprHandle sinh(const ExprHandle& v); +TORCH_API ExprHandle cosh(const ExprHandle& v); +TORCH_API ExprHandle tanh(const ExprHandle& v); +TORCH_API ExprHandle sigmoid(const ExprHandle& v); +TORCH_API ExprHandle exp(const ExprHandle& v); +TORCH_API ExprHandle expm1(const ExprHandle& v); +TORCH_API ExprHandle abs(const ExprHandle& v); +TORCH_API ExprHandle log(const ExprHandle& v); +TORCH_API ExprHandle fast_tanh(const ExprHandle& v); +TORCH_API ExprHandle fast_sigmoid(const ExprHandle& v); +TORCH_API ExprHandle fast_log(const ExprHandle& v); +TORCH_API ExprHandle log_vml(const ExprHandle& v); +TORCH_API ExprHandle log2(const ExprHandle& v); +TORCH_API ExprHandle log10(const ExprHandle& v); +TORCH_API ExprHandle log1p(const ExprHandle& v); +TORCH_API ExprHandle erf(const ExprHandle& v); +TORCH_API ExprHandle erfc(const ExprHandle& v); +TORCH_API ExprHandle sqrt(const ExprHandle& v); +TORCH_API ExprHandle rsqrt(const ExprHandle& v); +TORCH_API ExprHandle ceil(const ExprHandle& v); +TORCH_API ExprHandle floor(const ExprHandle& v); +TORCH_API ExprHandle round(const ExprHandle& v); +TORCH_API ExprHandle trunc(const ExprHandle& v); +TORCH_API ExprHandle frac(const ExprHandle& v); +TORCH_API ExprHandle lgamma(const ExprHandle& v); +TORCH_API ExprHandle atan2(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle pow(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle fmod(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle remainder(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle isnan(const ExprHandle& v1); +TORCH_API ExprHandle Relu(const ExprHandle& v1); + +TORCH_API ExprHandle +ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f); + +TORCH_API ExprHandle expr_to_vec(ExprHandle v, int lanes); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..627d67c934d593c8741d0cead8b78477e02d2585 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h @@ -0,0 
+1,115 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#define FOR_ALL_EXTERNAL_FUNCTIONS(_) \ + _(nnc_aten_adaptive_avg_pool2d) \ + _(nnc_aten_addmm) \ + _(nnc_aten_conv2d) \ + _(nnc_aten_conv1d) \ + _(nnc_aten_conv1d_out) \ + _(nnc_aten_dequantize) \ + _(nnc_aten_dequantize_out) \ + _(nnc_aten_embedding) \ + _(nnc_aten_matmul) \ + _(nnc_aten_mv) \ + _(nnc_aten_mm) \ + _(nnc_aten_mean) \ + _(nnc_aten_max_red) \ + _(nnc_aten_max_red_out) \ + _(nnc_aten_quantized_conv1d) \ + _(nnc_aten_quantized_conv1d_out) \ + _(nnc_aten_quantized_conv2d) \ + _(nnc_aten_quantized_conv2d_out) \ + _(nnc_aten_quantized_conv2d_relu) \ + _(nnc_aten_quantized_conv2d_relu_out) \ + _(nnc_aten_quantized_linear) \ + _(nnc_aten_quantized_linear_out) \ + _(nnc_aten_quantized_linear_relu) \ + _(nnc_aten_quantized_add) \ + _(nnc_aten_quantized_cat) \ + _(nnc_aten_quantized_mul) \ + _(nnc_aten_quantized_mul_out) \ + _(nnc_aten_quantized_mul_scalar) \ + _(nnc_aten_quantized_mul_scalar_out) \ + _(nnc_aten_quantized_relu) \ + _(nnc_aten_quantized_sigmoid) \ + _(nnc_aten_quantized_sigmoid_out) \ + _(nnc_aten_quantize_per_tensor) \ + _(nnc_aten_quantize_per_tensor_out) \ + _(nnc_aten_triangular_solve) \ + _(nnc_aten_upsample_nearest2d) \ + _(nnc_aten_upsample_nearest2d_out) \ + _(nnc_prepacked_conv2d_clamp_run) \ + _(nnc_prepacked_linear_clamp_run) + +#define DECLARE_EXTERNAL_FUNCTION(NAME) \ + TORCH_API void NAME( \ + int64_t bufs_num, \ + void** buf_data, \ + int64_t* buf_ranks, \ + int64_t* buf_dims, \ + int64_t* buf_strides, \ + int8_t* buf_dtypes, \ + int64_t args_num, \ + int64_t* extra_args); + +namespace torch { +namespace jit { +namespace tensorexpr { +struct QIData final { + double scale; + int64_t zero; + c10::ScalarType scalarType; +}; +std::vector constructTensors( + int64_t bufs_num, + void** buf_data, + int64_t* buf_ranks, + int64_t* buf_dims, + int64_t* buf_strides, + int8_t* buf_dtypes, + c10::optional>> qdataArg = + c10::nullopt); + +std::vector constructTensors2( + int64_t bufs_in_num, + void** buf_data, + int64_t* buf_ranks, + int64_t* buf_dims, + int64_t* buf_strides, + int8_t* buf_dtypes, + c10::optional>> qdataArg = + c10::nullopt, + size_t bufs_out_num = 0); + +#ifdef C10_MOBILE +extern "C" { +#endif +void DispatchParallel( + int8_t* func, + int64_t start, + int64_t stop, + int8_t* packed_data) noexcept; + +FOR_ALL_EXTERNAL_FUNCTIONS(DECLARE_EXTERNAL_FUNCTION) +#if AT_MKLDNN_ENABLED() +DECLARE_EXTERNAL_FUNCTION(nnc_mkldnn_prepacked_conv_run); +#endif + +TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept; + +#ifdef C10_MOBILE +} // extern "C" +#endif + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +#undef DECLARE_EXTERNAL_FUNCTION diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h new file mode 100644 index 0000000000000000000000000000000000000000..8ec41fe2f4a83e14f5de47df196db02079da117c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h @@ -0,0 +1,217 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// Walk the Statement looking for Half size loads/stores. 
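+//
+// A hypothetical usage sketch (assuming an already-lowered statement `stmt`
+// and the codegen buffer args `args`; not part of the original header):
+//
+//   HalfChecker checker(args);
+//   stmt->accept(&checker);
+//   if (checker.hasHalf() || checker.hasBFloat16()) {
+//     HalfRewriter rewriter;
+//     stmt = stmt->accept_mutator(&rewriter);  // compute in float instead
+//   }
+//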
+class HalfChecker : public IRVisitor { + public: + HalfChecker(const std::vector& args) { + for (const auto& BA : args) { + hasHalf_ |= BA.dtype().scalar_type() == ScalarType::Half; + } + } + + bool hasHalf() const { + return hasHalf_; + } + + bool hasBFloat16() const { + return hasBFloat16_; + } + + void visit(LoadPtr v) override { + hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half; + hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16; + IRVisitor::visit(v); + } + + void visit(StorePtr v) override { + hasHalf_ |= v->buf()->dtype().scalar_type() == ScalarType::Half; + hasBFloat16_ |= v->buf()->dtype().scalar_type() == ScalarType::BFloat16; + IRVisitor::visit(v); + } + + void visit(HalfImmPtr v) override { + hasHalf_ = true; + } + + void visit(BFloat16ImmPtr v) override { + hasBFloat16_ = true; + } + + void visit(CastPtr v) override { + hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half; + hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16; + IRVisitor::visit(v); + } + + private: + bool hasHalf_{false}; + bool hasBFloat16_{false}; +}; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class HalfRewriter : public IRMutator { + ExprPtr mutate(LoadPtr v) override { + ExprPtr child = IRMutator::mutate(v); + if (!isHalf(child)) { + return child; + } + + ExprPtr ret = alloc( + child->dtype().cloneWithScalarType(ScalarType::Float), child); + + inserted_half_casts_.insert(ret); + return ret; + } + + StmtPtr mutate(StorePtr v) override { + // Since mutation changes the `value()` expression in-place, we need to + // get the dtype of the `value()` before that is mutated. + auto newType = v->value()->dtype(); + ExprPtr new_val = v->value()->accept_mutator(this); + auto bufType = v->buf()->dtype(); + + if (isHalf(newType.scalar_type())) { + new_val = alloc(newType, new_val); + inserted_half_casts_.insert(new_val); + } + + // The scalar_type of value is not Half while the buf is Half + if (!isHalf(newType.scalar_type()) && isHalf(bufType.scalar_type())) { + new_val = alloc( + newType.cloneWithScalarType(bufType.scalar_type()), new_val); + inserted_half_casts_.insert(new_val); + } + + v->set_value(new_val); + return v; + } + + ExprPtr mutate(HalfImmPtr v) override { + return alloc(kFloat, v); + } + + ExprPtr mutate(BFloat16ImmPtr v) override { + return alloc(kFloat, v); + } + + ExprPtr mutate(CastPtr v) override { + ExprPtr child = v->src_value()->accept_mutator(this); + + // just don't allow half casts we didn't insert. + if (isHalf(v)) { + if (inserted_half_casts_.count(v) < 1) { + v->set_src_value(child); + v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat)); + return v; + } + } + + // Remove Half(Float()) and friends. + CastPtr cast_child = to(child); + if (cast_child) { + auto cast_to_double = v->dtype().scalar_type() == ScalarType::Double; + auto from_half = isHalf(cast_child->src_value()); + // Cannot simplify the double(float(half)) to double(half) as NNC does + // not support cast BF16 to double directly. 
+ auto not_cast_half_to_doulbe = !(cast_to_double && from_half); + if (v->dtype().is_floating_point() && + cast_child->dtype().is_floating_point() && not_cast_half_to_doulbe) { + return alloc(v->dtype(), cast_child->src_value()); + } + } + + if (child == v->src_value()) { + return v; + } + + return alloc(v->dtype(), child); + } + + StmtPtr mutate(LetPtr v) override { + if (isHalf(v->var()->dtype().scalar_type())) { + VarPtr load_new_var = alloc(v->var()->name_hint(), kFloat); + ExprPtr new_value = alloc( + v->var()->dtype().cloneWithScalarType(ScalarType::Float), + v->value()->accept_mutator(this)); + var_map[v->var()] = load_new_var; + + return alloc(load_new_var, new_value); + } + + return IRMutator::mutate(v); + } + + ExprPtr mutate(VarPtr v) override { + auto it = var_map.find(v); + if (it != var_map.end()) { + return it->second; + } + + return v; + } + + template + ExprPtr mutateArithmetic(T v) { + IRMutator::mutate(v); + if (isHalf(v)) { + v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat)); + } + return v; + } + + ExprPtr mutate(AddPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(SubPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(MulPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(DivPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(MaxPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(MinPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(CompareSelectPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(BroadcastPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(IfThenElsePtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(IntrinsicsPtr v) override { + return mutateArithmetic(v); + } + + private: + static bool isHalf(ScalarType st) { + return st == ScalarType::Half || st == ScalarType::BFloat16; + } + + static bool isHalf(ExprPtr v) { + return isHalf(v->dtype().scalar_type()); + } + + std::unordered_set inserted_half_casts_; + std::unordered_map var_map; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h new file mode 100644 index 0000000000000000000000000000000000000000..c16066197ca9e4faa209ae698478431d9e894dd2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h @@ -0,0 +1,304 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +struct TORCH_API SimplifierHashType { + SimplifierHashType() = default; + explicit SimplifierHashType(size_t s) : _h(s) {} + + bool operator==(const SimplifierHashType& other) const; + bool operator!=(const SimplifierHashType& other) const; + bool operator<(const SimplifierHashType& other) const; + bool operator==(const size_t other) const; + bool operator!=(const size_t other) const; + + size_t _h{0}; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +namespace std { +template <> +struct hash { + size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const { + return k._h; + } +}; + +} // namespace std + +namespace torch { +namespace jit { +namespace tensorexpr { + +#define CACHE_GUARD() \ + if (cachedHash(v)) { \ + return; \ + } + +class Term; +class Polynomial; + +/* 
Expression hasher providing comparable values representing sub-exprs. + * Uses memoization to avoid excessive recursion. */ +class TORCH_API HashProvider : public IRVisitor { + public: + template + SimplifierHashType hash(T e) { + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) + e->accept(this); + return hashOf(e); + } + + bool cachedHash(ExprPtr e) { + return exprToHash_.find(e) != exprToHash_.end(); + } + bool cachedHash(StmtPtr s) { + return stmtToHash_.find(s) != stmtToHash_.end(); + } + + void clearCache() { + exprToHash_.clear(); + stmtToHash_.clear(); + } + + void visit(AddPtr v) override; + void visit(SubPtr v) override; + void visit(MulPtr v) override; + void visit(DivPtr v) override; + void visit(ModPtr v) override; + void visit(RoundOffPtr v) override; + void visit(MaxPtr v) override; + void visit(MinPtr v) override; + void visit(AndPtr v) override; + void visit(OrPtr v) override; + void visit(XorPtr v) override; + void visit(LshiftPtr v) override; + void visit(RshiftPtr v) override; + void visit(CompareSelectPtr v) override; + +// NOLINTNEXTLINE +#define IMM_VISIT(Type, Name) \ + void visit(Name##ImmPtr v) override { \ + CACHE_GUARD(); \ + putHash(v, hash_combine(#Name, v->value())); \ + } + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_VISIT); +#undef IMM_VISIT + + void visit(CastPtr v) override; + void visit(VarPtr v) override; + void visit(RampPtr v) override; + void visit(LoadPtr v) override; + void visit(StorePtr v) override; + void visit(BlockPtr v) override; + void visit(ForPtr v) override; + void visit(BroadcastPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(IntrinsicsPtr v) override; + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(CondPtr v) override; + void visit(TermPtr v) override; + void visit(PolynomialPtr v) override; + void visit(MaxTermPtr v) override; + void visit(MinTermPtr v) override; + + template + SimplifierHashType hash_combine(const Types&... args) { + SimplifierHashType seed; + _hash_combine(seed, args...); + return seed; + } + + private: + SimplifierHashType hashOf(ExprPtr e) { + auto it = exprToHash_.find(e); + if (it != exprToHash_.end()) { + return it->second; + } + + // As a failsafe fall back to IRPrinter. + std::stringstream ss; + IRPrinter printer(ss); + e->accept(&printer); + SimplifierHashType hash = SimplifierHashType(te_hash(ss.str())); + putHash(std::move(e), hash); + + return hash; + } + + SimplifierHashType hashOf(StmtPtr s) { + auto it = stmtToHash_.find(s); + if (it != stmtToHash_.end()) { + return it->second; + } + + // As a failsafe fall back to IRPrinter. + std::stringstream ss; + IRPrinter printer(ss); + s->accept(&printer); + SimplifierHashType hash = SimplifierHashType(te_hash(ss.str())); + putHash(std::move(s), hash); + + return hash; + } + + // Hash funcs for various types, numbers are random. + template + void _hash_combine(SimplifierHashType& seed, const T& val) { + seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); + } + + void _hash_combine(SimplifierHashType& seed, const char* val) { + seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); + } + + // at:::Half doesn't have a prime_number_hash, so cast to short. 
+ void _hash_combine(SimplifierHashType& seed, const at::Half& val) { + seed._h ^= + te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); + } + + void _hash_combine(SimplifierHashType& seed, const Dtype& val) { + seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) + + (seed._h >> 4); + } + + void _hash_combine(SimplifierHashType& seed, ExprPtr e) { + _hash_combine(seed, hash(std::move(e))); + } + + template + void _hash_combine( + SimplifierHashType& seed, + const T& val, + const Types&... args) { + _hash_combine(seed, val); + _hash_combine(seed, args...); + } + + void putHash(ExprPtr e, SimplifierHashType h) { + auto res = exprToHash_.emplace(e, h); + if (res.second == false) { + // This is always a logic bug since we should check the cache first. + throw std::runtime_error("hash collision"); + } + } + void putHash(StmtPtr s, SimplifierHashType h) { + auto res = stmtToHash_.emplace(s, h); + if (res.second == false) { + // This is always a logic bug since we should check the cache first. + throw std::runtime_error("hash collision"); + } + } + + std::unordered_map exprToHash_; + std::unordered_map stmtToHash_; + UniqueNameManager name_manager_; + + size_t te_hash(SimplifierHashType val) { + return val._h; + } + + size_t te_hash(int64_t val) { + // put the thing down. + size_t h = val ^ 0x647AA4D20C0B; + // bit flip it. + size_t h2 = ~h; + // and reverse byte order. + size_t h3 = 0; + for (unsigned int i = 0; i < 64; i += 8) { + h3 |= ((h2 >> i) & 0xFF) << (64 - i - 8); + } + return h3; + } + + size_t te_hash(int32_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(uint32_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(uint64_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(int16_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(std::string val) { + size_t hash{0}; + int64_t intval{0}; + int64_t s = val.size() - 1; + while (s >= 0) { + for (unsigned int i = 0; i < 8; ++i) { + if (s < 0) + break; + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + int64_t c = val.data()[s]; + intval |= (c << (i * 8)); + + s--; + } + hash ^= te_hash(intval); + intval = 0; + } + + return hash; + } + + size_t te_hash(double d) { + // memcpy as type punning. Should be optimized out. + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int64_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } + + size_t te_hash(float d) { + // memcpy as type punning. Should be optimized out. + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int32_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } + + size_t te_hash(at::Half d) { + // memcpy as type punning. Should be optimized out. + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int16_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } + + size_t te_hash(at::BFloat16 d) { + // memcpy as type punning. Should be optimized out. 
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int16_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h new file mode 100644 index 0000000000000000000000000000000000000000..03b6d9a1f0cce4fa8d6a551702d175162226ad7a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class Expr; +class ExprHandle; +class Mod; +class And; +class Or; +class Xor; +class Lshift; +class Rshift; +class CompareSelect; +class Ramp; +class Load; +class IfThenElse; +class Intrinsics; + +class Stmt; +class ExternalCall; +class Store; +class For; +class Block; + +class TORCH_API IRVerifier : public IRVisitor { + public: + IRVerifier() = default; + + void visit(ModPtr v) override; + void visit(AndPtr v) override; + void visit(OrPtr v) override; + void visit(XorPtr v) override; + void visit(LshiftPtr v) override; + void visit(RshiftPtr v) override; + void visit(CompareSelectPtr v) override; + void visit(RampPtr v) override; + void visit(LoadPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(IntrinsicsPtr v) override; + + void visit(ExternalCallPtr v) override; + void visit(StorePtr v) override; + void visit(ForPtr v) override; + void visit(BlockPtr v) override; +}; + +TORCH_API void verify(StmtPtr); +TORCH_API void verify(ExprPtr); +TORCH_API void verify(ExprHandle); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..09e6069dba1c288b62604ad4f9edff9af8210e1d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h @@ -0,0 +1,64 @@ +#pragma once +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class TORCH_API IRVisitor { + public: + virtual ~IRVisitor() = default; + virtual void visit(AddPtr v); + virtual void visit(SubPtr v); + virtual void visit(MulPtr v); + virtual void visit(DivPtr v); + virtual void visit(ModPtr v); + virtual void visit(MaxPtr v); + virtual void visit(MinPtr v); + virtual void visit(AndPtr v); + virtual void visit(OrPtr v); + virtual void visit(XorPtr v); + virtual void visit(LshiftPtr v); + virtual void visit(RshiftPtr v); + virtual void visit(CompareSelectPtr v); + +#define IMM_PRINT_VISIT(Type, Name) virtual void visit(Name##ImmPtr v); + + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT) +#undef IMM_PRINT_VISIT + + virtual void visit(CastPtr v); + virtual void visit(BitCastPtr v); + virtual void visit(VarPtr v); + virtual void visit(BufPtr v); + virtual void visit(RampPtr v); + virtual void visit(LoadPtr v); + virtual void visit(ForPtr v); + virtual void visit(BlockPtr v); + virtual void visit(StorePtr v); + virtual void visit(BroadcastPtr v); + virtual void visit(IfThenElsePtr v); + virtual void visit(IntrinsicsPtr v); + virtual void visit(AllocatePtr v); + virtual void visit(FreePtr v); + virtual void 
visit(FreeExtPtr v); + virtual void visit(PlacementAllocatePtr v); + virtual void visit(LetPtr v); + virtual void visit(CondPtr v); + virtual void visit(TermPtr v); + virtual void visit(PolynomialPtr v); + virtual void visit(RoundOffPtr v); + virtual void visit(MaxTermPtr v); + virtual void visit(MinTermPtr v); + virtual void visit(ReduceOpPtr v); + virtual void visit(AtomicAddPtr v); + virtual void visit(SyncThreadsPtr v); + virtual void visit(ExternalCallPtr v); + virtual void visit(ExternalCallWithAllocPtr v); +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..45658beb750e9f85fdd3bbdc8488223f8bbb04c7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h @@ -0,0 +1,382 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +struct SmallSizeTPairHash { + public: + std::size_t operator()(const std::pair& x) const { + // hashing input index and then dim index + return x.first * 128 + x.second; + } +}; + +// Returns true if the TE fuser supports this conv2d. +bool conv2dIsSupportedJit(const Node* node); +// Returns true if the TE fuser supports this conv2d with mkldnn prepacked conv. +bool mkldnnPrepackedConvIsSupportedJit(const Node* node); +// Returns true if the TE _convolution node is Conv2d. +bool isConv2d(const Node* node); +// Returns true if the TE fuser supports this matmul. +bool matmulIsSupported(const Node* node); +template +inline std::vector bufferSizes(const T& t) { + std::vector sizes; + for (size_t i = 0; i < t->ndim(); i++) { + sizes.push_back(*intValue(t->dim(i))); + } + return sizes; +} + +// Get the dimensions of a value. +std::vector valueShape(const ArgValue& v); + +// If v is a tensor, broadcast it to match the shape of axes, or return +// directly if v is a constant. 
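+//
+// For example (illustrative; `inputs` is a hypothetical vector of ArgValue as
+// seen inside a lowering function, and `axes` are the output loop indices):
+//
+//   ExprHandle lhs = tensorOrConstant(inputs[0], axes);
+//   ExprHandle rhs = tensorOrConstant(inputs[1], axes);
+//   ExprHandle out = lhs + rhs;  // both operands now indexed over `axes`
+//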
+ExprHandle tensorOrConstant( + const ArgValue& v, + const std::vector& axes); + +int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size); + +ExprHandle broadcast(BufHandle b, const std::vector& axes); + +ExprHandle constant(const ArgValue& v); + +std::vector computeIndicesToBroadcast( + const std::vector& outputAxes, + const std::vector& inputSizes); + +inline std::string getArgValueName(const ArgValue& a) { + if (std::holds_alternative(a)) { + return "BufHandle"; + } else if (std::holds_alternative(a)) { + return "VarHandle"; + } else if (std::holds_alternative(a)) { + return "double"; + } else if (std::holds_alternative(a)) { + return "int64_t"; + } else if (std::holds_alternative(a)) { + return "bool"; + } else if (std::holds_alternative(a)) { + return "BufList"; + } else if (std::holds_alternative(a)) { + return "DoubleList"; + } else if (std::holds_alternative(a)) { + return "IntList"; + } else if (std::holds_alternative(a)) { + return "None"; + } else { + throw std::runtime_error("ArgValue type not handled in string conversion"); + } +} + +template +std::vector convertVecArgValue(const std::vector& v) { + std::vector res; + for (auto& x : v) { + auto val = std::get_if(&x); + if (val) { + res.push_back(*val); + } else { + throw std::runtime_error( + "vector type not homogeneous - found " + getArgValueName(x) + + ", expected " + getArgValueName(v[0])); + } + } + return res; +} + +class TORCH_API TensorExprKernel { + struct ConstantDescr { + BufPtr buf; + // Only one of ptr and node is used at a time + // 1) ptr for the constant tensors + // 2) node for the constant custom class objects + void* ptr = nullptr; + Node* node = nullptr; + }; + + public: + // Constructor Params: + // * subgraph + // - the graph that needs to be compiled. + // * kernel_func_name + // - the name that should be used for the generated kernel. + // * custom_lowerings + // - map that represents custom lowering definitions for a set of ops. + // * symbolic_shape_inputs + // - a list of symbolic graph inputs that represent the symbolic dims of + // the input tensors. + // * pre_alloc + // - a flag to control pre-allocation of buffers. + explicit TensorExprKernel( + const std::shared_ptr& subgraph, + const std::string& kernel_func_name, + std::unordered_map custom_lowerings = + {}, + std::vector symbolic_shape_inputs = {}, + bool pre_alloc = false, + std::unordered_map< + const torch::jit::Value*, + std::vector> symbolic_strides = {}); + + explicit TensorExprKernel( + const std::shared_ptr& subgraph, + std::unordered_map custom_lowerings = + {}, + std::vector symbolic_shape_inputs = {}, + bool pre_alloc = false, + std::unordered_map< + const torch::jit::Value*, + std::vector> symbolic_strides = {}) + : TensorExprKernel( + subgraph, + SubgraphUtils::generateNameForGraph(subgraph), + custom_lowerings, + symbolic_shape_inputs, + pre_alloc, + symbolic_strides) {} + + void run(Stack& stack) const; + void runFast( + const std::vector& inputs, + const std::vector& outputs) const; + // Expected format of stack: + // ... + // i.e., output IValues must be below the input IValues in the stack. 
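+ // For example (hypothetical): for a kernel with two inputs and one output,
+ // the stack would hold the output IValue first (deeper in the stack) and the
+ // two input IValues on top of it before calling runWithAllocatedOutputs().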
+ void runWithAllocatedOutputs(Stack& stack) const; + + void fallback(Stack& stack) const { + InterpreterState(code_).run(stack); + } + void recompile(); + + StmtPtr getCodeGenStmt(); + + std::string getCodeText(const std::string& attr = "") { + return codegen_->getCodeText(attr); + } + + const std::shared_ptr graph() { + return graph_; + } + + const std::vector& getConstantDescriptors() const { + return constants_; + } + + const std::vector& getBufferArgs() const { + return bufferArgs_; + } + + const std::string& getKernelName() const { + return codegen_->kernel_func_name(); + } + + const std::vector& getSymbolicShapeInputs() const { + return symbolic_shape_inputs_; + } + + private: + enum BackendType { + kUninitialized, + kSimpleIREval, + kLLVMCodeGen, + kCudaCodeGen, + kBlockCodeGen, + }; + + enum MemoryLayoutPolicy { + kContiguous, + kChannelsLastNdContiguous, + }; + + void compile(); + void genInputDebugNames(); + void runKernel(Stack& stack) const; + + std::vector sizesForValue(const torch::jit::Value* v); + + // These functions broadcast shape and also store a `hasBroadcast_` variable. + std::vector broadcastShapesMut( + const std::vector& a, + const std::vector& b); + std::vector broadcastShapesMut( + std::vector> shapes); + + ArgValue toArg(const torch::jit::Value* v) const; + ExprHandle constant(const torch::jit::Value* v); + + Tensor computeValue(const torch::jit::Value* v); + + void bindConstant(const torch::jit::Value* v); + + StmtPtr transformLoops(BackendType backendType, StmtPtr st); + + std::string getCodeGenName(BackendType backendType); + + void getStaticOutputSizesAndStrides( + const at::ArrayRef& inputs, + std::vector>* static_sizes, + std::vector>* static_strides) const; + + std::vector prepareRunArgs( + const at::ArrayRef& inputs, + std::vector& outputs) const; + BackendType inferBackendTypeFromDevice(at::Device device); + + Tensor bindInput(const torch::jit::Value* input); + BlockPtr bindAllInputs(); + + // Deduce the memory layout policy to be propagated within + // NNC fusion group. The memory layout policy could be `kContiguous` + // or `kChannelsLastNdContiguous`. + // `kContiguous`: Always convert the non-contiguous input tensors and + // internal buffers to contiguous. + // `kChannelsLastNdContiguous`: Always convert the input tensors and + // internal buffers to channels-last contiguous. + // Currently, the rule is simple. + // If all the input and out tensors of NNC fusion group are channels-last + // contiguous, the policy is `kChannelsLastNdContiguous`. Otherwise, it + // is always `kContiguous`. + void deduceMemoryLayoutPolicy(); + + Tensor convertSymbolicOutputToCorrectStrides(torch::jit::Value* v); + Tensor convertStaticShapeOutputToCorrectStrides(torch::jit::Value* v); + Tensor convertSymbolicOutputToCorrectStrides( + const std::vector& sizes, + const std::vector& sorted_stride_indices_descending, + const std::vector& strides, + BufPtr& buf); + + NNCLoweringFunction getCustomLoweringFor(c10::Symbol op) const; + std::unordered_map getCustomLowerings() + const { + return custom_lowerings_; + } + + // Allocate memory for intermediate buffers at compile time. + // Specifically, we pre-allocate memory for intermediate buffers with static + // size and manage these buffers in the way we manage JIT constant tensors: + // push the buf args into the stack so NNC IR can access them at runtime. 
+ std::vector preAllocIntermediateBufs( + const std::vector& interm_bufs); + + struct UnpackedTensorOptions { + c10::optional dtype; + c10::optional layout; + c10::optional device; + c10::optional pinned_memory; + + UnpackedTensorOptions(const c10::TensorOptions& opts) + : dtype(c10::optTypeMetaToScalarType(opts.dtype_opt())), + layout(opts.layout_opt()), + device(opts.device_opt()), + pinned_memory(opts.pinned_memory_opt()) {} + }; + + ExprHandle getVarForShape(const c10::ShapeSymbol& ss); + std::vector computeInputTensorDims( + const torch::jit::Value* input); + ExprHandle getStrideArg(size_t tensor_input, size_t stride_index); + std::vector sizesFromSymbolicShape( + const c10::SymbolicShape& shape); + std::vector getInputStrides( + const torch::jit::Value* input, + const std::vector& inputTensorDims); + std::vector& getSymbolicStrideDesc( + const torch::jit::Value* value); + + // Apply the optimizations to the graph owned by the current fusion group, + // like concatenation optimization, post-op fusion, and some other graph-level + // optimizations. + void optimizeOwningGraph(); + + int64_t nInputs_ = 0; + int64_t nOutputs_ = 0; + std::vector bufferArgs_; + std::vector> tensorOutputSizes_; + std::vector> tensorOutputStrides_; + std::vector tensorOutputStrideDesc_; + std::vector isOutputScalar_; + std::vector tensorOutputTensorOptions_; + std::unordered_set bufOutputs_; + std::unordered_set bufsToBeParallelized_; + std::unordered_map bufs_; + std::unordered_map scalars_; + std::unordered_map input_name_map_; + std::unique_ptr codegen_; + at::Device device_ = at::kCPU; + std::shared_ptr graph_; + Code code_; + bool allow_fallback_{false}; + bool use_fallback_{false}; + bool hasRandom_{false}; + bool hasBroadcast_{false}; + std::unordered_map> + known_sizes_; + + std::vector> tensorOutputSymbolicSizes_; + // A map from ShapeSymbol.value() to the corresponding Var. + std::unordered_map shapeSymbolToVar_; + std::unordered_map shapeSymbolInputPos_; + // List of values corresponding to the ShapeSymbols that are inputs to + // kernel being compiled. The order of these values correspond to the order + // of the symbolic inputs at the end of the list of inputs to the kernel. 
+ std::vector symbolic_shape_inputs_; + bool has_symbolic_shapes_{false}; + + std::vector unpacked_constant_tensors_; + std::vector constants_; + + std::unordered_map custom_lowerings_; + StmtPtr stmt_ = nullptr; + bool pre_alloc_{false}; + std::string kernel_func_name_; + + // index of stack, stride index of tensor that will be appended as a codegen + // arg + std::vector> input_stride_args_; + // map from to stride as arg VarHandle + std::unordered_map, VarHandle, SmallSizeTPairHash> + strideArgToVar_; + std::unordered_map< + const torch::jit::Value*, + std::vector> + symbolic_strides_; + + // Memory layout to be propagated with fusion group + MemoryLayoutPolicy memory_layout_policy_ = MemoryLayoutPolicy::kContiguous; +}; + +TORCH_API int& getTECudaPointwiseLoopLevels(); +TORCH_API int& getTECudaPointwiseBlockCount(); +TORCH_API int& getTECudaPointwiseBlockSize(); +TORCH_API bool& getTEGenerateBlockCode(); +TORCH_API bool& getTEMustUseLLVMOnCPU(); +TORCH_API bool fallbackAllowed(); +TORCH_API bool setFallbackAllowed(bool value); +TORCH_API bool& getCatWoConditionals(); +TORCH_API bool& getOptConditionals(); + +TORCH_API c10::optional pickDeviceType( + const at::ArrayRef& inputs); + +bool isContiguous( + const torch::jit::Value* v, + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..698de07f2be54f11f215bb464d98c69141de2ed1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensor.h @@ -0,0 +1,329 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class TORCH_API Tensor { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Tensor(BufPtr buf, const std::vector& args, ExprPtr body) + : buf_(std::move(buf)) { + stmt_ = constructStmt(args, std::move(body), {}, {}); + } + Tensor(BufHandle buf, const std::vector& args, ExprHandle body) + : Tensor(buf.node(), VarHandleVectorToVarVector(args), body.node()) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Tensor( + BufPtr buf, + const std::vector& args, + const std::vector& reduce_dims, + const std::vector& reduce_args, + ExprPtr body) + : buf_(std::move(buf)) { + stmt_ = constructStmt(args, std::move(body), reduce_dims, reduce_args); + } + Tensor( + BufHandle buf, + const std::vector& args, + const std::vector& reduce_dims, + const std::vector& reduce_args, + ExprHandle body) + : Tensor( + buf.node(), + VarHandleVectorToVarVector(args), + ExprHandleVectorToExprVector(reduce_dims), + VarHandleVectorToVarVector(reduce_args), + body.node()) {} + + Tensor(BufPtr buf, StmtPtr stmt) + : buf_(std::move(buf)), stmt_(std::move(stmt)) {} + + BufPtr buf() const { + return buf_; + } + + StmtPtr stmt() const { + return stmt_; + } + + template + inline ExprHandle load(const std::vector& args) const; + template + inline ExprHandle load(const Ts&... 
ts) const; + + private: + StmtPtr constructStmt( + const std::vector& args, + ExprPtr body, + const std::vector& reduce_dims, + const std::vector& reduce_args) const; + + BufPtr buf_; + StmtPtr stmt_; +}; + +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function< + ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function< + ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function&)>& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function&)>& body_func); + +inline std::vector create_index_vars( + const std::vector& dims) { + std::vector vars; + vars.reserve(dims.size()); + for (const ExprHandle& dim : dims) { + vars.emplace_back(alloc( + "i", dim.dtype().scalar_type() == ScalarType::Long ? kLong : kInt)); + } + return vars; +} + +// Handle reductions over a Reducer and a body_func which produces values. 
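+//
+// A minimal usage sketch (editorial addition, not part of the original header;
+// `a`, `M` and `N` are assumed names): given a 2-D input buffer of shape
+// {M, N} and the Sum() reducer from reduction.h, a row-wise sum can be built
+// with the overload below that derives the initializer from the reducer:
+//
+//   BufHandle a("a", {M, N}, kFloat);
+//   Tensor rowsum = Reduce(
+//       "rowsum",
+//       {M},    // output dims
+//       Sum(),  // reducer
+//       [&](const VarHandle& m, const VarHandle& n) { return a.load(m, n); },
+//       {N});   // reduce dims
+//
+// The body_func receives the output indices followed by the reduce indices;
+// when reduce_dims is empty the call degenerates into a simple copy.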
+template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const InitFunc& init_func, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + std::vector vars = create_index_vars(dims); + std::vector reduce_vars = create_index_vars(reduce_dims); + + // If reduce_vars is empty, then it's not a reduction, but rather a simple + // copy + if (reduce_vars.empty()) { + ExprHandle body = Reducer::getReduceBody(body_func, vars); + BufHandle func_result = Buf::make( + func_name, dims, body.dtype(), c10::nullopt, std::move(strides)); + return Tensor(std::move(func_result), vars, std::move(body)); + } + + std::vector all_vars; + all_vars.insert(all_vars.end(), vars.begin(), vars.end()); + all_vars.insert(all_vars.end(), reduce_vars.begin(), reduce_vars.end()); + + ExprHandle body = Reducer::getReduceBody(body_func, all_vars); + std::vector output_args(vars.begin(), vars.end()); + ExprHandle init_expr = Cast::make(body.dtype(), init_func(vars)); + BufHandle func_result = Buf::make(func_name, dims, body.dtype(), init_expr); + + ExprHandle reduce_op = reducer(func_result, body, output_args, reduce_vars); + if (body.dtype() == kBFloat16) { + ExprHandle init_expr_acc = Cast::make(kFloat, init_func(vars)); + BufHandle func_result_acc = + Buf::make(func_name + "_acc", dims, kFloat, init_expr_acc); + reduce_op = reducer( + func_result, + std::move(func_result_acc), + std::move(body), + output_args, + reduce_vars); + } + + Tensor t = Tensor( + std::move(func_result), + vars, + reduce_dims, + reduce_vars, + std::move(reduce_op)); + return t; +} +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + const InitFunc& init_func, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + return Reduce( + func_name, + dims, + c10::nullopt, + reducer, + init_func, + body_func, + reduce_dims); +} + +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + return Reduce( + func_name, + dims, + strides, + reducer, + [&](ParameterList p) { return ExprHandle(reducer.initializer()); }, + body_func, + reduce_dims); +} +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + return Reduce( + func_name, dims, c10::nullopt, reducer, body_func, reduce_dims); +} + +// Overload which allows inline lambda functions for the body_func. 
+template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const BodyFunc&& body_func, + const std::vector& reduce_dims) { + return Reduce(func_name, dims, strides, reducer, body_func, reduce_dims); +} +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + const BodyFunc&& body_func, + const std::vector& reduce_dims) { + return Reduce(func_name, dims, c10::nullopt, reducer, body_func, reduce_dims); +} + +TORCH_API Tensor Reduce( + const std::string& name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const BufHandle& buffer, + const std::vector& reduce_dims); +TORCH_API Tensor Reduce( + const std::string& name, + const std::vector& dims, + const Reducer& reducer, + const BufHandle& buffer, + const std::vector& reduce_dims); + +// Overload for the common case of all dimensions of a previously Computed +// Tensor. +TORCH_API Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + Tensor tensor, + const std::vector& reduce_dims); +TORCH_API Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + Tensor tensor, + const std::vector& reduce_dims); + +template +inline ExprHandle Tensor::load(const Ts&... ts) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params({ExprHandle(ts)...}); + return Load::make(BufHandle(this->buf()), params); +} + +template +inline ExprHandle Tensor::load(const std::vector& args) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params(args.begin(), args.end()); + return Load::make(BufHandle(this->buf()), params); +} + +template +inline ExprHandle BufHandle::load(const Ts&... ts) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params({ExprHandle(ts)...}); + return ExprHandle(alloc(node(), ExprHandleVectorToExprVector(params))); +} + +template +inline ExprHandle BufHandle::load(const std::vector& args) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params(args.begin(), args.end()); + return ExprHandle(alloc(node(), ExprHandleVectorToExprVector(params))); +} + +inline ExprHandle BufHandle::load(const std::vector& args) const { + return this->template load(args); +} + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/unique_name_manager.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/unique_name_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..b6162e6168fd63967fb71b4abd3d5d56bbb70f7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/unique_name_manager.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class VarHandle; +class Var; + +using VarNameMap = std::unordered_map; + +// A manager to get unique names from vars. +// It starts with the name hints of the var and append "_" + $counter until it +// hits a unique name. 
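+//
+// Illustration (editorial note, not from the original header): three distinct
+// vars that all carry the name hint "i" would typically come out as "i",
+// "i_1" and "i_2", following the "_" + counter scheme described above.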
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class TORCH_API UniqueNameManager { + public: + const std::string& get_unique_name(const VarHandle& v); + + const std::string& get_unique_name(VarPtr v); + + private: + friend class ScopedVarName; + VarNameMap unique_name_mapping_; + std::unordered_map unique_name_count_; + std::unordered_set all_unique_names_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..01a6007e9f8e824a1dae904d63cc01ef091b03af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h @@ -0,0 +1,7 @@ +#pragma once +#include +#include + +at::Dimname THPDimname_parse(PyObject* obj); +bool THPUtils_checkDimname(PyObject* obj); +bool THPUtils_checkDimnameList(PyObject* obj); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h new file mode 100644 index 0000000000000000000000000000000000000000..0130e41ccb46edf3ab5d5a35c80607383acbddf8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h @@ -0,0 +1,25 @@ +#pragma once +// workaround for https://github.com/python/cpython/pull/23326 +#include +#include +// workaround for Python 2 issue: https://bugs.python.org/issue17120 +// NOTE: It looks like this affects Python 3 as well. +#pragma push_macro("_XOPEN_SOURCE") +#pragma push_macro("_POSIX_C_SOURCE") +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE + +#include +#include +#include + +#pragma pop_macro("_XOPEN_SOURCE") +#pragma pop_macro("_POSIX_C_SOURCE") + +#ifdef copysign +#undef copysign +#endif + +#if PY_MAJOR_VERSION < 3 +#error "Python 2 has reached end-of-life and is no longer supported by PyTorch." +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..e2587babf057b1b204c885280a4e63c27daf2a50 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h @@ -0,0 +1,25 @@ +#ifndef THP_SERIALIZATION_INC +#define THP_SERIALIZATION_INC + +template +void doRead(io fildes, void* buf, size_t nbytes); + +template +void doWrite(io fildes, void* buf, size_t nbytes); + +// Note that this takes a mutable storage because it may pass through +// to at::from_blob. 
+template +void THPStorage_writeFileRaw( + c10::StorageImpl* self, + io fd, + bool save_size, + uint64_t element_size); + +template +c10::intrusive_ptr THPStorage_readFileRaw( + io fd, + c10::intrusive_ptr storage, + uint64_t element_size); + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..4e4643b75d9a6cc6369512d5d909149d5f6150ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h @@ -0,0 +1,227 @@ +#ifndef THP_UTILS_H +#define THP_UTILS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef USE_CUDA +#include +#endif + +#define THPUtils_(NAME) TH_CONCAT_4(THP, Real, Utils_, NAME) + +#define THPUtils_typename(obj) (Py_TYPE(obj)->tp_name) + +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define THP_EXPECT(x, y) (__builtin_expect((x), (y))) +#else +#define THP_EXPECT(x, y) (x) +#endif + +#define THPUtils_checkReal_FLOAT(object) \ + (PyFloat_Check(object) || PyLong_Check(object)) + +#define THPUtils_unpackReal_FLOAT(object) \ + (PyFloat_Check(object) ? PyFloat_AsDouble(object) \ + : PyLong_Check(object) \ + ? PyLong_AsLongLong(object) \ + : (throw std::runtime_error("Could not parse real"), 0)) + +#define THPUtils_checkReal_INT(object) PyLong_Check(object) + +#define THPUtils_unpackReal_INT(object) \ + (PyLong_Check(object) \ + ? PyLong_AsLongLong(object) \ + : (throw std::runtime_error("Could not parse real"), 0)) + +#define THPUtils_unpackReal_BOOL(object) \ + (PyBool_Check(object) \ + ? object \ + : (throw std::runtime_error("Could not parse real"), Py_False)) + +#define THPUtils_unpackReal_COMPLEX(object) \ + (PyComplex_Check(object) \ + ? (c10::complex( \ + PyComplex_RealAsDouble(object), PyComplex_ImagAsDouble(object))) \ + : PyFloat_Check(object) \ + ? (c10::complex(PyFloat_AsDouble(object), 0)) \ + : PyLong_Check(object) \ + ? 
(c10::complex(PyLong_AsLongLong(object), 0)) \ + : (throw std::runtime_error("Could not parse real"), \ + c10::complex(0, 0))) + +#define THPUtils_checkReal_BOOL(object) PyBool_Check(object) + +#define THPUtils_checkReal_COMPLEX(object) \ + PyComplex_Check(object) || PyFloat_Check(object) || PyLong_Check(object) || \ + PyInt_Check(object) + +#define THPUtils_newReal_FLOAT(value) PyFloat_FromDouble(value) +#define THPUtils_newReal_INT(value) PyInt_FromLong(value) + +#define THPUtils_newReal_BOOL(value) PyBool_FromLong(value) + +#define THPUtils_newReal_COMPLEX(value) \ + PyComplex_FromDoubles(value.real(), value.imag()) + +#define THPDoubleUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPDoubleUtils_unpackReal(object) \ + (double)THPUtils_unpackReal_FLOAT(object) +#define THPDoubleUtils_newReal(value) THPUtils_newReal_FLOAT(value) +#define THPFloatUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPFloatUtils_unpackReal(object) \ + (float)THPUtils_unpackReal_FLOAT(object) +#define THPFloatUtils_newReal(value) THPUtils_newReal_FLOAT(value) +#define THPHalfUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPHalfUtils_unpackReal(object) \ + (at::Half) THPUtils_unpackReal_FLOAT(object) +#define THPHalfUtils_newReal(value) PyFloat_FromDouble(value) +#define THPHalfUtils_newAccreal(value) THPUtils_newReal_FLOAT(value) +#define THPComplexDoubleUtils_checkReal(object) \ + THPUtils_checkReal_COMPLEX(object) +#define THPComplexDoubleUtils_unpackReal(object) \ + THPUtils_unpackReal_COMPLEX(object) +#define THPComplexDoubleUtils_newReal(value) THPUtils_newReal_COMPLEX(value) +#define THPComplexFloatUtils_checkReal(object) \ + THPUtils_checkReal_COMPLEX(object) +#define THPComplexFloatUtils_unpackReal(object) \ + (c10::complex)THPUtils_unpackReal_COMPLEX(object) +#define THPComplexFloatUtils_newReal(value) THPUtils_newReal_COMPLEX(value) +#define THPBFloat16Utils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPBFloat16Utils_unpackReal(object) \ + (at::BFloat16) THPUtils_unpackReal_FLOAT(object) +#define THPBFloat16Utils_newReal(value) PyFloat_FromDouble(value) +#define THPBFloat16Utils_newAccreal(value) THPUtils_newReal_FLOAT(value) + +#define THPBoolUtils_checkReal(object) THPUtils_checkReal_BOOL(object) +#define THPBoolUtils_unpackReal(object) THPUtils_unpackReal_BOOL(object) +#define THPBoolUtils_newReal(value) THPUtils_newReal_BOOL(value) +#define THPBoolUtils_checkAccreal(object) THPUtils_checkReal_BOOL(object) +#define THPBoolUtils_unpackAccreal(object) \ + (int64_t) THPUtils_unpackReal_BOOL(object) +#define THPBoolUtils_newAccreal(value) THPUtils_newReal_BOOL(value) +#define THPLongUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPLongUtils_unpackReal(object) \ + (int64_t) THPUtils_unpackReal_INT(object) +#define THPLongUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPIntUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPIntUtils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPIntUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPShortUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPShortUtils_unpackReal(object) (short)THPUtils_unpackReal_INT(object) +#define THPShortUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPCharUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPCharUtils_unpackReal(object) (char)THPUtils_unpackReal_INT(object) +#define THPCharUtils_newReal(value) THPUtils_newReal_INT(value) +#define 
THPByteUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPByteUtils_unpackReal(object) \ + (unsigned char)THPUtils_unpackReal_INT(object) +#define THPByteUtils_newReal(value) THPUtils_newReal_INT(value) +// quantized types +#define THPQUInt8Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt8Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQInt8Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQInt8Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQInt32Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQInt32Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQInt32Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQUInt4x2Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt4x2Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt4x2Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQUInt2x4Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt2x4Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt2x4Utils_newReal(value) THPUtils_newReal_INT(value) + +/* + From https://github.com/python/cpython/blob/v3.7.0/Modules/xxsubtype.c + If compiled as a shared library, some compilers don't allow addresses of + Python objects defined in other libraries to be used in static PyTypeObject + initializers. The DEFERRED_ADDRESS macro is used to tag the slots where such + addresses appear; the module init function that adds the PyTypeObject to the + module must fill in the tagged slots at runtime. The argument is for + documentation -- the macro ignores it. +*/ +#define DEFERRED_ADDRESS(ADDR) nullptr + +#define THPUtils_assert(cond, ...) \ + THPUtils_assertRet(nullptr, cond, __VA_ARGS__) +#define THPUtils_assertRet(value, cond, ...) 
\ + if (THP_EXPECT(!(cond), 0)) { \ + THPUtils_setError(__VA_ARGS__); \ + return value; \ + } +TORCH_PYTHON_API void THPUtils_setError(const char* format, ...); +TORCH_PYTHON_API void THPUtils_invalidArguments( + PyObject* given_args, + PyObject* given_kwargs, + const char* function_name, + size_t num_options, + ...); + +bool THPUtils_checkIntTuple(PyObject* arg); +std::vector THPUtils_unpackIntTuple(PyObject* arg); + +TORCH_PYTHON_API void THPUtils_addPyMethodDefs( + std::vector& vector, + PyMethodDef* methods); + +int THPUtils_getCallable(PyObject* arg, PyObject** result); + +typedef THPPointer THPGeneratorPtr; +typedef class THPPointer THPStoragePtr; + +TORCH_PYTHON_API std::vector THPUtils_unpackLongs(PyObject* arg); +PyObject* THPUtils_dispatchStateless( + PyObject* tensor, + const char* name, + PyObject* args, + PyObject* kwargs); + +template +struct mod_traits {}; + +template +struct mod_traits< + _real, + typename std::enable_if::value>::type> { + static _real mod(_real a, _real b) { + return fmod(a, b); + } +}; + +template +struct mod_traits< + _real, + typename std::enable_if::value>::type> { + static _real mod(_real a, _real b) { + return a % b; + } +}; + +void setBackCompatBroadcastWarn(bool warn); +bool getBackCompatBroadcastWarn(); + +void setBackCompatKeepdimWarn(bool warn); +bool getBackCompatKeepdimWarn(); +bool maybeThrowBackCompatKeepdimWarn(char* func); + +// NB: This is in torch/csrc/cuda/utils.cpp, for whatever reason +#ifdef USE_CUDA +std::vector> +THPUtils_PySequence_to_CUDAStreamList(PyObject* obj); +#endif + +void storage_fill(const at::Storage& self, uint8_t value); +void storage_set(const at::Storage& self, ptrdiff_t idx, uint8_t value); +uint8_t storage_get(const at::Storage& self, ptrdiff_t idx); + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/custom_class.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..a556ae6a81e572f1db0efed8dd0640afb40cea82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/custom_class.h @@ -0,0 +1,515 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { + +/// This function is used in conjunction with `class_::def()` to register +/// a constructor for a given C++ class type. For example, +/// `torch::init()` would register a two-argument constructor +/// taking an `int` and a `std::string` as argument. +template +detail::types init() { + return detail::types{}; +} + +template +struct InitLambda { + Func f; +}; + +template +decltype(auto) init(Func&& f) { + using InitTraits = c10::guts::infer_function_traits_t>; + using ParameterTypeList = typename InitTraits::parameter_types; + + InitLambda init{std::forward(f)}; + return init; +} + +/// Entry point for custom C++ class registration. To register a C++ class +/// in PyTorch, instantiate `torch::class_` with the desired class as the +/// template parameter. Typically, this instantiation should be done in +/// the initialization of a global variable, so that the class will be +/// made available on dynamic library loading without any additional API +/// calls needed. 
For example, to register a class named Foo, you might +/// create a global variable like so: +/// +/// static auto register_foo = torch::class_("myclasses", "Foo") +/// .def("myMethod", &Foo::myMethod) +/// .def("lambdaMethod", [](const c10::intrusive_ptr& self) { +/// // Do something with `self` +/// }); +/// +/// In addition to registering the class, this registration also chains +/// `def()` calls to register methods. `myMethod()` is registered with +/// a pointer to the Foo class's `myMethod()` method. `lambdaMethod()` +/// is registered with a C++ lambda expression. +template +class class_ : public ::torch::detail::class_base { + static_assert( + std::is_base_of::value, + "torch::class_ requires T to inherit from CustomClassHolder"); + + public: + /// This constructor actually registers the class type. + /// String argument `namespaceName` is an identifier for the + /// namespace you would like this class to appear in. + /// String argument `className` is the name you would like to + /// see this class exposed as in Python and TorchScript. For example, if + /// you pass `foo` as the namespace name and `Bar` as the className, the + /// class will appear as `torch.classes.foo.Bar` in Python and TorchScript + explicit class_( + const std::string& namespaceName, + const std::string& className, + std::string doc_string = "") + : class_base( + namespaceName, + className, + std::move(doc_string), + typeid(c10::intrusive_ptr), + typeid(c10::tagged_capsule)) {} + + /// def() can be used in conjunction with `torch::init()` to register + /// a constructor for a given C++ class type. For example, passing + /// `torch::init()` would register a two-argument + /// constructor taking an `int` and a `std::string` as argument. + template + class_& def( + torch::detail::types, + std::string doc_string = "", + std::initializer_list default_args = + {}) { // Used in combination with + // torch::init<...>() + auto func = [](c10::tagged_capsule self, Types... args) { + auto classObj = c10::make_intrusive(args...); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(std::move(classObj))); + }; + + defineMethod( + "__init__", + std::move(func), + std::move(doc_string), + default_args); + return *this; + } + + // Used in combination with torch::init([]lambda(){......}) + template + class_& def( + InitLambda> init, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto init_lambda_wrapper = [func = std::move(init.f)]( + c10::tagged_capsule self, + ParameterTypes... arg) { + c10::intrusive_ptr classObj = + at::guts::invoke(func, std::forward(arg)...); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(classObj)); + }; + + defineMethod( + "__init__", + std::move(init_lambda_wrapper), + std::move(doc_string), + default_args); + + return *this; + } + + /// This is the normal method registration API. `name` is the name that + /// the method will be made accessible by in Python and TorchScript. + /// `f` is a callable object that defines the method. Typically `f` + /// will either be a pointer to a method on `CurClass`, or a lambda + /// expression that takes a `c10::intrusive_ptr` as the first + /// argument (emulating a `this` argument in a C++ method.) 
+ /// + /// Examples: + /// + /// // Exposes method `foo` on C++ class `Foo` as `call_foo()` in + /// // Python and TorchScript + /// .def("call_foo", &Foo::foo) + /// + /// // Exposes the given lambda expression as method `call_lambda()` + /// // in Python and TorchScript. + /// .def("call_lambda", [](const c10::intrusive_ptr& self) { + /// // do something + /// }) + template + class_& def( + std::string name, + Func f, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto wrapped_f = detail::wrap_func(std::move(f)); + defineMethod( + std::move(name), + std::move(wrapped_f), + std::move(doc_string), + default_args); + return *this; + } + + /// Method registration API for static methods. + template + class_& def_static(std::string name, Func func, std::string doc_string = "") { + auto qualMethodName = qualClassName + "." + name; + auto schema = + c10::inferFunctionSchemaSingleReturn(std::move(name), ""); + + auto wrapped_func = + [func = std::move(func)](jit::Stack& stack) mutable -> void { + using RetType = + typename c10::guts::infer_function_traits_t::return_type; + detail::BoxedProxy()(stack, func); + }; + auto method = std::make_unique( + std::move(qualMethodName), + std::move(schema), + std::move(wrapped_func), + std::move(doc_string)); + + classTypePtr->addStaticMethod(method.get()); + registerCustomClassMethod(std::move(method)); + return *this; + } + + /// Property registration API for properties with both getter and setter + /// functions. + template + class_& def_property( + const std::string& name, + GetterFunc getter_func, + SetterFunc setter_func, + std::string doc_string = "") { + torch::jit::Function* getter{}; + torch::jit::Function* setter{}; + + auto wrapped_getter = + detail::wrap_func(std::move(getter_func)); + getter = defineMethod(name + "_getter", wrapped_getter, doc_string); + + auto wrapped_setter = + detail::wrap_func(std::move(setter_func)); + setter = defineMethod(name + "_setter", wrapped_setter, doc_string); + + classTypePtr->addProperty(name, getter, setter); + return *this; + } + + /// Property registration API for properties with only getter function. + template + class_& def_property( + const std::string& name, + GetterFunc getter_func, + std::string doc_string = "") { + torch::jit::Function* getter{}; + + auto wrapped_getter = + detail::wrap_func(std::move(getter_func)); + getter = defineMethod(name + "_getter", wrapped_getter, doc_string); + + classTypePtr->addProperty(name, getter, nullptr); + return *this; + } + + /// Property registration API for properties with read-write access. + template + class_& def_readwrite(const std::string& name, T CurClass::*field) { + auto getter_func = [field = + field](const c10::intrusive_ptr& self) { + return self.get()->*field; + }; + + auto setter_func = [field = field]( + const c10::intrusive_ptr& self, T value) { + self.get()->*field = value; + }; + + return def_property(name, getter_func, setter_func); + } + + /// Property registration API for properties with read-only access. + template + class_& def_readonly(const std::string& name, T CurClass::*field) { + auto getter_func = + [field = std::move(field)](const c10::intrusive_ptr& self) { + return self.get()->*field; + }; + + return def_property(name, getter_func); + } + + /// This is an unsafe method registration API added for adding custom JIT + /// backend support via custom C++ classes. It is not for general purpose use. 
+ class_& _def_unboxed( + const std::string& name, + std::function func, + c10::FunctionSchema schema, + std::string doc_string = "") { + auto method = std::make_unique( + qualClassName + "." + name, + std::move(schema), + std::move(func), + std::move(doc_string)); + classTypePtr->addMethod(method.get()); + registerCustomClassMethod(std::move(method)); + return *this; + } + + /// def_pickle() is used to define exactly what state gets serialized + /// or deserialized for a given instance of a custom C++ class in + /// Python or TorchScript. This protocol is equivalent to the Pickle + /// concept of `__getstate__` and `__setstate__` from Python + /// (https://docs.python.org/2/library/pickle.html#object.__getstate__) + /// + /// Currently, both the `get_state` and `set_state` callables must be + /// C++ lambda expressions. They should have the following signatures, + /// where `CurClass` is the class you're registering and `T1` is some object + /// that encapsulates the state of the object. + /// + /// __getstate__(intrusive_ptr) -> T1 + /// __setstate__(T2) -> intrusive_ptr + /// + /// `T1` must be an object that is convertable to IValue by the same rules + /// for custom op/method registration. + /// + /// For the common case, T1 == T2. T1 can also be a subtype of T2. An + /// example where it makes sense for T1 and T2 to differ is if __setstate__ + /// handles legacy formats in a backwards compatible way. + /// + /// Example: + /// + /// .def_pickle( + /// // __getstate__ + /// [](const c10::intrusive_ptr>& self) { + /// return self->stack_; + /// }, + /// [](std::vector state) { // __setstate__ + /// return c10::make_intrusive>( + /// std::vector{"i", "was", "deserialized"}); + /// }) + template + class_& def_pickle(GetStateFn&& get_state, SetStateFn&& set_state) { + static_assert( + c10::guts::is_stateless_lambda>::value && + c10::guts::is_stateless_lambda>::value, + "def_pickle() currently only supports lambdas as " + "__getstate__ and __setstate__ arguments."); + def("__getstate__", std::forward(get_state)); + + // __setstate__ needs to be registered with some custom handling: + // We need to wrap the invocation of the user-provided function + // such that we take the return value (i.e. c10::intrusive_ptr) + // and assign it to the `capsule` attribute. + using SetStateTraits = + c10::guts::infer_function_traits_t>; + using SetStateArg = typename c10::guts::typelist::head_t< + typename SetStateTraits::parameter_types>; + auto setstate_wrapper = [set_state = std::forward(set_state)]( + c10::tagged_capsule self, + SetStateArg&& arg) { + c10::intrusive_ptr classObj = + at::guts::invoke(set_state, std::forward(arg)); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(classObj)); + }; + defineMethod( + "__setstate__", + detail::wrap_func( + std::move(setstate_wrapper))); + + // type validation + auto getstate_schema = classTypePtr->getMethod("__getstate__").getSchema(); + auto format_getstate_schema = [&getstate_schema]() { + std::stringstream ss; + ss << getstate_schema; + return ss.str(); + }; + TORCH_CHECK( + getstate_schema.arguments().size() == 1, + "__getstate__ should take exactly one argument: self. Got: ", + format_getstate_schema()); + auto first_arg_type = getstate_schema.arguments().at(0).type(); + TORCH_CHECK( + *first_arg_type == *classTypePtr, + "self argument of __getstate__ must be the custom class type. 
Got ", + first_arg_type->repr_str()); + TORCH_CHECK( + getstate_schema.returns().size() == 1, + "__getstate__ should return exactly one value for serialization. Got: ", + format_getstate_schema()); + + auto ser_type = getstate_schema.returns().at(0).type(); + auto setstate_schema = classTypePtr->getMethod("__setstate__").getSchema(); + auto arg_type = setstate_schema.arguments().at(1).type(); + TORCH_CHECK( + ser_type->isSubtypeOf(*arg_type), + "__getstate__'s return type should be a subtype of " + "input argument of __setstate__. Got ", + ser_type->repr_str(), + " but expected ", + arg_type->repr_str()); + + return *this; + } + + private: + template + torch::jit::Function* defineMethod( + std::string name, + Func func, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto qualMethodName = qualClassName + "." + name; + auto schema = + c10::inferFunctionSchemaSingleReturn(std::move(name), ""); + + // If default values are provided for function arguments, there must be + // none (no default values) or default values for all function + // arguments, except for self. This is because argument names are not + // extracted by inferFunctionSchemaSingleReturn, and so there must be a + // torch::arg instance in default_args even for arguments that do not + // have an actual default value provided. + TORCH_CHECK( + default_args.size() == 0 || + default_args.size() == schema.arguments().size() - 1, + "Default values must be specified for none or all arguments"); + + // If there are default args, copy the argument names and default values to + // the function schema. + if (default_args.size() > 0) { + schema = withNewArguments(schema, default_args); + } + + auto wrapped_func = + [func = std::move(func)](jit::Stack& stack) mutable -> void { + // TODO: we need to figure out how to profile calls to custom functions + // like this! Currently can't do it because the profiler stuff is in + // libtorch and not ATen + using RetType = + typename c10::guts::infer_function_traits_t::return_type; + detail::BoxedProxy()(stack, func); + }; + auto method = std::make_unique( + qualMethodName, + std::move(schema), + std::move(wrapped_func), + std::move(doc_string)); + + // Register the method here to keep the Method alive. + // ClassTypes do not hold ownership of their methods (normally it + // those are held by the CompilationUnit), so we need a proxy for + // that behavior here. + auto method_val = method.get(); + classTypePtr->addMethod(method_val); + registerCustomClassMethod(std::move(method)); + return method_val; + } +}; + +/// make_custom_class() is a convenient way to create an instance of a +/// registered custom class and wrap it in an IValue, for example when you want +/// to pass the object to TorchScript. Its syntax is equivalent to APIs like +/// `std::make_shared<>` or `c10::make_intrusive<>`. +/// +/// For example, if you have a custom C++ class that can be constructed from an +/// `int` and `std::string`, you might use this API like so: +/// +/// IValue custom_class_iv = torch::make_custom_class(3, +/// "foobarbaz"); +template +c10::IValue make_custom_class(CtorArgs&&... args) { + auto userClassInstance = + c10::make_intrusive(std::forward(args)...); + return c10::IValue(std::move(userClassInstance)); +} + +// Alternative api for creating a torchbind class over torch::class_ this api is +// preffered to prevent size regressions on Edge usecases. 
Must be used in +// conjunction with TORCH_SELECTIVE_CLASS macro aka +// selective_class("foo_namespace", TORCH_SELECTIVE_CLASS("foo")) +template +inline class_ selective_class_( + const std::string& namespace_name, + detail::SelectiveStr className) { + auto class_name = std::string(className.operator const char*()); + return torch::class_(namespace_name, class_name); +} + +template +inline detail::ClassNotSelected selective_class_( + const std::string&, + detail::SelectiveStr) { + return detail::ClassNotSelected(); +} + +// jit namespace for backward-compatibility +// We previously defined everything in torch::jit but moved it out to +// better reflect that these features are not limited only to TorchScript +namespace jit { + +using ::torch::class_; +using ::torch::getCustomClass; +using ::torch::init; +using ::torch::isCustomClass; + +} // namespace jit + +template +inline class_ Library::class_(const std::string& className) { + TORCH_CHECK( + kind_ == DEF || kind_ == FRAGMENT, + "class_(\"", + className, + "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. " + "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. " + "(Error occurred at ", + file_, + ":", + line_, + ")"); + TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_); + return torch::class_(*ns_, className); +} + +const std::unordered_set getAllCustomClassesNames(); + +template +inline class_ Library::class_(detail::SelectiveStr className) { + auto class_name = std::string(className.operator const char*()); + TORCH_CHECK( + kind_ == DEF || kind_ == FRAGMENT, + "class_(\"", + class_name, + "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. " + "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. " + "(Error occurred at ", + file_, + ":", + line_, + ")"); + TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_); + return torch::class_(*ns_, class_name); +} + +template +inline detail::ClassNotSelected Library::class_(detail::SelectiveStr) { + return detail::ClassNotSelected(); +} + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h new file mode 100644 index 0000000000000000000000000000000000000000..736d5aacdaa3226e7a247383333823870f978405 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h @@ -0,0 +1,239 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { + +namespace detail { +/** + * In the Facebook internal build (using BUCK), this macro is enabled by + * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer + * binary. + */ +#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE +TORCH_API void record_custom_class(std::string name); + +/** + * Record an instance of a custom class being loaded + * grab portion of string after final '.' from qualified name + * as this seemingly aligns with how users name their custom classes + * example: __torch__.torch.classes.xnnpack.Conv2dOpContext + */ +#define RECORD_CUSTOM_CLASS(NAME) \ + auto name = std::string(NAME); \ + detail::record_custom_class(name.substr(name.find_last_of(".") + 1)); +#else +#define RECORD_CUSTOM_CLASS(NAME) +#endif +} // namespace detail + +/// This struct is used to represent default values for arguments +/// when registering methods for custom classes. 
+/// static auto register_foo = torch::class_("myclasses", "Foo") +/// .def("myMethod", &Foo::myMethod, {torch::arg("name") = name}); +struct arg { + // Static method for representing a default value of None. This is meant to + // be used like so: + // torch::arg("name") = torch::arg::none + // and is identical to: + // torch::arg("name") = IValue() + static c10::IValue none() { + return c10::IValue(); + } + + // Explicit constructor. + explicit arg(std::string name) + : name_(std::move(name)), value_(c10::nullopt) {} + // Assignment operator. This enables the pybind-like syntax of + // torch::arg("name") = value. + arg& operator=(const c10::IValue& rhs) { + value_ = rhs; + return *this; + } + + // The name of the argument. This is copied to the schema; argument + // names cannot be extracted from the C++ declaration. + std::string name_; + // IValue's default constructor makes it None, which is not distinguishable + // from an actual, user-provided default value that is None. This boolean + // helps distinguish between the two cases. + c10::optional value_; +}; + +namespace detail { + +// Argument type utilities +template +struct types { + using type = types; +}; + +template +struct WrapMethod; + +template +struct WrapMethod { + WrapMethod(R (CurrClass::*m)(Args...)) : m(std::move(m)) {} + + R operator()(c10::intrusive_ptr cur, Args... args) { + return c10::guts::invoke(m, *cur, args...); + } + + R (CurrClass::*m)(Args...); +}; + +template +struct WrapMethod { + WrapMethod(R (CurrClass::*m)(Args...) const) : m(std::move(m)) {} + + R operator()(c10::intrusive_ptr cur, Args... args) { + return c10::guts::invoke(m, *cur, args...); + } + + R (CurrClass::*m)(Args...) const; +}; + +// Adapter for different callable types +template < + typename CurClass, + typename Func, + std::enable_if_t< + std::is_member_function_pointer>::value, + bool> = false> +WrapMethod wrap_func(Func f) { + return WrapMethod(std::move(f)); +} + +template < + typename CurClass, + typename Func, + std::enable_if_t< + !std::is_member_function_pointer>::value, + bool> = false> +Func wrap_func(Func f) { + return f; +} + +template < + class Functor, + bool AllowDeprecatedTypes, + size_t... ivalue_arg_indices> +typename c10::guts::infer_function_traits_t::return_type +call_torchbind_method_from_stack( + Functor& functor, + jit::Stack& stack, + std::index_sequence) { + (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would + // be unused and we have to silence the compiler warning. + + constexpr size_t num_ivalue_args = sizeof...(ivalue_arg_indices); + + using IValueArgTypes = + typename c10::guts::infer_function_traits_t::parameter_types; + // TODO We shouldn't use c10::impl stuff directly here. We should use the + // KernelFunction API instead. 
+ return (functor)(c10::impl::ivalue_to_arg< + typename c10::impl::decay_if_not_tensor< + c10::guts::typelist:: + element_t>::type, + AllowDeprecatedTypes>:: + call(torch::jit::peek( + stack, ivalue_arg_indices, num_ivalue_args))...); +} + +template +typename c10::guts::infer_function_traits_t::return_type +call_torchbind_method_from_stack(Functor& functor, jit::Stack& stack) { + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + return call_torchbind_method_from_stack( + functor, stack, std::make_index_sequence()); +} + +template +struct BoxedProxy; + +template +struct BoxedProxy { + void operator()(jit::Stack& stack, Func& func) { + auto retval = call_torchbind_method_from_stack(func, stack); + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + torch::jit::drop(stack, num_ivalue_args); + stack.emplace_back(c10::ivalue::from(std::move(retval))); + } +}; + +template +struct BoxedProxy { + void operator()(jit::Stack& stack, Func& func) { + call_torchbind_method_from_stack(func, stack); + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + torch::jit::drop(stack, num_ivalue_args); + stack.emplace_back(); + } +}; + +inline bool validIdent(size_t i, char n) { + return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); +} + +inline void checkValidIdent(const std::string& str, const char* type) { + for (const auto i : c10::irange(str.size())) { + TORCH_CHECK( + validIdent(i, str[i]), + type, + " must be a valid Python/C++ identifier." + " Character '", + str[i], + "' at index ", + i, + " is illegal."); + } +} + +class TORCH_API class_base { + protected: + explicit class_base( + const std::string& namespaceName, + const std::string& className, + std::string doc_string, + const std::type_info& intrusivePtrClassTypeid, + const std::type_info& taggedCapsuleClass); + + static c10::FunctionSchema withNewArguments( + const c10::FunctionSchema& schema, + std::initializer_list default_args); + std::string qualClassName; + at::ClassTypePtr classTypePtr; +}; + +} // namespace detail + +TORCH_API void registerCustomClass(at::ClassTypePtr class_type); +TORCH_API void registerCustomClassMethod(std::unique_ptr method); + +// Given a qualified name (e.g. __torch__.torch.classes.Foo), return +// the ClassType pointer to the Type that describes that custom class, +// or nullptr if no class by that name was found. +TORCH_API at::ClassTypePtr getCustomClass(const std::string& name); + +// Given an IValue, return true if the object contained in that IValue +// is a custom C++ class, otherwise return false. +TORCH_API bool isCustomClass(const c10::IValue& v); + +// This API is for testing purposes ONLY. It should not be used in +// any load-bearing code. +TORCH_API std::vector customClassSchemasForBCCheck(); + +namespace jit { +using ::torch::registerCustomClass; +using ::torch::registerCustomClassMethod; +} // namespace jit + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/extension.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/extension.h new file mode 100644 index 0000000000000000000000000000000000000000..671ae1aadb8d5f2ad33cfe27a8fe1481856e668b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/extension.h @@ -0,0 +1,9 @@ +#pragma once + +#ifndef TORCH_INDUCTOR_CPP_WRAPPER +// All pure C++ headers for the C++ frontend. 
+#include +#endif + +// Python bindings for the C++ frontend (includes Python.h). +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/library.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/library.h new file mode 100644 index 0000000000000000000000000000000000000000..535bd7640f8f99eb4de2587042e20ddc924564db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/library.h @@ -0,0 +1,1047 @@ +#pragma once + +/// \file +/// +/// This header provides an API for extending PyTorch's core library +/// of operators with user defined operators and data types. This +/// API can be used in a few ways: +/// +/// * You can define new custom operators and classes with TORCH_LIBRARY(), +/// making them available for use in both eager Python as well as in +/// TorchScript. This API is modeled off of pybind11's `PYBIND11_MODULE` +/// macro, as the provided functionality is similar (pybind11 lets you bind +/// C++ to Python only; `torch/library.h` lets you bind C++ simultaneously to +/// Python and TorchScript). +/// +/// * You can override existing operators with TORCH_LIBRARY_IMPL(), +/// providing a new implementation for these operators for a custom +/// backend (e.g., XLA). When you pass operators with tensors of your custom +/// backend, your overridden implementations will be called instead +/// of the standard implementations. +/// +/// * You can use both capabilities at the same time, allowing you +/// to write custom operators that register CPU/CUDA/Autograd +/// implementations without having to write the boilerplate +/// conditionals yourself. +/// +/// For a tutorial style introduction to the library API, check +/// out the [Extending TorchScript with Custom C++ +/// Operators](https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html) +/// tutorial. +/// +/// ``` +/// // Define a library whose operators live in the namespace 'myops'. +/// // You must define all of the operators for this library in +/// // this namespace. +/// TORCH_LIBRARY(myops, m) { +/// // Define a operator with exactly one implementation for all backends. +/// m.def("add(Tensor self, Tensor other) -> Tensor", &add_impl); +/// +/// // Define a schema for an operator, but provide no implementation +/// // (use this syntax if you want to use the dispatcher) +/// m.def("mul(Tensor self, Tensor other) -> Tensor"); +/// +/// // Provide an implementation for a defined operator (you can +/// // provide multiple; one per backend). The dispatcher takes care of +/// // calling the correct implementation depending on if we get a CPU +/// // tensor or a CUDA tensor +/// m.impl("mul", torch::kCPU, &mul_cpu_impl); +/// m.impl("mul", torch::kCUDA, &mul_cuda_impl); +/// } +/// +/// // Define implementations for operators for a non-standard backend, +/// // e.g., XLA (valid values are entries of DispatchKey). This can +/// // be used to define operators in a different file than the initial +/// // TORCH_LIBRARY definition (e.g., if it is in an external library) +/// TORCH_LIBRARY_IMPL(myops, XLA, m) { +/// m.impl("mul", &mul_xla_impl); +/// } +/// ``` + +#include +#include +#include +#include +#include + +// Just for inferFunctionSchemaFromFunctor +#include +#include + +namespace torch { + +#if defined C10_MOBILE +/** + * The NoInferSchemaTag is a type name used to indicate that this call to the + * CppFunction constructor should not trigger schema inference from functor. 
+ * Schema inference from functor utilizes template meta-programming, and is + * costly from a size perspective. Ideally, one would expect that the schema + * inference would require very little binary size since most of the + * computation can be done by the compiler at build time, but that isn't + * necessarily the case. + * + * Schema inference is elided only for mobile use-cases where we don't need + * the additional runtime cost or size overhead on client devices. + * + */ +struct NoInferSchemaTag {}; +#endif + +#define HAS_PT2_COMPLIANT_TAG + +// For multipy/torchdeploy use case +enum class _RegisterOrVerify { REGISTER, VERIFY }; + +template +class class_; + +#define HAS_IMPL_ABSTRACT_PYSTUB + +/// Represents a C++ function that implements an operator. Most users won't +/// interact directly with this class, except via error messages: the +/// constructors this function define the set of permissible "function"-like +/// things you can bind via the interface. +/// +/// This class erases the type of the passed in function, but durably records +/// the type via an inferred schema for the function. +class TORCH_API CppFunction final { + // TODO: This is morally the same thing as KernelRegistrationConfig, but it's + // opaque to the user. + + public: + /// This overload accepts function pointers, e.g., `CppFunction(&add_impl)` + template + explicit CppFunction( + Func* f, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)), + cpp_signature_(c10::impl::CppSignature::make()), + schema_( + c10::detail::inferFunctionSchemaFromFunctor>()), + debug_() {} + + /// This overload accepts compile time function pointers, e.g., + /// `CppFunction(TORCH_FN(add_impl))` + template + explicit CppFunction( + FuncPtr f, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedFunction(f)), + cpp_signature_( + c10::impl::CppSignature::make()), + schema_(c10::detail::inferFunctionSchemaFromFunctor< + typename FuncPtr::FuncType>()), + debug_() {} + + /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) { + /// ... 
})` + template + explicit CppFunction( + Lambda&& f, + std::enable_if_t< + c10::guts::is_functor>::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedLambda( + std::forward(f))), + cpp_signature_(c10::impl::CppSignature::make()), + schema_(c10::detail::inferFunctionSchemaFromFunctor< + std::decay_t>()), + debug_() {} + +#if defined C10_MOBILE + /// This overload accepts function pointers, e.g., `CppFunction(&add_impl, + /// NoInferSchemaTag())` + template + explicit CppFunction( + Func* f, + NoInferSchemaTag, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)), + cpp_signature_(c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} + + /// This overload accepts compile time function pointers, e.g., + /// `CppFunction(TORCH_FN(add_impl), NoInferSchemaTag())` + template + explicit CppFunction( + FuncPtr f, + NoInferSchemaTag, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedFunction(f)), + cpp_signature_( + c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} + + /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) { + /// ... }. NoInferSchemaTag())` + template + explicit CppFunction( + Lambda&& f, + NoInferSchemaTag, + std::enable_if_t< + c10::guts::is_functor>::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedLambda( + std::forward(f))), + cpp_signature_(c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} +#endif + + ~CppFunction(); + + CppFunction(CppFunction&&) noexcept = default; + + CppFunction& operator=(CppFunction&&) = default; + + /// \private + /// Creates a function from a type-erased boxed kernel. + static CppFunction makeFromBoxedKernel(c10::BoxedKernel kernel) { + return CppFunction( + c10::KernelFunction::makeFromBoxedKernel(std::move(kernel)), + /* cpp_signature */ c10::nullopt, // not known for boxed functions + /* schema */ nullptr); + } + + /// This creates a fallthrough function. Fallthrough functions + /// immediately redispatch to the next available dispatch key, + /// but are implemented more efficiently than a hand written + /// function done in the same way. + static CppFunction makeFallthrough() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFallthrough()); + } + + /// \private + /// + /// Creates a function that raises an error saying that named tensors + /// are not supported when called. + static CppFunction makeNamedNotSupported() { + return makeFromBoxedKernel(c10::BoxedKernel::makeNamedNotSupported()); + } + + /// Create a function from a boxed kernel function with signature + /// `void(const OperatorHandle&, Stack*)`; i.e., they receive a + /// stack of arguments in a boxed calling convention, rather than + /// in the native C++ calling convention. Boxed functions are + /// typically only used to register backend fallbacks via + /// torch::Library::fallback(). + template + static CppFunction makeFromBoxedFunction() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction()); + } + + // Variant that takes in a boxed kernel function with a plumbed + // DispatchKeySet. See Note [Plumbing Keys Through The Dispatcher] for + // details. 
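+  //
+  // Hedged sketch (editorial addition; `my_fallback` is a placeholder for a
+  // user-provided function): a boxed backend fallback is typically wired up
+  // through the non-plumbed overload above, e.g.
+  //
+  //   void my_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+  //   TORCH_LIBRARY_IMPL(_, PrivateUse1, m) {
+  //     m.fallback(torch::CppFunction::makeFromBoxedFunction<&my_fallback>());
+  //   }
+  //
+  // The plumbed variant below has the same shape, except that the registered
+  // function also receives the current c10::DispatchKeySet.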
+  template <c10::BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
+  static CppFunction makeFromBoxedFunction() {
+    return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction<func>());
+  }
+
+  /// Create a function from a boxed kernel functor which defines
+  /// `operator()(const OperatorHandle&, DispatchKeySet, Stack*)`
+  /// (receiving arguments from boxed calling convention) and inherits
+  /// from `c10::OperatorKernel`. Unlike makeFromBoxedFunction, functions
+  /// registered in this way can also carry additional state which
+  /// is managed by the functor; this is useful if you're writing an
+  /// adapter to some other implementation, e.g., a Python callable, which
+  /// is dynamically associated with the registered kernel.
+  template <class KernelFunctor>
+  static CppFunction makeFromBoxedFunctor(
+      std::unique_ptr<KernelFunctor> kernelFunctor) {
+    return makeFromBoxedKernel(
+        c10::BoxedKernel::makeFromFunctor(std::move(kernelFunctor)));
+  }
+
+  /// Create a function from an unboxed kernel function.
+  /// This is typically used to register common operators.
+  template <
+      typename FuncPtr,
+      std::enable_if_t<
+          c10::guts::is_function_type<FuncPtr>::value,
+          std::nullptr_t> = nullptr>
+  static CppFunction makeFromUnboxedFunction(FuncPtr* f) {
+    return CppFunction(f);
+  }
+
+  /// Create a function from a compile time unboxed kernel function pointer.
+  /// This is typically used to register common operators.
+  /// Compile time function pointers can be used to allow the compiler
+  /// to optimize (e.g. inline) calls to it.
+  template <
+      typename FuncPtr,
+      std::enable_if_t<
+          c10::is_compile_time_function_pointer<FuncPtr>::value,
+          std::nullptr_t> = nullptr>
+  static CppFunction makeFromUnboxedFunction(FuncPtr f) {
+    return CppFunction(f);
+  }
+
+  CppFunction&& debug(std::string d) && {
+    debug_ = std::move(d);
+    return std::move(*this);
+  }
+
+ private:
+  c10::optional<c10::DispatchKey> dispatch_key_;
+  c10::KernelFunction func_;
+  c10::optional<c10::impl::CppSignature> cpp_signature_;
+  std::unique_ptr<c10::FunctionSchema> schema_;
+  std::string debug_;
+
+  // The "setter" for dispatch_key_
+  template <typename Func>
+  friend CppFunction dispatch(c10::DispatchKey, Func&&);
+
+  // The only class which actually pulls out values from CppFunction (does so
+  // destructively, felt too lazy to write accessors that I don't even
+  // want users to use)
+  friend class Library;
+
+  CppFunction(
+      c10::KernelFunction func,
+      c10::optional<c10::impl::CppSignature> cpp_signature,
+      std::unique_ptr<c10::FunctionSchema> schema);
+};
+
+/// \defgroup torch-dispatch-overloads torch::dispatch overloads
+
+/// Create a torch::CppFunction which is associated with a specific
+/// dispatch key. torch::CppFunctions that are tagged with a
+/// c10::DispatchKey don't get invoked unless the dispatcher determines
+/// that this particular c10::DispatchKey is the one that should be
+/// dispatched to.
+///
+/// This function is generally not used directly; instead, prefer using
+/// TORCH_LIBRARY_IMPL(), which will implicitly set the c10::DispatchKey
+/// for all registration calls inside of its body.
+///
+/// \ingroup torch-dispatch-overloads
+template <typename Func>
+inline CppFunction dispatch(c10::DispatchKey k, Func&& raw_f) {
+  CppFunction f(std::forward<Func>(raw_f));
+  if (k == c10::DispatchKey::CatchAll) {
+    f.dispatch_key_ = c10::nullopt;
+  } else {
+    f.dispatch_key_ = k;
+  }
+  return f;
+}
+
+/// Convenience overload of dispatch() which accepts c10::DeviceType
+///
+/// \ingroup torch-dispatch-overloads
+template <typename Func>
+inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) {
+  auto deviceTypeToDispatchKey = [](c10::DeviceType t) {
+    switch (t) {
+      // This list is synchronized with the k-constants in c10/core/DeviceType.h
+      case c10::DeviceType::CPU:
+        return c10::DispatchKey::CPU;
+      case c10::DeviceType::CUDA:
+        return c10::DispatchKey::CUDA;
+      case c10::DeviceType::IPU:
+        return c10::DispatchKey::IPU;
+      case c10::DeviceType::XLA:
+        return c10::DispatchKey::XLA;
+      case c10::DeviceType::Lazy:
+        return c10::DispatchKey::Lazy;
+      case c10::DeviceType::XPU:
+        return c10::DispatchKey::XPU;
+      case c10::DeviceType::MPS:
+        return c10::DispatchKey::MPS;
+      case c10::DeviceType::Meta:
+        return c10::DispatchKey::Meta;
+      case c10::DeviceType::HIP:
+        return c10::DispatchKey::HIP;
+      case c10::DeviceType::ORT:
+        return c10::DispatchKey::ORT;
+      case c10::DeviceType::HPU:
+        return c10::DispatchKey::HPU;
+      case c10::DeviceType::MTIA:
+        return c10::DispatchKey::MTIA;
+      case c10::DeviceType::PrivateUse1:
+        return c10::DispatchKey::PrivateUse1;
+      default:
+        TORCH_CHECK(
+            false,
+            "Device type ",
+            t,
+            " cannot be overloaded at dispatch time, "
+            "please file a bug report explaining what you were trying to do.");
+    }
+  };
+  return dispatch(deviceTypeToDispatchKey(type), std::forward<Func>(raw_f));
+}
+
+/// \defgroup torch-schema-overloads torch::schema overloads
+
+/// Construct a c10::FunctionSchema from a string, with an explicitly
+/// specified c10::AliasAnalysisKind. Ordinarily, schemas are simply
+/// passed in as strings, but if you need to specify a custom alias
+/// analysis, you can replace the string with a call to this function.
+///
+/// ```
+/// // Default alias analysis (FROM_SCHEMA)
+/// m.def("def3(Tensor self) -> Tensor");
+/// // Pure function alias analysis
+/// m.def(torch::schema("def3(Tensor self) -> Tensor",
+///                     c10::AliasAnalysisKind::PURE_FUNCTION));
+/// ```
+///
+/// \ingroup torch-schema-overloads
+inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) {
+  c10::FunctionSchema s = torch::jit::parseSchema(str);
+  s.setAliasAnalysis(k);
+  return s;
+}
+
+/// Function schemas can be directly constructed from string literals.
+///
+/// \ingroup torch-schema-overloads
+inline c10::FunctionSchema schema(const char* s) {
+  return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA);
+}
+
+/// \private
+///
+/// Already constructed function schemas are accepted if they are
+/// rvalues.
+///
+/// \ingroup torch-schema-overloads
+inline c10::FunctionSchema&& schema(c10::FunctionSchema&& s) {
+  return std::move(s);
+}
+
+namespace detail {
+
+inline std::variant<c10::OperatorName, c10::FunctionSchema>
+constructSchemaOrName(c10::FunctionSchema&& s) {
+  return std::move(s);
+}
+inline std::variant<c10::OperatorName, c10::FunctionSchema>
+constructSchemaOrName(c10::OperatorName&& n) {
+  return std::move(n);
+}
+inline std::variant<c10::OperatorName, c10::FunctionSchema>
+constructSchemaOrName(const char* str) {
+  auto s = torch::jit::parseSchemaOrName(str);
+  if (std::holds_alternative<c10::FunctionSchema>(s)) {
+    std::get<c10::FunctionSchema>(s).setAliasAnalysis(
+        c10::AliasAnalysisKind::FROM_SCHEMA);
+  }
+  return s;
+}
+
+class TorchLibraryInit;
+
+} // namespace detail
+
+// Note [Selective build]
+// ~~~~~~~~~~~~~~~~~~~~~~
+// In some settings, especially mobile, it is important to avoid compiling any
+// references to functions that you aren't actually going to use, so that they
+// can be eliminated by the linker. We call this capability "selective build".
+//
+// A very easy way to implement selective build which results in a lot of
+// boilerplate is to just add ifdef's around every registration call, but this
+// means you have to write a lot of extra lines of code at every registration
+// site, and it also means you have to define some munging scheme to map
+// operators to macros.
+//
+// Instead of doing this, we have a different mechanism centered around the
+// concept of a SelectiveStr. A selective name is like a const char* string,
+// except it also carries at compile time a boolean saying whether or not a
+// registration should actually happen. We then have extra overloads
+// which bypass registration entirely if a selective name is disabled. We do a
+// constexpr test to see if an operator should be enabled or not; this is
+// currently implemented in ATen/core/op_registration/op_allowlist.h
+
+namespace detail {
+
+// dummy class for non selected custom torchbind classes
+class ClassNotSelected {
+ public:
+  ClassNotSelected& def_pickle(...) {
+    return *this;
+  }
+  ClassNotSelected& def(...) {
+    return *this;
+  }
+};
+
+// A SelectiveStr is like a const char*, except that it also comes
+// with a type brand that says whether or not the name is enabled or
+// not. If the string is disabled, then (at compile time) we DON'T generate
+// a registration call for it. This class is not intended to be used
+// directly; use the TORCH_SELECTIVE_NAME or TORCH_SELECTIVE_SCHEMA macros below
+// to create it.
+template <bool enabled>
+class SelectiveStr {
+ public:
+  constexpr explicit SelectiveStr(const char* name) : name_(name) {}
+  constexpr operator const char*() {
+    return name_;
+  }
+
+ private:
+  const char* name_;
+};
+
+#define TORCH_SELECTIVE_CLASS(n) \
+  torch::detail::SelectiveStr<c10::impl::custom_class_allowlist_check(n)>(n)
+#define TORCH_SELECTIVE_NAME(n) \
+  torch::detail::SelectiveStr<c10::impl::op_allowlist_check(n)>(n)
+#define TORCH_SELECTIVE_SCHEMA(n) \
+  torch::detail::SelectiveStr<c10::impl::schema_allowlist_check(n)>(n)
+
+} // namespace detail
+
+/// This object provides the API for defining operators and providing
+/// implementations at dispatch keys. Typically, a torch::Library
+/// is not allocated directly; instead it is created by the
+/// TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() macros.
+///
+/// Most methods on torch::Library return a reference to itself,
+/// supporting method chaining.
+///
+/// ```
+/// // Examples:
+///
+/// TORCH_LIBRARY(torchvision, m) {
+///    // m is a torch::Library
+///    m.def("roi_align", ...);
+///    ...
+/// }
+///
+/// TORCH_LIBRARY_IMPL(aten, XLA, m) {
+///    // m is a torch::Library
+///    m.impl("add", ...);
+///    ...
+/// } +/// ``` +/// +class TORCH_API Library final { + public: + /// \private + /// + /// Which type of macro produced this Library + enum Kind { + DEF, // from TORCH_LIBRARY (no qualifier) + IMPL, + FRAGMENT, + }; + + /// \private + /// + /// Use TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() instead of using these + /// constructors directly + Library( + Kind kind, + std::string ns, + c10::optional k, + const char* file, + uint32_t line); + + Library(const Library&) = delete; + Library& operator=(const Library&) = delete; + Library(Library&&) = default; + Library& operator=(Library&&) = default; + + // Some notes about the API design here. We had the following constraints: + // + // - We need to support multiple "types" of arguments for schema and + // functions (e.g., unnamed lambda types, regular functions, const char*, + // fully instantiated schemas) + // - We don't want to write exponentially many overloads + // - We don't want to rely on implicit conversion to a common type, + // because the C++ compiler will only be willing to do a single + // implicit conversion (reducing the set of valid types which you + // can invoke with); also error messages are worse when an implicit + // conversion is not selected (as the compiler will not explain + // why it didn't select an implicit conversion; this is different + // from overloads where it will explain each candidate overload and + // why it didn't apply) + // + // To solve all of these constraints at the same time, we use a trick taken + // from the pybind11 library: template over the argument in the user visible + // API, and inside of the templated function explicitly call an overloaded + // function to resolve the argument to a real type. You get the good error + // messages from overloads, but at the same time you only need to write the + // overload for any given argument type once. + + /// Declare an operator with a schema, but don't provide any implementations + /// for it. You're expected to then provide implementations using the + /// impl() method. All template arguments are inferred. + /// + /// \param raw_schema The schema of the operator to be defined. + /// Typically, this is a `const char*` string literal, but any type + /// accepted by torch::schema() is accepted here. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY(myops, m) { + /// m.def("add(Tensor self, Tensor other) -> Tensor"); + /// } + /// ``` + + template + Library& def( + Schema&& raw_schema, + const std::vector& tags = {}, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & { + c10::FunctionSchema s = schema(std::forward(raw_schema)); + return _def(std::move(s), nullptr, tags, rv); + } + + /// Declares that for all operators that are subsequently def'ed, their + /// abstract impls may be found in the given Python module (pymodule). + /// This registers some help text that is used if the abstract impl + /// cannot be found. + /// + /// Args: + /// - pymodule: the python module + /// - context: We may include this in the error message. + Library& impl_abstract_pystub(const char* pymodule, const char* context = "") { + impl_abstract_pystub_ = {pymodule, context}; + return *this; + } + + /// Define an operator for a schema and then register an implementation for + /// it. This is typically what you would use if you aren't planning + /// on making use of the dispatcher to structure your operator + /// implementation. 
It's roughly equivalent to calling def() and + /// then impl(), but if you omit the schema of the operator, we will + /// infer it from the type of your C++ function. All template + /// arguments are inferred. + /// + /// \param raw_name_or_schema The schema of the operator to be + /// defined, or just the name of the operator if the schema is to be + /// inferred from `raw_f`. Typically a `const char*` literal. + /// \param raw_f The C++ function that implements this operator. + /// Any valid constructor of torch::CppFunction is accepted here; + /// typically you provide a function pointer or lambda. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY(myops, m) { + /// m.def("add", add_fn); + /// } + /// ``` + template + Library& def(NameOrSchema&& raw_name_or_schema, Func&& raw_f, + const std::vector& tags = {}) & { + CppFunction f(std::forward(raw_f)); + return _def( + detail::constructSchemaOrName( + ::std::forward(raw_name_or_schema)), + ::std::move(f), tags); + } + + /// Register an implementation for an operator. You may register multiple + /// implementations for a single operator at different dispatch keys + /// (see torch::dispatch()). Implementations must have a corresponding + /// declaration (from def()), otherwise they are invalid. If you plan + /// to register multiple implementations, DO NOT provide a function + /// implementation when you def() the operator. + /// + /// \param name The name of the operator to implement. Do NOT provide + /// schema here. + /// \param raw_f The C++ function that implements this operator. Any + /// valid constructor of torch::CppFunction is accepted here; + /// typically you provide a function pointer or lambda. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY_IMPL(myops, CUDA, m) { + /// m.impl("add", add_cuda); + /// } + /// ``` + template + Library& impl( + Name name, + Func&& raw_f, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & { + // TODO: need to raise an error when you impl a function that has a + // catch all def +#if defined C10_MOBILE + CppFunction f(std::forward(raw_f), NoInferSchemaTag()); +#else + CppFunction f(std::forward(raw_f)); +#endif + return _impl(name, std::move(f), rv); + } + +#if defined C10_MOBILE + // Note: This overload is needed only for C10_MOBILE, since the automatically + // defined copy constructor for the CppFunction doesn't have the additional + // NoInferSchemaTag argument. We define the overload for the impl() function + // to accept a CppFunction&& argument. The already constructed CppFunction + // object may or may not have the inferred schema, but it doesn't matter + // for our purposes since if it already has the inferred schema, then we + // might as well just pass it through directly. + // + template + Library& impl(Name name, CppFunction&& raw_f) & { + // TODO: need to raise an error when you impl a function that has a + // catch all def + CppFunction f(std::forward(raw_f)); + return _impl(name, std::move(f)); + } +#endif + + // Helper for getting an OperatorName for a const char*. You probably + // don't need this. + c10::OperatorName _resolve(const char* name) const; + + /// \private + /// + /// Convenience overload for directly specifying the dispatch key when + /// impl(). 
You probably don't need this; instead, prefer specifying + /// the dispatch key for the entire block in TORCH_LIBRARY_IMPL() + template + Library& impl(Name name, Dispatch&& key, Func&& raw_f) & { + return impl( + name, dispatch(std::forward(key), std::forward(raw_f))); + } + + template + Library& impl_UNBOXED(Name /*name*/, Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + // These overloads cover cases when a SelectiveStr (see Note [Selective + // build]) has been disabled at compile time. In that case, don't generate + // any code referencing the passed in functions at all. + Library& def(detail::SelectiveStr, const std::vector& tags = {}) & { + return *this; + } + Library& def(detail::SelectiveStr raw_schema, const std::vector& tags = {}) & { + return def(raw_schema.operator const char*(), tags); + } + template + Library& def(detail::SelectiveStr, Func&& /*raw_f*/, const std::vector& tags = {}) & { + return *this; + } + template + Library& def(detail::SelectiveStr raw_name_or_schema, Func&& raw_f, const std::vector& tags = {}) & { + return def( + raw_name_or_schema.operator const char*(), std::forward(raw_f), tags); + } + + template + Library& impl(detail::SelectiveStr, Func&& /*raw_f*/) & { + return *this; + } + template + Library& impl( + detail::SelectiveStr, + Dispatch&& /*key*/, + Func&& /*raw_f*/) & { + return *this; + } + template + Library& impl_UNBOXED( + detail::SelectiveStr /*name*/, + Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + template + Library& impl(detail::SelectiveStr name, Func&& raw_f) & { + return impl(name.operator const char*(), std::forward(raw_f)); + } + template + Library& impl( + detail::SelectiveStr name, + Dispatch&& key, + Func&& raw_f) & { + return impl( + name.operator const char*(), + std::forward(key), + std::forward(raw_f)); + } + template + Library& impl_UNBOXED( + detail::SelectiveStr /*name*/, + Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + /// Register a fallback implementation for all operators which will be used + /// if there is not a specific implementation for an operator available. + /// There MUST be a DispatchKey associated with a fallback; e.g., + /// only call this from TORCH_LIBRARY_IMPL() with namespace `_`. + /// + /// \param raw_f The function that implements the fallback. Unboxed + /// functions typically do not work as fallback functions, as + /// fallback functions must work for every operator (even though + /// they have varying type signatures). 
Typical arguments are + /// CppFunction::makeFallthrough() or + /// CppFunction::makeFromBoxedFunction() + /// + /// ``` + /// // Example: + /// + /// TORCH_LIBRARY_IMPL(_, AutogradXLA, m) { + /// // If there is not a kernel explicitly registered + /// // for AutogradXLA, fallthrough to the next + /// // available kernel + /// m.fallback(torch::CppFunction::makeFallthrough()); + /// } + /// + /// // See aten/src/ATen/core/dispatch/backend_fallback_test.cpp + /// // for a full example of boxed fallback + /// ``` + template + Library& fallback(Func&& raw_f) & { + CppFunction f((std::forward(raw_f))); + return _fallback(std::move(f)); + } + + template + inline torch::class_ class_(const std::string& className); + + // These overloads enable the use of selective build on classes registered + // within a library. The API is the same as before with 1 minor change. + // Instead of m.class_("foo") you instead do + // m.class_(TORCH_SELECTIVE_CLASS("foo")) + template + inline torch::class_ class_(detail::SelectiveStr className); + + template + inline detail::ClassNotSelected class_(detail::SelectiveStr className); + + private: + Kind kind_; + c10::optional ns_; + c10::optional dispatch_key_; + c10::optional> impl_abstract_pystub_; + const char* file_; + uint32_t line_; + + std::vector registrars_; + + friend class detail::TorchLibraryInit; + + // Non-user visible actual implementations of functions. These aren't + // public because we only implement & qualifier and not && qualifier + Library& _def( + c10::FunctionSchema&& schema, + c10::OperatorName* out_name = nullptr, + const std::vector& tags = {}, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &; + Library& _def( + std::variant&&, + CppFunction&& f, + const std::vector& tags = {}) &; + Library& _impl( + const char* name, + CppFunction&& f, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &; + Library& _fallback(CppFunction&& f) &; + + at::OperatorName _parseNameForLib(const char* name_str) const; +}; + +namespace detail { + +class TorchLibraryInit final { + private: + using InitFn = void(Library&); + Library lib_; + + public: + TorchLibraryInit( + Library::Kind kind, + InitFn* fn, + const char* ns, + c10::optional k, + const char* file, + uint32_t line) + : lib_(kind, ns, k, file, line) { + fn(lib_); + } +}; + +} // namespace detail + +} // namespace torch + +// NB: The EXACT NAMING of the initializer functions (e.g., +// TORCH_LIBRARY_init_aten) matters for the code analyzer; +// see the regexes at tools/code_analyzer/run_analyzer.sh + +/// Macro for defining a function that will be run at static +/// initialization time to define a library of operators in the +/// namespace `ns` (must be a valid C++ identifier, no quotes). +/// Use this macro when you want to define a new set of custom operators +/// that do not already exist in PyTorch. +/// +/// Example usage: +/// +/// ``` +/// TORCH_LIBRARY(myops, m) { +/// // m is a torch::Library; methods on it will define +/// // operators in the myops namespace +/// m.def("add", add_impl); +/// } +/// ``` +/// +/// The `m` argument is bound to a torch::Library that is used to +/// register operators. There may only be one TORCH_LIBRARY() +/// for any given namespace. 
+#define TORCH_LIBRARY(ns, m) \ + static void TORCH_LIBRARY_init_##ns(torch::Library&); \ + static const torch::detail::TorchLibraryInit TORCH_LIBRARY_static_init_##ns( \ + torch::Library::DEF, \ + &TORCH_LIBRARY_init_##ns, \ + #ns, \ + c10::nullopt, \ + __FILE__, \ + __LINE__); \ + void TORCH_LIBRARY_init_##ns(torch::Library& m) + +/// \private +/// +/// This macro is a version of TORCH_LIBRARY() that doesn't enforce that there +/// is only one library (it is a "fragment"). This is used inside the +/// PerOpRegistration.cpp file, as well as in places where all op registrations +/// within the same namespace cannot be easily put into one macro block +/// (this is mostly the case for custom ops in fbcode that were ported from +/// the old API) +#define TORCH_LIBRARY_FRAGMENT(ns, m) _TORCH_LIBRARY_FRAGMENT(ns, m, C10_UID) + +/// \private +/// +/// The above macro requires an extra unique identifier (uid) to prevent +/// variable name collisions This can happen if TORCH_LIBRARY_FRAGMENT is called +/// multiple times with the same namespace in the same translation unit. Note +/// that the TORCH_LIBRARY variant doesn't run into this problem, because it +/// enforces that it can only be called once for a given namespace. +#define _TORCH_LIBRARY_FRAGMENT(ns, m, uid) \ + static void C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library&); \ + static const torch::detail::TorchLibraryInit C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_static_init_##ns##_, uid)( \ + torch::Library::FRAGMENT, \ + &C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), \ + #ns, \ + c10::nullopt, \ + __FILE__, \ + __LINE__); \ + void C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library & m) + +/// Macro for defining a function that will be run at static +/// initialization time to define operator overrides for dispatch key +/// `k` (must be an unqualified enum member of c10::DispatchKey) in +/// namespace `ns` (must be a valid C++ identifer, no quotes). Use this +/// macro when you want to implement a preexisting set of custom +/// operators on a new dispatch key (e.g., you want to provide CUDA +/// implementations of already existing operators). One common usage +/// pattern is to use TORCH_LIBRARY() to define schema for all new +/// operators you want to define, and then use several +/// TORCH_LIBRARY_IMPL() blocks to provide implementations of the +/// operator for CPU, CUDA and Autograd. +/// +/// In some cases, you need to define something that applies to all namespaces, +/// not just one namespace (usually a fallback). In that case, use the reserved +/// namespace _, e.g., +/// +/// ``` +/// TORCH_LIBRARY_IMPL(_, XLA, m) { +/// m.fallback(xla_fallback); +/// } +/// ``` +/// +/// Example usage: +/// +/// ``` +/// TORCH_LIBRARY_IMPL(myops, CPU, m) { +/// // m is a torch::Library; methods on it will define +/// // CPU implementations of operators in the myops namespace. +/// // It is NOT valid to call torch::Library::def() +/// // in this context. +/// m.impl("add", add_cpu_impl); +/// } +/// ``` +/// +/// If ``add_cpu_impl`` is an overloaded function, use a +/// ``static_cast`` to specify which overload you want +/// (by providing the full type). +/// +// NB: if the dispatch key is not whitelisted, we simply omit the Library +// call entirely +#define TORCH_LIBRARY_IMPL(ns, k, m) _TORCH_LIBRARY_IMPL(ns, k, m, C10_UID) + +/// \private +/// +/// The above macro requires an extra unique identifier (uid) to prevent +/// variable name collisions. 
This can happen if TORCH_LIBRARY_IMPL is called +/// multiple times with the same namespace and dispatch key in the same +/// translation unit. +#define _TORCH_LIBRARY_IMPL(ns, k, m, uid) \ + static void C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library&); \ + static const torch::detail::TorchLibraryInit C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( \ + torch::Library::IMPL, \ + (c10::impl::dispatch_key_allowlist_check(c10::DispatchKey::k) \ + ? &C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid) \ + : [](torch::Library&) -> void {}), \ + #ns, \ + c10::make_optional(c10::DispatchKey::k), \ + __FILE__, \ + __LINE__); \ + void C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library & m) + +// These are variants of the macros above which are to be used for testing (they +// don't setup the static initializer, so you can control the visibility of +// the allocated library yourself). +// +// DO NOT use these in production code, they are NOT understood by the +// code analyzer and will be incorrectly analyzed in those situations. + +/// \private +#define MAKE_TORCH_LIBRARY(ns) \ + torch::Library(torch::Library::DEF, #ns, c10::nullopt, __FILE__, __LINE__) +/// \private +#define MAKE_TORCH_LIBRARY_IMPL(ns, k) \ + torch::Library( \ + torch::Library::IMPL, \ + #ns, \ + c10::make_optional(c10::DispatchKey::k), \ + __FILE__, \ + __LINE__) + +// Make the custom class API visible, so it is available from +// torch::Library. + +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/script.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/script.h new file mode 100644 index 0000000000000000000000000000000000000000..58510670613b58ec9b39f3d69d652be6cc0ce998 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/script.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include
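The headers above document the registration API (def(), impl(), fallback(), and the TORCH_LIBRARY macros) through doc comments. The sketch below simply combines the usage patterns already shown in those comments into one translation unit; it is illustrative only, and the names `myops`, `myadd`, and `myadd_cpu` are hypothetical, not part of either header.

// Minimal sketch of registering a custom operator with torch::Library.
// Assumes the torch/library.h API shown in the diff above; all operator and
// kernel names here are made up for illustration.

#include <torch/library.h>
#include <ATen/ATen.h>

// An ordinary C++ kernel; CppFunction infers its schema from the signature
// when it is passed to def()/impl() without an explicit schema string.
at::Tensor myadd_cpu(const at::Tensor& self, const at::Tensor& other) {
  return self + other;
}

// Declare the operator's schema once, in the library's own namespace.
TORCH_LIBRARY(myops, m) {
  m.def("myadd(Tensor self, Tensor other) -> Tensor");
}

// Provide a CPU implementation; the dispatch key comes from the macro,
// so no explicit torch::dispatch() call is needed here.
TORCH_LIBRARY_IMPL(myops, CPU, m) {
  m.impl("myadd", myadd_cpu);
}

// Register a fallthrough for an entire dispatch key in the reserved "_"
// namespace, mirroring the Library::fallback() example in the header.
TORCH_LIBRARY_IMPL(_, AutogradXLA, m) {
  m.fallback(torch::CppFunction::makeFallthrough());
}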