diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..03cb8c42c193b177872b3983cde8124e85f6ee2e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
@@ -0,0 +1,12 @@
+#include
+
+namespace c10d_functional {
+
+void register_process_group(
+    const std::string& tag,
+    c10::intrusive_ptr<c10d::ProcessGroup> pg);
+
+c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
+    const std::string& tag);
+
+} // namespace c10d_functional
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2b4885f02ffc1e13cd51c35c24d8a1e4eee62eab
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp
@@ -0,0 +1,473 @@
+#pragma once
+
+#ifdef USE_C10D_NCCL
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 14)
+#define NCCL_HAS_COMM_NONBLOCKING
+#endif
+
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 18)
+#define NCCL_HAS_COMM_SPLIT
+#endif
+
+// ncclGetLastError() is enabled only for NCCL versions 2.13+
+// ncclRemoteError only exists in NCCL versions 2.13+
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 13)
+#define ENABLE_NCCL_GET_LAST_ERROR
+#define NCCL_REMOTE_ERROR
+#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
+#define ENABLE_NCCL_GET_LAST_ERROR
+#define NCCL_REMOTE_ERROR
+#endif
+
+// Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort()
+// and ncclCommGetAsyncError() are not supported in earlier versions.
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 4)
+#define ENABLE_NCCL_ERROR_CHECKING
+#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
+#define ENABLE_NCCL_ERROR_CHECKING
+#endif
+
+// P2P is enabled only for NCCL versions 2.7+ since ncclSend()
+// and ncclRecv() are not supported in earlier versions.
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 7)
+#define ENABLE_NCCL_P2P_SUPPORT
+#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
+#define ENABLE_NCCL_P2P_SUPPORT
+#endif
+
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 11)
+#define ENABLE_NCCL_PREMUL_SUM_SUPPORT
+#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
+#define ENABLE_NCCL_PREMUL_SUM_SUPPORT
+#endif
+
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+    (NCCL_MINOR >= 17)
+#define NCCL_HAS_COMM_CTA_CGA
+#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
+#define NCCL_HAS_COMM_CTA_CGA
+#endif
+
+#if defined(NCCL_REGISTRATION_SUPPORTED) ||                              \
+    ((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
+      (NCCL_MINOR >= 19)))
+#define NCCL_HAS_COMM_REGISTER
+#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
+#define NCCL_HAS_COMM_REGISTER
+#endif
+
+// Macro to throw on a non-successful NCCL return value.
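+// C10D_NCCL_CHECK evaluates `cmd` once; on any result other than ncclSuccess
+// it builds a message containing the file/line, the versioned NCCL error
+// string, and the optional failure reason, then throws c10d::DistBackendError
+// via TORCH_CHECK_WITH.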
+#define C10D_NCCL_CHECK(cmd, failureReason) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } while (0) + +// Macro to throw on a non-successful NCCL return value, non-blocking. +#define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \ + ncclResult_t result = cmd; \ + auto startTimepoint = std::chrono::steady_clock::now(); \ + while (result == ncclInProgress) { \ + if (nccl_nonblocking_timeout() > 0) { \ + auto currentTimepoint = std::chrono::steady_clock::now(); \ + auto timeElapsed = std::chrono::duration_cast( \ + currentTimepoint - startTimepoint) \ + .count(); \ + if (timeElapsed > nccl_nonblocking_timeout()) { \ + std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + \ + ncclGetErrorWithVersion(result) + "\n" + \ + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } \ + ncclCommGetAsyncError(comm, &result); \ + } \ + if (result != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } + +#define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comms_, failureReason) \ + ncclResult_t state = cmd; \ + auto startTimepoint = std::chrono::steady_clock::now(); \ + if (state == ncclInProgress) { \ + for (const auto i : c10::irange(comms_.size())) { \ + do { \ + if (nccl_nonblocking_timeout() > 0) { \ + auto currentTimepoint = std::chrono::steady_clock::now(); \ + auto timeElapsed = std::chrono::duration_cast( \ + currentTimepoint - startTimepoint) \ + .count(); \ + if (timeElapsed > nccl_nonblocking_timeout()) { \ + std::string err = "NCCL timeout in: " + std::string(__FILE__) + \ + ":" + std::to_string(__LINE__) + ", " + \ + ncclGetErrorWithVersion(state) + "\n" + \ + getNcclErrorDetailStr(state, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } \ + ncclCommGetAsyncError(comms_[i]->getNcclComm(), &state); \ + } while (state == ncclInProgress); \ + if (state != ncclSuccess) { \ + break; /* fall through to failed case */ \ + } \ + } \ + } \ + if (state != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \ + "\n" + getNcclErrorDetailStr(state, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } + +// Macro to print and abort on a non-successful NCCL return value. +#define C10D_NCCL_ASSERT(cmd) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess) { \ + std::string err = ncclGetErrorWithVersion(result); \ + fprintf( \ + stderr, \ + "NCCL error in: %s:%d, %s\n", \ + __FILE__, \ + __LINE__, \ + err.c_str()); \ + abort(); \ + } \ + } while (0) + +namespace c10d { + +std::string getNcclVersion(); +std::string ncclGetErrorWithVersion(ncclResult_t error); +bool nccl_use_nonblocking(); +int nccl_nonblocking_timeout(); + +// Provides additional detail into NCCL error codes based on when these are +// thrown in the NCCL codebase. 
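+// When ProcessGroupNCCL supplies processGroupFailureReason (e.g. on a work
+// timeout), that reason takes precedence over the generic description derived
+// from the error code.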
+std::string getNcclErrorDetailStr( + ncclResult_t error, + c10::optional processGroupFailureReason = c10::nullopt); + +// Write NCCL debug info to local disk or any storage users define. +class TORCH_API DebugInfoWriter { + public: + DebugInfoWriter(int rank); + virtual ~DebugInfoWriter(); + virtual void write(const std::string& ncclTrace); + + protected: + std::string filename_; +}; + +// RAII wrapper for NCCL communicator +class NCCLComm { + public: + explicit NCCLComm(ncclComm_t ncclComm) + : ncclComm_(ncclComm), + aborted_(false), + ncclAsyncErr_(ncclSuccess), + commFailureReason_(c10::nullopt) {} + + NCCLComm() : NCCLComm(nullptr) {} + + ~NCCLComm() noexcept { + // Add lock in this destructor, as aborted_ needs to be read after memory + // barrier here. + std::unique_lock lock(mutex_); + if (ncclComm_ && !aborted_) { +#ifdef ENABLE_NCCL_ERROR_CHECKING + // Use ncclCommAbort instead of ncclCommDestroy here since + // ncclCommDestroy could block forever waiting for work to complete on + // the communicator. + C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_)); +#else + C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_)); +#endif + } + } + + static std::shared_ptr create( + int numRanks, + int rank, + ncclUniqueId commId) { + auto comm = std::make_shared(); + C10D_NCCL_CHECK( + ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank), + c10::nullopt); + comm->ncclId_ = commId; + comm->rank_ = rank; + return comm; + } + +#ifdef NCCL_HAS_COMM_NONBLOCKING + static std::shared_ptr create( + int numRanks, + int rank, + ncclUniqueId commId, + ncclConfig_t& config) { + auto comm = std::make_shared(); + if (nccl_use_nonblocking()) { + config.blocking = 0; + C10D_NCCL_CHECK_TIMEOUT( + ncclCommInitRankConfig( + &(comm->ncclComm_), numRanks, commId, rank, &config), + comm->ncclComm_, + c10::nullopt); + } else { + C10D_NCCL_CHECK( + ncclCommInitRankConfig( + &(comm->ncclComm_), numRanks, commId, rank, &config), + c10::nullopt); + } + comm->ncclId_ = commId; + comm->rank_ = rank; + return comm; + } +#endif + +#ifdef NCCL_HAS_COMM_SPLIT + static std::shared_ptr split( + NCCLComm* source, + int color_id, + int rank, + ncclConfig_t& config) { + auto comm = std::make_shared(); + C10D_NCCL_CHECK( + ncclCommSplit( + source->ncclComm_, color_id, rank, &(comm->ncclComm_), &config), + c10::nullopt); + ++source->ncclCommSplitCounter_; + return comm; + } +#endif + + ncclUniqueId getNcclId() { + return ncclId_; + } + + // Must not be copyable + NCCLComm(const NCCLComm&) = delete; + NCCLComm& operator=(const NCCLComm&) = delete; + + // Do not support move assignment as there is no valid use case + NCCLComm& operator=(NCCLComm&& other) = delete; + + // Move constructable + NCCLComm(NCCLComm&& other) { + // Using other's lock, as it reads other's states + // Can not use this.mutex_, as this object is being constructed. + std::unique_lock lock(other.mutex_); + std::swap(ncclComm_, other.ncclComm_); + std::swap(aborted_, other.aborted_); + std::swap(ncclAsyncErr_, other.ncclAsyncErr_); + } + + ncclComm_t getNcclComm(); + + c10::optional getNcclCommFailureReason() const { + std::unique_lock lock(mutex_); + return commFailureReason_; + } + + void ncclCommAbort( + c10::optional commFailureReason = c10::nullopt) { + std::unique_lock lock(mutex_); +#ifdef ENABLE_NCCL_ERROR_CHECKING + if (aborted_) { + // Should not abort twice. + return; + } + +#ifdef NCCL_HAS_COMM_REGISTER + // Deregister all registered segments before aborting. 
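+    // Deregistration needs the live communicator handle, so it must happen
+    // before ncclCommAbort invalidates ncclComm_.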
+ for (auto& it : registeredSegmentHandles_) { + void* handle = it.second; + C10D_NCCL_CHECK( + ::ncclCommDeregister(ncclComm_, handle), + c10::str( + "Failed to deregister segment handle ", + handle, + " on ncclComm_ ", + ncclComm_)); + } + registeredSegmentHandles_.clear(); +#endif + + // Set true failure reason if provided by ProcessGroupNCCL (e.g. work + // timeout) + commFailureReason_ = commFailureReason; +#ifndef NCCL_HAS_COMM_NONBLOCKING + C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_); +#else + C10D_NCCL_CHECK_TIMEOUT( + ::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_); +#endif + aborted_ = true; + ncclComm_ = nullptr; + + // Set an appropriate error so that we avoid using the communicator. + if (ncclAsyncErr_ == ncclSuccess) { + ncclAsyncErr_ = ncclSystemError; + } +#else + // This is a NOOP, if error checks are disabled. + return; +#endif + } + + bool isAborted() const { + std::unique_lock lock(mutex_); + return aborted_; + } + + uint64_t getCommSplitCounter() const { + return ncclCommSplitCounter_; + } + + ncclResult_t checkForNcclError() { + std::unique_lock lock(mutex_); +#ifdef ENABLE_NCCL_ERROR_CHECKING + if (ncclAsyncErr_ != ncclSuccess) { + return ncclAsyncErr_; + } + C10D_NCCL_CHECK( + ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_); + return ncclAsyncErr_; +#else + // Always return success, if error checks are disabled. + return ncclSuccess; +#endif + } + + ncclResult_t registerSegment(void* ptr, size_t size) { + std::unique_lock lock(mutex_); +#ifdef NCCL_HAS_COMM_REGISTER + // We register only segments from cache allocator + // which are guaranteed to be with disjoint addr ranges. Thus, a ptr always + // maps to a unique handle and should not be registered before the current + // ptr is deregistered and freed. + TORCH_CHECK( + registeredSegmentHandles_.count(ptr) == 0, + "Segment with ptr ", + ptr, + " has already been registered on ncclComm_ ", + ncclComm_); + + void* handle; + C10D_NCCL_CHECK( + ncclCommRegister(ncclComm_, ptr, size, &handle), + c10::str( + "Failed to register segment with ptr ", + ptr, + ", size ", + size, + " on ncclComm_ ", + ncclComm_)); + registeredSegmentHandles_[ptr] = handle; + return ncclSuccess; +#else + return ncclInvalidUsage; +#endif + } + + ncclResult_t deregisterSegment(void* ptr) { + std::unique_lock lock(mutex_); +#ifdef NCCL_HAS_COMM_REGISTER + TORCH_CHECK( + registeredSegmentHandles_.count(ptr) == 1, + "Segment with ptr ", + ptr, + " is not registered on ncclComm_ ", + ncclComm_); + + void* handle = registeredSegmentHandles_[ptr]; + C10D_NCCL_CHECK( + ncclCommDeregister(ncclComm_, handle), + c10::str( + "Failed to deregister segment handle ", + handle, + " on ncclComm_ ", + ncclComm_)); + registeredSegmentHandles_.erase(ptr); + return ncclSuccess; +#else + return ncclInvalidUsage; +#endif + } + + protected: + ncclComm_t ncclComm_; + // Unique nccl_id for this communicator. + ncclUniqueId ncclId_; + bool aborted_; + uint64_t ncclCommSplitCounter_{0}; + ncclResult_t ncclAsyncErr_; + mutable std::mutex mutex_; + // Rank that this communicator corresponds to. + int rank_; + // Optional reason for communicator failure, provided by ProcessGroupNCCL for + // better error messaging. + c10::optional commFailureReason_; +#ifdef NCCL_HAS_COMM_REGISTER + // Stores handlers for tensors registered by NCCL + std::unordered_map registeredSegmentHandles_; +#endif +}; + +// Helper that automatically cleans up premul sums. 
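+// ncclRedOpRAII owns an ncclRedOp_t; ops created for premul-sum reductions
+// are released with ncclRedOpDestroy on the associated communicator when the
+// wrapper goes out of scope (only when premul-sum support is compiled in).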
+struct ncclRedOpRAII { + ncclRedOpRAII() = default; + ncclRedOpRAII(ncclRedOp_t op) : op_(op) {} + ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm) + : op_(op), comm_(comm), premul_sum_(true) {} + ncclRedOpRAII(const ncclRedOpRAII&) = delete; + ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete; + ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() { + std::swap(tmp.op_, this->op_); + std::swap(tmp.comm_, this->comm_); + std::swap(tmp.premul_sum_, this->premul_sum_); + } +#if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT) + ~ncclRedOpRAII() { + if (premul_sum_) { + ncclRedOpDestroy(op_, comm_); + } + } +#endif + operator ncclRedOp_t() const { + return op_; + } + ncclRedOp_t op_; + ncclComm_t comm_; + bool premul_sum_ = false; +}; + +} // namespace c10d + +#endif // USE_C10D_NCCL diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..25a0b6cdfec5e32b86df56b0e0aa5ce78b3afff3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp @@ -0,0 +1,139 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { + +class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase { + public: + ParamCommsDebugInfo() = default; + ParamCommsDebugInfo( + int rank, + std::string&& colName, + int inNelems, + int outNelems, + at::ScalarType dType, + std::vector inSplitSizes, + std::vector outSplitSizes, + int worldSize); + + ~ParamCommsDebugInfo() override = default; + + int getRank() const { + return rank_; + } + + int getWorldSize() const { + return worldSize_; + } + + const std::string getColumnName() const { + return columnName_; + } + + int getInMessageNelems() const { + return inMessageNelems_; + } + + int getOutMessageNelems() const { + return outMessageNelems_; + } + + at::ScalarType getDType() const { + return dType_; + } + + const std::vector& getInputSplitSizes() const { + return inputSplitSizes_; + } + + const std::vector& getOutputSplitSizes() const { + return outputSplitSizes_; + } + + private: + int rank_{}; + int worldSize_{}; + std::string columnName_; + int inMessageNelems_{}; + int outMessageNelems_{}; + at::ScalarType dType_ = at::kByte; + std::vector inputSplitSizes_; + std::vector outputSplitSizes_; +}; + +#define RECORD_PARAM_COMMS( \ + seq, \ + pg_ptr, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize) \ + auto paramCommsInfo = std::make_shared( \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize); \ + c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \ + std::initializer_list paramList = { \ + c10::IValue(seq), \ + c10::IValue(pg_ptr), \ + rank, \ + colName, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize}; \ + c10::ArrayRef paramInputs(paramList); \ + RECORD_FUNCTION(at::kParamCommsCallName, paramInputs); + +#define RECORD_PARAM_COMMS_DATA( \ + seq, \ + pg_ptr, \ + InputTensors, \ + OutputTensors, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize) \ + auto paramCommsInfo = std::make_shared( \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize); \ + c10::DebugInfoGuard 
g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \ + std::initializer_list paramList = { \ + c10::IValue(InputTensors), \ + c10::IValue(seq), \ + c10::IValue(pg_ptr), \ + rank, \ + colName, \ + inSplitSizes, \ + outSplitSizes, \ + worldSize}; \ + c10::ArrayRef paramInputs(paramList); \ + RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \ + at::kParamCommsCallName, \ + paramInputs, \ + std::vector(1, c10::IValue(OutputTensors))); +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..669872f22a549593cacb507d7bab2d4df30f98e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp @@ -0,0 +1,721 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +// ************************************************************************* +// PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN +// versions 1.7 and 1.8. +// PLEASE DO NOT ADD ANY DEPENDENCIES. +// SEE RFC: https://github.com/pytorch/pytorch/issues/39662 +// ************************************************************************* + +constexpr auto kProcessGroupDefaultTimeout = + std::chrono::milliseconds(30 * 60 * 1000); + +namespace c10d { + +// ProcessGroup is a base class that captures collective and point to +// point communication in a fixed set of processes. +// +// The functions specified in the class below describe the API alone; +// implementations are provided in subclasses. +// +// Every function that performs I/O is executed asynchronously by a +// thread pool owned by the ProcessGroup (by default). They return an +// object that can be used to wait for completion or error. +// +// The ProcessGroup can instantiate subgroups with fewer or an equal +// number of members. Implementations must take care that multiple +// process groups can be used in parallel and synchronize accordingly. +// +// The ProcessGroup assumes a fixed set of processes. If the set +// changes, existing instances must be destructed and instantiation +// and initialization must start from scratch. For members of the +// process group to find each other (referred to as rendezvous from +// hereon) +// +class TORCH_API ProcessGroup : public torch::CustomClassHolder { + public: + // ProcessGroup Options is a base struct that defines the basic options + // when constructing a ProcessGroup. Each ProcessGroup subclass should + // extend this struct and define its options if it wants to provide more + // config options (beyond basic ones defined here) to end user. 
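+  // Currently this base struct only carries the collective timeout and the
+  // backend name.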
+ struct TORCH_API Options : torch::CustomClassHolder { + explicit Options( + std::string backend, + std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout) + : timeout(timeout), backend(std::move(backend)) {} + ~Options() override = default; + + std::chrono::milliseconds timeout; + + // backend name + const std::string backend; + }; + + enum BackendType { + UNDEFINED = 0, + GLOO = 1, + NCCL = 2, + UCC = 3, + MPI = 4, + CUSTOM = 5, + }; + + // Not used, set for backwards compatibility and only used for TypeDef in + // Ops.cpp + explicit ProcessGroup(int rank, int size); + + explicit ProcessGroup( + const c10::intrusive_ptr<::c10d::Store>& store, + int rank, + int size, + c10::intrusive_ptr options); + ~ProcessGroup() override; + + int getRank() const { + return rank_; + } + + int getSize() const { + return size_; + } + + // Returns an unique opaque ID of this process group object. + int64_t getID() const { + return reinterpret_cast(this); + } + + // Returns an unique opaque ID of a backend for the specific backend type + // that can correlate with this process group's collectives. + int64_t getBackendID(BackendType backend_type) const { + return reinterpret_cast(getBackend(backend_type).get()); + } + + virtual const std::string getBackendName() const { + return options_->backend; + }; + + BackendType getBackendType() const { + return backendType_; + }; + + virtual void startCoalescing(c10::DeviceType deviceType) { + // only nccl has implemented startCoalescing so only execute for nccl + // backends + auto backend = getBackend(deviceType); + backend->startCoalescing(); + } + + virtual c10::intrusive_ptr endCoalescing(c10::DeviceType deviceType) { + // only nccl has implemented endCoalescing so only execute for nccl + // backends + auto backend = getBackend(deviceType); + auto work = backend->endCoalescing(); + return work; + } + + virtual c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::broadcast_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t, + bool, + int64_t)>(); + // It's awakward to unbox the opts here and box them again in the custom C++ + // op. But it's also complicated to make opts as a CustomClassHolder. Leave + // it as it is now. 
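+    // The registered op returns a (tensors, work) tuple; only the Work handle
+    // is handed back to the caller.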
+ return std::get<1>(op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.rootTensor, + opts.asyncOp, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allreduce_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + const c10::optional& sparse_indices, + int64_t)>(); + + return std::get<1>(op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.sparseIndices, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allreduce_coalesced_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.timeout.count()); + } + + virtual c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.rootRank, + opts.rootTensor, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_", "") + .typed>, + c10::intrusive_ptr>( + const std::vector>&, + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.timeout.count())); + } + + // Gathers a single tensor inputBuffer into a single buffer outputBuffer that + // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE. + // For implementers of ProcessGroup API and advanced users only. + // Note: this function will be deprecated in near future. + virtual c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::_allgather_base_", "") + .typed>( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + bool, + int64_t)>(); + + return std::get<1>(op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.asyncOp, + opts.timeout.count())); + } + + // This function is deprecated and will be moved out of ProcessGroup to comms: + // * do not add dependencies on this function, + // * do not implement it in your ProcessGroup, implement _allgather_base + // instead. 
+ virtual c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_coalesced_", "") + .typed( + const std::vector>&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&)>(); + + return op.call( + outputTensorLists, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this)); + } + + // This function is a coalesced version of `allgather_into_tensor` (currently + // still named as `_allgather_base`). Each tensor in the vector corresponds to + // an input/output of one `allgather_into_tensor` operation. + virtual c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "") + .typed( + const at::TensorList, + const at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&)>(); + + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this)); + } + + virtual c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::gather_", "") + .typed( + const std::vector>&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::scatter_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const std::vector>&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + bool, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.asyncOp, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_scatter_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const std::vector>&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.timeout.count())); + } + + virtual c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::_reduce_scatter_base_", "") + .typed>( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + bool, + int64_t)>(); + return std::get<1>(op.call( + 
outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.asyncOp, + opts.timeout.count())); + } + + // This function is a coalesced version of `reduce_scatter_tensor` (currently + // still named as `_reduce_scatter_base`). Each tensor in the vector + // corresponds to an input/output of one `reduce_scatter_tensor` operation. + virtual c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "") + .typed( + const at::TensorList, + const at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.timeout.count()); + } + + virtual c10::intrusive_ptr alltoall_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::alltoall_base_", "") + .typed( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + std::vector, + std::vector, + int64_t)>(); + return op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + outputSplitSizes, + inputSplitSizes, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::alltoall_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.timeout.count())); + } + + virtual void monitoredBarrier( + const BarrierOptions& opts, + bool wait_all_ranks = false) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::monitored_barrier_", "") + .typed&, + const std::vector&, + int64_t, + bool)>(); + // Default to using cpu implementation, monitored barrier is only for GLOO + at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU)); + op.call( + tensor, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.device_ids, + opts.timeout.count(), + wait_all_ranks); + } + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + virtual void setSequenceNumberForGroup() { + auto backendType = getBackendType(); + // TODO: HACK for backend name to get sequence number for that backend. 
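+    // The dispatch below covers the Gloo, NCCL, and UCC backends; all other
+    // backends raise an error.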
+ if (backendType == ProcessGroup::BackendType::GLOO || + backendType == ProcessGroup::BackendType::NCCL || + backendType == ProcessGroup::BackendType::UCC) { + getDefaultBackend()->setSequenceNumberForGroup(); + } else { + TORCH_CHECK( + false, + c10::str( + "ProcessGroup ", + getBackendName(), + " does not yet support sequence numbers.")); + } + } + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + virtual uint64_t getSequenceNumberForGroup() { + auto backendType = getBackendType(); + + // TODO: HACK for backend name to get sequence number for that backend. + if (backendType == ProcessGroup::BackendType::GLOO || + backendType == ProcessGroup::BackendType::NCCL || + backendType == ProcessGroup::BackendType::UCC) { + return getDefaultBackend()->getSequenceNumberForGroup(); + } else { + TORCH_CHECK( + false, + c10::str( + "ProcessGroup ", + getBackendName(), + " does not yet support sequence numbers.")); + } + } + + virtual c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::send", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + dstRank, + tag); + } + + virtual c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::recv_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + srcRank, + tag); + } + + virtual c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::recv_any_source_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + tag); + } + + virtual c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) { + static at::Tensor tensor; + // TODO: if nccl was specified then use it + auto device = opts.device; + if (device.has_value()) { + // set device tensor from argument + tensor = at::empty( + {1}, at::TensorOptions().device(device.value()).dtype(at::kByte)); + } else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) { + // set cuda tensor + tensor = at::empty( + {1}, + at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte)); + } else { + // Default to using cpu implementation + tensor = at::empty( + {1}, + at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte)); + } + + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::barrier", "") + .typed( + at::Tensor, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const std::vector&, + int64_t)>(); + + return op.call( + tensor, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.device_ids, + opts.timeout.count()); + } + + c10::intrusive_ptr getOptions() { + return options_; + } + + bool hasBackends() { + return !deviceTypeToBackendType_.empty(); + } + + void setBackend( + c10::DeviceType deviceType, + BackendType backendType, + const c10::optional>& backend) { + // 
TODO: should we add these entries after the backend setting succeeds? + deviceTypeToBackendType_[deviceType] = backendType; + deviceTypes_.insert(deviceType); + // if the backendType is already set then reuse it for this device + if (backendTypeToBackend_.find(backendType) != + backendTypeToBackend_.end()) { + auto existingBackend = backendTypeToBackend_.at(backendType); + deviceTypeToBackend_[deviceType] = existingBackend; + } else { + // check if backend has value + if (backend.has_value()) { + deviceTypeToBackend_[deviceType] = backend.value(); + backendTypeToBackend_[backendType] = backend.value(); + } + } + } + + c10::intrusive_ptr getDefaultBackend() const { + TORCH_CHECK( + backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(), + "Could not find the default backend type ", + backendType_, + " for Process Group with name ", + getBackendName(), + "."); + return backendTypeToBackend_.at(backendType_); + } + + c10::intrusive_ptr getBackend(c10::DeviceType deviceType); + + c10::intrusive_ptr getBackend(BackendType backendType) const { + TORCH_CHECK( + backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(), + "Could not find backend type ", + backendType, + "."); + return backendTypeToBackend_.at(backendType); + } + + // Return device types supported by this ProcessGroup. + // Note: the return type is `Device` rather than `DeviceType` for the purpose + // of easy comparison at Python level. The `Device` will have default index + // (-1). + std::vector getDeviceTypes() const { + std::vector devices; + devices.reserve(deviceTypes_.size()); + for (auto& dt : deviceTypes_) { + devices.push_back(c10::Device(dt)); + } + return devices; + } + + void registerOnCompletionHook( + std::function)>&& hook) { + getDefaultBackend()->registerOnCompletionHook(std::move(hook)); + } + + void waitForPendingWorks() { + getDefaultBackend()->waitForPendingWorks(); + } + + bool hasHooks() const { + return getDefaultBackend()->hasHooks(); + } + + const std::string& getGroupName() const; + void setGroupName(const std::string& name); + void enableCollectivesTiming(); + + void release_resources() override; + + protected: + // Implementations of this interface need to call this to setup + // appropriate logging etc. + void init(); + + c10::intrusive_ptr store_; + const int rank_; + const int size_; + const c10::intrusive_ptr options_; + const BackendType backendType_; + + // Debug level setting. It is parsed once when ProcessGroup is constructed and + // remains the same across use of this process group. 
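+  // (Parsed from the TORCH_DISTRIBUTED_DEBUG environment variable.)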
+ DebugLevel dist_debug_level_; + + // Backend classes for this ProcessGroup + std::unordered_set deviceTypes_; + std::unordered_map deviceTypeToBackendType_; + std::unordered_map> + deviceTypeToBackend_; + std::unordered_map> + backendTypeToBackend_; +}; + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp new file mode 100644 index 0000000000000000000000000000000000000000..11158c80e2fcb84e46058dc964d2ed1965980bb6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp @@ -0,0 +1,438 @@ +#pragma once + +#ifdef USE_C10D_GLOO + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace c10d { + +constexpr const char* GLOO_BACKEND_NAME = "gloo"; + +// ProcessGroupGloo implements Gloo bindings for c10d. +// +// All functions on this class are expected to be called in the same +// order across processes in the group. This is the only way that we +// can guarantee to match up the same calls across processes. For +// multi-threaded usage of process groups, you can use consider using +// multiple process group instances. +// +// The Gloo algorithms that this class calls into are cached by their +// signature (see description of AlgorithmKey above). This cache works +// as follows: every function call instantiates an AlgorithmKey and +// looks in the cache for existing entries. If there is one, it is +// removed from the cache and returned to the caller. If there are +// none, a new entry is created and returned. If an entry was created +// before, but is still in use, the call will block and wait until the +// entry is returned to the cache. +// +// In the future, we hope to extend this to allow multiple entries per +// key, to enable parallelism for a single key. The number of entries +// per key must always be identical for all processes. This maximum +// number can be automatically tuned, but only if we let a single +// process take charge, and have it broadcast the limits. +// +class TORCH_API ProcessGroupGloo : public Backend { + public: + // AsyncWork is the Gloo specific superclass for asynchronous work items. + // We can split asynchronous work into 3 phases: + // 1) Sanity checks and prepare input (e.g. memcpy) + // 2) Run operation on background thread + // 3) Synchronize with completion on foreground thread + // + // There is state to be shared between these 3 phases and all of this state + // is captured in the AsyncWork class and its derivatives. + // + // Note: while we are porting operations to use new style collectives, there + // is a split between operations using the existing caching approach and + // operations using the new AsyncWork base class. Over time we will port + // all operations and perform needed cleanup. + // + // FIXME: This probably should be called WorkGloo since the work is executed + // in sync mode by a background thread. 
+ class TORCH_API AsyncWork : public Work { + public: + explicit AsyncWork( + std::vector> outputTensors, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr, + const c10::optional>& inputTensors = + c10::nullopt); + + ~AsyncWork() override = default; + + static void execute(c10::intrusive_ptr work); + + virtual void run() = 0; + + std::vector result() override; + + c10::intrusive_ptr getFuture() override; + uint64_t getSequencenumber() const override; + + protected: + friend class ProcessGroupGloo; + + private: + void finishWorkGloo(); + void finishWorkGlooError(std::exception_ptr eptr); + inline void recordAsyncWorkProfilingInfo( + const char* profilingTitle, + const c10::optional>& inputTensors); + + const std::vector> outputTensors_; + c10::intrusive_ptr future_; + std::function recordFunctionBeforeCallback_; + const uint64_t seq_; + }; + + // Wrap c10d store as Gloo store + class TORCH_API GlooStore : public ::gloo::rendezvous::Store { + public: + GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {} + + void setUint(const std::string& key, const std::vector& value) { + store_->set(key, value); + } + + void set(const std::string& key, const std::vector& value) override { + std::vector tmp(value.begin(), value.end()); + store_->set(key, tmp); + } + + std::vector getUint(const std::string& key) { + auto value = store_->get(key); + return value; + } + + std::vector get(const std::string& key) override { + auto value = store_->get(key); + return std::vector(value.begin(), value.end()); + } + + void wait(const std::vector& keys) override { + store_->wait(keys, ::c10d::Store::kDefaultTimeout); + } + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override { + store_->wait(keys, timeout); + } + +#ifdef GLOO_STORE_HAS_STORE_V2 + bool has_v2_support() override { + return store_->hasExtendedApi(); + } + + std::vector> multi_get( + const std::vector& keys) override { + std::vector> res; + for (auto& value : store_->multiGet(keys)) { + res.emplace_back(std::vector(value.begin(), value.end())); + } + return res; + } + + void multi_set( + const std::vector& keys, + const std::vector>& values) override { + std::vector> u_values; + for (auto& value : values) { + u_values.emplace_back(std::vector(value.begin(), value.end())); + } + store_->multiSet(keys, u_values); + } + + void append(const std::string& key, const std::vector& value) + override { + std::vector tmp(value.begin(), value.end()); + return store_->append(key, tmp); + } + + int64_t add(const std::string& key, int64_t value) override { + return store_->add(key, value); + } +#endif + + protected: + c10::intrusive_ptr<::c10d::Store> store_; + }; + + // For send and recv operations there is no need to pass them to the + // thread pool as they are entirely completed by the device thread. + // This work object is used to synchronize completion of the send or + // recv operation. It keeps a reference to the tensor it is + // operating on to prevent it from being deallocated while the + // operation is still in flight. 
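+  // Completion is observed by waiting on the unbound Gloo buffer itself
+  // rather than by going through the worker thread pool.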
+ class TORCH_API SendWork : public Work { + public: + explicit SendWork( + at::Tensor& tensor, + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer, + uint64_t seq); + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + uint64_t getSequencenumber() const override; + + protected: + at::Tensor tensor_; + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_; + const uint64_t seq_; + }; + + class TORCH_API RecvWork : public Work { + public: + explicit RecvWork( + at::Tensor& tensor, + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr); + + int sourceRank() const override; + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + uint64_t getSequencenumber() const override; + + protected: + at::Tensor tensor_; + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_; + int srcRank_; + const uint64_t seq_; + }; + + struct TORCH_API Options : public Backend::Options { + explicit Options( + std::chrono::milliseconds timeout = kBackendDefaultTimeout); + + // return intrusive_ptr of the object + static c10::intrusive_ptr create( + std::chrono::milliseconds timeout = kBackendDefaultTimeout) { + return c10::make_intrusive(timeout); + } + + std::vector> devices; + int threads; + }; + + const std::string getBackendName() const override { + return std::string(GLOO_BACKEND_NAME); + } + + // Helper functions to create a new device object. + // They are static functions on this class to keep them logically + // separate from the rest of the code base (e.g. torch/csrc/distributed). + + // Create new device instance for specific interface. + static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface( + const std::string& interface); + + // Create new device instance for specific hostname or address. + static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname( + const std::string& hostname); + + // Create new device instance. + // It tries to resolve this machine's hostname and bind to that address. + // If that fails (i.e. the hostname doesn't resolve to an address), it + // falls back to binding to the loopback address. + static std::shared_ptr<::gloo::transport::Device> createDefaultDevice(); + + // Create ProcessGroupGloo instance. 
+ static c10::intrusive_ptr createProcessGroupGloo( + const c10::intrusive_ptr& store, + int rank, + int size, + std::chrono::milliseconds timeout); + + explicit ProcessGroupGloo( + const c10::intrusive_ptr& store, + int rank, + int size, + c10::intrusive_ptr options = Options::create()); + + ~ProcessGroupGloo() override; + + c10::intrusive_ptr getOptions() { + return options_; + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_sparse( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& output_tensor, + at::Tensor& input_tensor, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& output_lists, + std::vector& input_list, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputs, + std::vector& inputs, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputs, + std::vector>& inputs, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputs, + std::vector>& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputCounts, + std::vector& inputCounts, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + void enableCollectivesTiming() override; + + const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const { + return store_; + } + + // Similar to barrier(), but blocks rank 0 until all other ranks have + // acknowledged that they are alive (through send/recv from rank 0). Rank 0 + // is able to report all failed ranks if waitAllRanks = true, otherwise + // reports the first rank it detected as failed. + void monitoredBarrier( + const BarrierOptions& opts = BarrierOptions(), + bool waitAllRanks = false) override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. 
If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + int getNumThreads() { + return options_->threads; + } + + protected: + std::unique_ptr<::gloo::rendezvous::Store> store_; + const c10::intrusive_ptr options_; + + // Every Gloo context represents a set of connections to its peers. + // In order to use more than one device (or allow for parallelism on + // a single device), you need multiple contexts. + std::vector> contexts_; + std::vector threads_; + bool stop_; + + // Incremented for every collective we kick off. + // The value is used as tag for collective operations. Collectives are kicked + // off in identical order across processes. Therefore the tag can be used + // to match up operations during concurrent execution. + uint32_t collectiveCounter_; + + // Returns next collective tag to use (uses collectiveCounter_). + uint32_t nextTag(); + + // Returns the context to use for the specified tag. + // With `nextTag` returning an increasing number, this should lead + // to contexts being used in a round-robin fashion. + std::shared_ptr<::gloo::Context> getContext(uint32_t tag); + + // Entrypoint for worker threads. + void runLoop(int workerIndex); + + // Queue work to run on worker thread. + void enqueue(c10::intrusive_ptr work); + + // Keep both a queue of pending work, and a vector with in progress work. + // Both of these can only be mutated when holding the queue lock. + // We keep both around instead of just the queue, so we can grab a weak_ptr + // to all in progress and pending work when executing a barrier. + // When executing a barrier, we need to ensure that all prior work + // has completed before completing itself. + std::deque> workQueue_; + std::vector> workInProgress_; + std::mutex workMutex_; + std::condition_variable workProduceCV_; + std::condition_variable workConsumeCV_; + uint64_t seq_{0}; +}; + +} // namespace c10d + +#endif // USE_C10D_GLOO diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp new file mode 100644 index 0000000000000000000000000000000000000000..e92f195c36a74cc56538448b4db1cf8cafbb2675 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp @@ -0,0 +1,271 @@ +#pragma once + +#ifdef USE_C10D_MPI + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include + +namespace c10d { + +constexpr const char* MPI_BACKEND_NAME = "mpi"; + +// WorkEntry is the state associated with a single MPI run instance. +// It include the source Tensor list and destination Tensor list, as well as +// The actual run function that will operate either on src or dst or both. +struct WorkEntry { + explicit WorkEntry( + std::vector* srcPtr, + std::vector* dstPtr, + std::function&)> run) + : dst(dstPtr ? *dstPtr : std::vector()), run(std::move(run)) { + if (srcPtr) { + src = *srcPtr; + } + } + + // Not copyable + WorkEntry(const WorkEntry&) = delete; + // Not copy assignable + WorkEntry& operator=(const WorkEntry&) = delete; + + // For input and output tensors (in-place), we will always use src + std::vector src; + + // Copy of user provided outputs. 
+  const std::vector<at::Tensor> dst;
+
+  // src rank returned, for recv only
+  int* srcRank = nullptr;
+  std::function<void(std::unique_ptr<WorkEntry>&)> run;
+};
+
+// ProcessGroupMPI implements MPI bindings for c10d.
+//
+// All functions on this class are expected to be called in the same
+// order across processes in the group. This is the only way that we
+// can guarantee to match up the same calls across processes.
+//
+// All MPI functions provided by this class are asynchronously scheduled on a
+// worker thread. Therefore, ProcessGroupMPI requires the MPI implementation
+// that is used to have a minimum thread support value of MPI_THREAD_SERIALIZED.
+// That is, the process may be multi-threaded, and multiple threads may make
+// MPI calls, but only one at a time: MPI calls are not made concurrently from
+// two distinct threads (all MPI calls are serialized). However, with
+// MPI_THREAD_SERIALIZED, ProcessGroupMPI will only support a single process
+// group. In other words, no more than one process group can be created
+// globally.
+//
+// If you would like to use multiple ProcessGroupMPI instances, your MPI
+// implementation must have a thread support value of MPI_THREAD_MULTIPLE,
+// that is, multiple threads may call MPI, with no restriction.
+//
+// Also note that ProcessGroupMPI only supports a single tensor operation. In
+// other words, the size of the input tensor vector should always be 1.
+//
+// CUDA tensors can be supported if the MPI used is CUDA-aware MPI, and
+// ProcessGroupMPI will automatically detect this support.
+class TORCH_API ProcessGroupMPI : public Backend {
+ public:
+  class WorkMPI : public Work {
+   public:
+    explicit WorkMPI(
+        std::vector<at::Tensor> outputTensors,
+        const char* profilingTitle = nullptr,
+        const c10::optional<std::vector<at::Tensor>>& inputTensors =
+            c10::nullopt)
+        : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors),
+          outputTensors_(std::move(outputTensors)),
+          future_(c10::make_intrusive<at::ivalue::Future>(
+              c10::ListType::create(c10::TensorType::get()))) {}
+
+    std::vector<at::Tensor> result() override;
+
+    c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
+
+   protected:
+    friend class ProcessGroupMPI;
+
+   private:
+    void finishWorkMPI();
+    void finishWorkMPIError(std::exception_ptr eptr);
+
+    std::vector<at::Tensor> outputTensors_;
+    c10::intrusive_ptr<at::ivalue::Future> future_;
+  };
+
+  class AsyncWork : public Work {
+   public:
+    AsyncWork(
+        MPI_Request request,
+        std::vector<at::Tensor> outputTensors,
+        const char* profilingTitle = nullptr,
+        const c10::optional<std::vector<at::Tensor>>& inputTensors =
+            c10::nullopt);
+
+    ~AsyncWork() override;
+
+    bool isCompleted() override;
+
+    bool isSuccess() const override;
+
+    int sourceRank() const override;
+
+    bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
+
+    void abort() override;
+
+    std::vector<at::Tensor> result() override;
+
+   protected:
+    void populateException();
+
+   private:
+    const std::vector<at::Tensor> outputTensors_;
+    MPI_Request request_;
+    MPI_Status status_;
+  };
+
+  // Constructor will spawn the worker thread loop
+  explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm);
+
+  ~ProcessGroupMPI() override;
+
+  // Abort the MPI program; needs to be called when an exception is detected
+  void abort();
+
+  const std::string getBackendName() const override {
+    return std::string(MPI_BACKEND_NAME);
+  }
+
+  c10::intrusive_ptr<Work> broadcast(
+      std::vector<at::Tensor>& data,
+      const BroadcastOptions& opts = BroadcastOptions()) override;
+
+  c10::intrusive_ptr<Work> allreduce(
+      std::vector<at::Tensor>& tensors,
+      const AllreduceOptions& opts = AllreduceOptions()) override;
+
+  c10::intrusive_ptr<Work> allreduce_coalesced(
+      std::vector<at::Tensor>& tensors,
+      const AllreduceCoalescedOptions&
opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputbuffer, + at::Tensor& inputbuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensor, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + // Creating a new ProcessGroupMPI, will initialize MPI if not initialized + static c10::intrusive_ptr createProcessGroupMPI( + std::vector ranks = {}); + + protected: + using WorkType = + std::tuple, c10::intrusive_ptr>; + // Worker thread loop + void runLoop(); + // Helper function that is called by the destructor + void destroy(); + + c10::intrusive_ptr enqueue( + std::unique_ptr entry, + const char* profilingTitle = nullptr, + const c10::optional>& inputTensors = + c10::nullopt); + + bool stop_; + + std::mutex pgMutex_; + std::thread workerThread_; + + std::deque queue_; + std::condition_variable queueProduceCV_; + std::condition_variable queueConsumeCV_; + + // Global states + static void initMPIOnce(); + static void mpiExit(); + static c10::once_flag onceFlagInitMPI; + + static std::mutex pgGlobalMutex_; + static int mpiThreadSupport_; + + MPI_Comm pgComm_; +}; + +} // namespace c10d + +#endif // USE_C10D_MPI diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..05a5f9399d31ad7b0cdef245a0f29db5a70b1d12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp @@ -0,0 +1,181 @@ +#pragma once + +#include +#include +#include + +namespace c10d { + +// PyProcessGroup is a pybind11 trampoline class to allow a Python +// class to inherit from torch.distributed.ProcessGroup +class PyProcessGroup : public ProcessGroup { + public: + // PyWork is a pybind11 trampoline class to allow a Python + // 
class to inherit from torch.distributed.Work + class PyWork : public Work { + public: + PyWork() = default; + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override { + PYBIND11_OVERRIDE( + bool, /* Return type */ + Work, /* Parent class */ + wait, /* Name of function in C++ */ + timeout); + } + + c10::intrusive_ptr getFuture() override { + // We cannot use PYBIND11_OVERRIDE because: + // 1. We have to >MANUALLY< unwrap the PyFutureWrapper and + // 2. The python name is get_future + pybind11::gil_scoped_acquire gil; + auto override = + pybind11::get_override(static_cast(this), "get_future"); + + if (override) { + py::object o = override(); + auto futWrapper = + o.cast>(); + return futWrapper->fut; + } + + return Work::getFuture(); + } + }; + + using ProcessGroup::ProcessGroup; + + const std::string getBackendName() const override { + PYBIND11_OVERRIDE_PURE( + std::string, /* Return type */ + ProcessGroup, /* Parent class */ + getBackendName, /* Name of function in C++ */ + ); + } + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allgather, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allreduce, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + barrier, /* Name of function in C++ */ + opts); + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + broadcast, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + reduce_scatter, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + send, /* Name of function in C++ */ + tensors, + dstRank, + tag); + } + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + recv, /* Name of function in C++ */ + tensors, + srcRank, + tag); + } +}; + +class TORCH_PYTHON_API PythonOnCompletionHook { + public: + // Wraps a py::object hook and acquires Python GIL in dtor before + // destructing the hook object. + PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {} + + ~PythonOnCompletionHook() { + py::gil_scoped_acquire ag; + hook_.dec_ref(); + // Explicitly set hook_ to nullptr to prevent py::object's dtor + // to decref on the PyObject again. 
+ // See Note [Destructing py::object] in python_ivalue.h + hook_.ptr() = nullptr; + } + + void operator()(std::shared_ptr workInfo) const { + std::exception_ptr eptr; + { + py::gil_scoped_acquire acquire; + try { + hook_(workInfo); + } catch (py::error_already_set& e) { + // py::error_already_set requires GIL to destruct, take + // special care. + eptr = std::make_exception_ptr(std::runtime_error(e.what())); + e.restore(); + PyErr_Clear(); + } catch (std::exception& e) { + eptr = std::current_exception(); + } + } + // No more Python-related stuff at this point, i.e., this + // exception can be captured and handled by PG backend. + if (eptr) + std::rethrow_exception(eptr); + } + + private: + py::object hook_; +}; + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3c0ae960ff7ca74aefd5a17037565e95d1bf76a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp @@ -0,0 +1,101 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace c10d { + +// callback function will be given arguments (optional oldValue, +// optional newValue) +using WatchKeyCallback = + std::function, c10::optional)>; + +class TORCH_API Store : public torch::CustomClassHolder { + public: + static constexpr std::chrono::milliseconds kDefaultTimeout = + std::chrono::seconds(300); + static constexpr std::chrono::milliseconds kNoTimeout = + std::chrono::milliseconds::zero(); + + Store() : timeout_(kDefaultTimeout) {} + + explicit Store(const std::chrono::milliseconds& timeout) + : timeout_(timeout) {} + + Store(const Store&) = default; + Store(Store&&) noexcept = default; + + ~Store() override = default; + + void set(const std::string& key, const std::string& value); + + virtual void set( + const std::string& key, + const std::vector& value) = 0; + + std::string compareSet( + const std::string& key, + const std::string& currentValue, + const std::string& newValue); + + virtual std::vector compareSet( + const std::string& key, + const std::vector& currentValue, + const std::vector& newValue) { + TORCH_INTERNAL_ASSERT(false, "Not implemented."); + } + + std::string get_to_str(const std::string& key); + + virtual std::vector get(const std::string& key) = 0; + + virtual int64_t add(const std::string& key, int64_t value) = 0; + + virtual bool deleteKey(const std::string& key) = 0; + + virtual bool check(const std::vector& keys) = 0; + + virtual int64_t getNumKeys() = 0; + + virtual void wait(const std::vector& keys) = 0; + + virtual void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) = 0; + + virtual const std::chrono::milliseconds& getTimeout() const noexcept; + + virtual void setTimeout(const std::chrono::milliseconds& timeout); + + // watchKey() is deprecated and no longer supported. 
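The Store interface declared above is the key/value rendezvous primitive the process groups are built on. A minimal usage sketch follows, assuming `store` already points at some concrete Store implementation; the function name, key names, and include path are illustrative and not part of this header. The deprecated watchKey() declaration continues below.

#include <torch/csrc/distributed/c10d/Store.hpp>  // include path assumed from this diff

// Hypothetical helper: basic set/get/add/wait usage against any Store.
void rendezvous_example(const c10::intrusive_ptr<c10d::Store>& store) {
  // Publish a value and read it back through the string convenience wrappers.
  store->set("master_addr", "127.0.0.1");
  std::string addr = store->get_to_str("master_addr");

  // Atomically bump a shared counter; each caller observes a distinct value.
  int64_t arrived = store->add("arrived", 1);

  // Block until some other rank has published its key (honors the store timeout).
  store->wait({"peer_ready"});
  (void)addr;
  (void)arrived;
}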
+ virtual void watchKey( + const std::string& /* unused */, + WatchKeyCallback /* unused */) { + TORCH_CHECK(false, "watchKey is deprecated, no implementation support it."); + } + + virtual void append( + const std::string& key, + const std::vector& value); + + virtual std::vector> multiGet( + const std::vector& keys); + + virtual void multiSet( + const std::vector& keys, + const std::vector>& values); + + // Returns true if this store support append, multiGet and multiSet + virtual bool hasExtendedApi() const; + + protected: + std::chrono::milliseconds timeout_; +}; + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp new file mode 100644 index 0000000000000000000000000000000000000000..dc9a9856965addb9792796d4928c7592ea38c64a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp @@ -0,0 +1,180 @@ +#pragma once + +#include + +#include +#include + +#include +#include + +#include +#include + +namespace c10d { + +// Base class for supplementary data potentially needed by ReduceOps +struct TORCH_API _SupplementBase : torch::CustomClassHolder { + ~_SupplementBase() override = default; +}; + +// Supplementary data specific to NCCL PREMUL_SUM +// The point of use in ProcessGroupNCCL knows how to unpack it. +struct NCCLPreMulSumSupplement : _SupplementBase { + double double_factor{0.0}; + at::Tensor tensor_factor; + NCCLPreMulSumSupplement(double f) : double_factor{f} {} + NCCLPreMulSumSupplement(at::Tensor t) : tensor_factor{std::move(t)} { + TORCH_CHECK_EQ(tensor_factor.numel(), 1); + } +}; + +// Other ReduceOps that need different supplementary data can also +// derive from _SupplementBase. +struct TORCH_API ReduceOp : torch::CustomClassHolder { + // note(crcrpar): RedOpType could be defined outside of `ReduceOp` + enum RedOpType : uint8_t { + SUM = 0, + AVG = 1, + PRODUCT = 2, + MIN = 3, + MAX = 4, + BAND = 5, // Bitwise AND + BOR = 6, // Bitwise OR + BXOR = 7, // Bitwise XOR + PREMUL_SUM = 8, // Multiply by a user-supplied constant before summing. + UNUSED = 9 + }; + + ReduceOp() = default; + + ReduceOp(RedOpType op) : op_(op) { + TORCH_INTERNAL_ASSERT( + op_ != PREMUL_SUM, + "Use `torch.distributed._make_nccl_premul_sum` to create an instance of ReduceOp with PREMUL_SUM"); + } + + ReduceOp( + RedOpType op, + c10::intrusive_ptr<_SupplementBase> optional_supplement) { + if (optional_supplement.get()) { + op_ = op; + } else { + supplement_ = optional_supplement; + } + } + + // The heap resource supplement_, if it exists, is managed by a + // c10::intrusive_ptr, so constructors and operator= can be simple + ReduceOp(const ReduceOp& other) + : op_(other.op_), supplement_(other.supplement_) {} + + const ReduceOp& operator=(const ReduceOp& other) { + op_ = other.op_; + supplement_ = other.supplement_; + return *this; + } + + operator RedOpType() const { + return op_; + } + + bool operator==(const std::uint8_t other) { + TORCH_INTERNAL_ASSERT(other < 9, "Invalid other op value"); + return other == op_; + } + + bool operator==(const ReduceOp::RedOpType other) { + return *this == static_cast(other); + } + + // todo(crcrpar): Handle `RedOpType::PREMUL_SUM` with its scaling factor. + bool operator==(const ReduceOp& other) { + return *this == other.op_; + } + + RedOpType op_ = SUM; + // supplement_ is "type-erased" storage for optional supplementary + // data the op might need. 
+ // The point of use will know the derived type supplement_ really is, + // and downcast its pointer to extract the data as the needed type(s). + // Right now, only PREMUL_SUM needs supplementary data, but the same + // mechanism could extend to support other nontrivial reduce ops with + // different supplementary payloads. + c10::intrusive_ptr<_SupplementBase> supplement_; +}; + +template +ReduceOp makeNCCLPreMulSum(const T& factor) { + ReduceOp rop; + rop.op_ = ReduceOp::PREMUL_SUM; + rop.supplement_ = c10::make_intrusive(factor); + return rop; +} + +constexpr auto kUnsetTimeout = std::chrono::milliseconds(-1); + +struct BroadcastOptions { + int64_t rootRank = 0; + int64_t rootTensor = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct AllreduceOptions { + ReduceOp reduceOp = ReduceOp::SUM; + std::chrono::milliseconds timeout = kUnsetTimeout; + c10::optional sparseIndices = c10::nullopt; +}; + +struct AllreduceCoalescedOptions : AllreduceOptions {}; + +struct ReduceOptions { + ReduceOp reduceOp = ReduceOp::SUM; + int64_t rootRank = 0; + int64_t rootTensor = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; +}; + +struct AllgatherOptions { + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct GatherOptions { + int64_t rootRank = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; +}; + +struct ScatterOptions { + int64_t rootRank = 0; + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct ReduceScatterOptions { + ReduceOp reduceOp = ReduceOp::SUM; + std::chrono::milliseconds timeout = kUnsetTimeout; + bool asyncOp = true; +}; + +struct AllToAllOptions { + std::chrono::milliseconds timeout = kUnsetTimeout; +}; + +struct BarrierOptions { + std::vector device_ids; + std::chrono::milliseconds timeout = kUnsetTimeout; + c10::optional device; +}; + +struct DistributedBackendOptions { + c10::intrusive_ptr<::c10d::Store> store; + int group_rank; + int group_size; + std::chrono::duration timeout; + std::string group_id; + std::vector global_ranks_in_group; +}; + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a44e2de86ef7dc2477d59cbf221f477b00cc8370 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp @@ -0,0 +1,187 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include +#include +#include + +namespace c10d { + +// Macro to generate the error message on a non-successful UCC return value. +#define TORCH_UCC_GET_ERROR_MSG(_err, _error_msg, _result) \ + do { \ + _err = c10::str( \ + "[", \ + std::string(__FILE__), \ + ":", \ + std::to_string(__LINE__), \ + "] ", \ + logger->getLogPrefix(), \ + _error_msg, \ + ", error code ", \ + _result, \ + ": ", \ + ucc_status_string(_result), \ + ", system error code ", \ + errno); \ + } while (0) + +// Macro to throw on a non-successful UCC return value. +#define TORCH_UCC_CHECK(_cmd, _error_msg) \ + do { \ + ucc_status_t result = _cmd; \ + if (result != UCC_OK) { \ + std::string err; \ + TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \ + TORCH_CHECK(false, err); \ + } \ + } while (0) + +// Macro and throw on a non-successful UCC return value and free its request. 
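Stepping back to the ReduceOp declarations from Types.hpp above before the UCC macros continue: constructing ReduceOp(PREMUL_SUM) directly is rejected by the assertion in its constructor, so the scaling factor is meant to go through makeNCCLPreMulSum. A minimal sketch, with an illustrative function name and an include path assumed from this diff:

#include <torch/csrc/distributed/c10d/Types.hpp>  // include path assumed

// Hypothetical helper: builds allreduce options that pre-multiply by `factor`
// before summing (PREMUL_SUM).
c10d::AllreduceOptions scaled_allreduce_opts(double factor) {
  c10d::AllreduceOptions opts;
  // makeNCCLPreMulSum wraps the factor in an NCCLPreMulSumSupplement and
  // tags the resulting ReduceOp as PREMUL_SUM.
  opts.reduceOp = c10d::makeNCCLPreMulSum(factor);
  return opts;
}

The TORCH_UCC_CHECK_REQUEST macro documented just above follows.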
+#define TORCH_UCC_CHECK_REQUEST(_request, _cmd, _error_msg) \ + do { \ + ucc_status_t result = _cmd; \ + if (result != UCC_OK) { \ + std::string err; \ + TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \ + if (_request != nullptr) { \ + ucc_collective_finalize(_request); \ + } \ + TORCH_CHECK(false, err); \ + } \ + } while (0) + +// Macros to print logs with unified format +#define TORCH_UCC_LOG_ERROR(_phase, _msg) \ + LOG(ERROR) << logger->getLogPrefix(_phase) << "[ERROR] " << _msg; +#define TORCH_UCC_LOG_INFO(_phase, _msg) \ + LOG(INFO) << logger->getLogPrefix(_phase) << "[INFO] " << _msg; +#define TORCH_UCC_LOG_DEBUG(_phase, _msg) \ + VLOG(1) << logger->getLogPrefix(_phase) << "[DEBUG] " << _msg; + +enum torch_ucc_phase_t { + TORCH_UCC_UNKNOWN = -1, + TORCH_UCC_INIT, + TORCH_UCC_HEALTH_CHECK, + TORCH_UCC_READY, + TORCH_UCC_COLL_POST, + TORCH_UCC_COLL_PROGRESS, + TORCH_UCC_FINALIZE, +}; + +const std::map ucc_phase_map = { + {TORCH_UCC_UNKNOWN, "UNKNOWN"}, + {TORCH_UCC_INIT, "INIT"}, + {TORCH_UCC_HEALTH_CHECK, "HEALTH_CHECK"}, + {TORCH_UCC_READY, "READY"}, + {TORCH_UCC_COLL_POST, "COLL_POST"}, + {TORCH_UCC_COLL_PROGRESS, "COLL_PROGRESS"}, + {TORCH_UCC_FINALIZE, "FINALIZE"}, +}; + +class CommTraceLogger; + +class TORCH_API ProcessGroupUCCLogger : public torch::CustomClassHolder { + public: + ProcessGroupUCCLogger(); + ProcessGroupUCCLogger(std::string log_prefix, torch_ucc_phase_t phase); + + std::string getLogPrefix(torch_ucc_phase_t phase = TORCH_UCC_UNKNOWN); + void setLogPrefix(std::string log_prefix); + inline void setPhase(torch_ucc_phase_t phase) { + local_phase = phase; + } + + void initCommsTracer(); + void flushComms(int rank, int world_size); + std::shared_ptr trace_generator = nullptr; + + protected: + std::string log_prefix; + torch_ucc_phase_t local_phase = TORCH_UCC_UNKNOWN; + bool initialized_CommTraceLogger = false; +}; + +struct torch_ucc_oob_coll_info_t { + c10::intrusive_ptr store; + uint32_t comm_id; + int rank; + int size; + void* rbuf; + size_t msglen; + std::string getKey(std::string key) { + return std::to_string(comm_id) + key; + } +}; + +class CommBase { + public: + CommBase(const c10::intrusive_ptr& logger_) + : logger(logger_) {} + virtual void progress() = 0; + virtual void free_request(ucc_coll_req_h request) = 0; + virtual ~CommBase() {} + c10::intrusive_ptr logger; +}; +class CommUCC : public CommBase { + public: + ucc_lib_h lib{nullptr}; + ucc_context_h context{nullptr}; + + public: + void progress() override; + CommUCC( + std::shared_ptr oob, + const c10::intrusive_ptr& logger); + void free_request(ucc_coll_req_h request) override; + ~CommUCC(); +}; + +ucc_status_t oob_allgather( + void* sbuf, + void* rbuf, + size_t msglen, + void* coll_info, + void** req); + +ucc_status_t oob_allgather_test(void* req); + +ucc_status_t oob_allgather_free(void* req); + +// trim: remove spaces before and after the string view +// implementation borrowed from https://stackoverflow.com/a/17976541 +inline c10::string_view trim(c10::string_view s) { + auto wsfront = std::find_if_not( + s.begin(), s.end(), [](int c) { return std::isspace(c); }); + auto wsback = std::find_if_not(s.rbegin(), s.rend(), [](int c) { + return std::isspace(c); + }).base(); + return ( + wsback <= wsfront ? 
"" : s.substr(wsfront - s.begin(), wsback - wsfront)); +} + +inline std::string tolower(c10::string_view s) { + std::string result; + result.reserve(s.size()); + for (auto c : s) { + result.push_back(std::tolower(c)); + } + return result; +} + +inline std::vector parse_list(std::string list) { + std::vector result; + list = tolower(trim(list)); + while (!list.empty()) { + const auto end_pos = list.find_first_of(','); + const auto token = trim(list.substr(0, end_pos)); + result.push_back(std::string(token)); + list = (end_pos != c10::string_view::npos) ? list.substr(end_pos + 1) : ""; + } + return result; +} + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b2b1aa245f841eac7d61f2238bf7a8385846612 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace c10d { +namespace tcputil { + +#define CONNECT_SOCKET_OFFSET 1 + +inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) { + return WSAPoll(fdArray, fds, timeout); +} + +inline void addPollfd( + std::vector& fds, + int socket, + short events) { + fds.push_back({(SOCKET)socket, events}); +} + +inline struct ::pollfd getPollfd(int socket, short events) { + struct ::pollfd res = {(SOCKET)socket, events}; + return res; +} + +} // namespace tcputil +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h new file mode 100644 index 0000000000000000000000000000000000000000..fff2b45c4c952b99b3ba2f27696cb6d2b9c29326 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h @@ -0,0 +1,56 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
+ +#pragma once + +#include +#include + +#include + +namespace fmt { + +template <> +struct formatter { + constexpr decltype(auto) parse(format_parse_context& ctx) const { + return ctx.begin(); + } + + template + decltype(auto) format(const std::error_category& cat, FormatContext& ctx) + const { + if (std::strcmp(cat.name(), "generic") == 0) { + return fmt::format_to(ctx.out(), "errno"); + } else { + return fmt::format_to(ctx.out(), "{} error", cat.name()); + } + } +}; + +template <> +struct formatter { + constexpr decltype(auto) parse(format_parse_context& ctx) const { + return ctx.begin(); + } + + template + decltype(auto) format(const std::error_code& err, FormatContext& ctx) const { + return fmt::format_to( + ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message()); + } +}; + +} // namespace fmt + +namespace c10d { +namespace detail { + +inline std::error_code lastError() noexcept { + return std::error_code{errno, std::generic_category()}; +} + +} // namespace detail +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h new file mode 100644 index 0000000000000000000000000000000000000000..a00b6f70653aaa8d4456033800c5dc69942e3b03 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h @@ -0,0 +1,33 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#include +#include + +// Utility macro similar to C10_THROW_ERROR, the major difference is that this +// macro handles exception types defined in the c10d namespace, whereas +// C10_THROW_ERROR requires an exception to be defined in the c10 namespace. +#define C10D_THROW_ERROR(err_type, msg) \ + throw ::c10d::err_type( \ + {__func__, __FILE__, static_cast(__LINE__)}, msg) + +namespace c10d { + +using c10::DistNetworkError; + +class TORCH_API SocketError : public DistNetworkError { + using DistNetworkError::DistNetworkError; +}; + +class TORCH_API TimeoutError : public DistNetworkError { + using DistNetworkError::DistNetworkError; +}; + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..48ad7cefae9418ffc989a334aa8b2636ec110219 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace c10d { + +class TORCH_PYTHON_API PythonCommHook : public CommHookInterface { + public: + // Takes a state and a callable hook. The inputs are Python objects. + // The state is passed to the hook in runHook method, and it can be used to + // maintain and update any state information during the execution of the hook. + // The hook performs user-specified processing and returns a future indicating + // asychronous communication of gradients. 
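Before the PythonCommHook constructor continues below, a short sketch of how the error utilities shown above (error.h and exception.h in this diff) compose: detail::lastError() snapshots errno as a std::error_code, the fmt::formatter specializations render it, and C10D_THROW_ERROR raises a c10d exception type such as SocketError. The function name and the POSIX write() call are illustrative assumptions, and the c10d error/exception headers are presumed included.

#include <fmt/format.h>
#include <unistd.h>

// Hypothetical helper: writes one byte and converts an errno failure into a
// c10d::SocketError with a formatted message.
void send_byte_or_throw(int fd) {
  char byte = 0;
  if (::write(fd, &byte, 1) < 0) {
    // The std::error_code formatter above prints e.g. "(errno: 32 - Broken pipe)".
    C10D_THROW_ERROR(
        SocketError,
        fmt::format("write to peer failed {}", c10d::detail::lastError()));
  }
}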
+ PythonCommHook(py::object state, py::object hook) + : state_(std::move(state)), hook_(std::move(hook)) {} + + ~PythonCommHook() override; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; + + at::Tensor parseHookResult(const c10::IValue& result) override; + + private: + // Only needed for stateful communication. + py::object state_; + py::object hook_; +}; + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..acd8975c4d2db13cac2e988238a0a8a2a191df68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp @@ -0,0 +1,81 @@ +#pragma once +#include +#include + +namespace c10d { +constexpr int kUnsetTime = -1; + +inline int64_t current_time_in_nanos() { + return c10::getTime(); +} + +class TORCH_API Timer { + private: + // The timestamp of forward call start time in each iteration. + int64_t forward_start_time = kUnsetTime; + // The timestamp of backward computation start and end time in each + // iteration. + int64_t backward_compute_start_time = kUnsetTime; + int64_t backward_compute_end_time = kUnsetTime; + // The timestamp of first communication call start time in each iteration. + int64_t backward_comm_start_time = kUnsetTime; + // The timestamp of last communication call end time in each iteration. + int64_t backward_comm_end_time = kUnsetTime; + + public: + enum class Event { + kForwardStart, + kBackwardComputeStart, + kBackwardComputeEnd, + kBackwardCommStart, + kBackwardCommEnd, + }; + + // Record the current event, i.e., mark it as having occurred now. Default + // CPU implementation. + virtual void record(Event event) { + getTimeRef(event) = current_time_in_nanos(); + } + + // Return the difference between when two events occurred, in nanoseconds. + // Or nullopt if one of them hasn't been recorded. + virtual c10::optional measureDifference(Event start, Event end) = 0; + + virtual ~Timer() = default; + + // Return host-side timestamp, or nullopt if it has not yet been recorded. + c10::optional getTimestamp(Event event) { + auto time = getTimeRef(event); + if (time == kUnsetTime) { + return c10::nullopt; + } else { + return time; + } + } + + // Return host-side time member variable corresponding to the given event. + int64_t& getTimeRef(Event event) { + switch (event) { + case Event::kForwardStart: + return forward_start_time; + case Event::kBackwardComputeStart: + return backward_compute_start_time; + case Event::kBackwardComputeEnd: + return backward_compute_end_time; + case Event::kBackwardCommStart: + return backward_comm_start_time; + case Event::kBackwardCommEnd: + return backward_comm_end_time; + default: + TORCH_INTERNAL_ASSERT(false); + } + } +}; + +TORCH_DECLARE_TYPED_REGISTRY( + TimerRegistry, + c10::DeviceType, + Timer, + std::unique_ptr, + c10::Device); +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h new file mode 100644 index 0000000000000000000000000000000000000000..52832722304cf651b6333f849f29fd9d96a0fc42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h @@ -0,0 +1,93 @@ +// Copyright (c) Meta Platforms, Inc. 
and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace c10d { +namespace detail { + +class SocketOptions { + public: + SocketOptions& prefer_ipv6(bool value) noexcept { + prefer_ipv6_ = value; + + return *this; + } + + bool prefer_ipv6() const noexcept { + return prefer_ipv6_; + } + + SocketOptions& connect_timeout(std::chrono::seconds value) noexcept { + connect_timeout_ = value; + + return *this; + } + + std::chrono::seconds connect_timeout() const noexcept { + return connect_timeout_; + } + + private: + bool prefer_ipv6_ = true; + std::chrono::seconds connect_timeout_{30}; +}; + +class SocketImpl; + +class Socket { + public: + // This function initializes the underlying socket library and must be called + // before any other socket function. + static void initialize(); + + static Socket listen(std::uint16_t port, const SocketOptions& opts = {}); + + static Socket listenFromFd(int fd, std::uint16_t expected_port); + + static Socket connect( + const std::string& host, + std::uint16_t port, + const SocketOptions& opts = {}); + + Socket() noexcept = default; + + Socket(const Socket& other) = delete; + + Socket& operator=(const Socket& other) = delete; + + Socket(Socket&& other) noexcept; + + Socket& operator=(Socket&& other) noexcept; + + ~Socket(); + + Socket accept() const; + + int handle() const noexcept; + + std::uint16_t port() const; + + bool waitForInput(std::chrono::milliseconds timeout); + + private: + explicit Socket(std::unique_ptr&& impl) noexcept; + + std::unique_ptr impl_; +}; + +} // namespace detail + +} // namespace c10d diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h new file mode 100644 index 0000000000000000000000000000000000000000..18e9f67641e048fa78865716237c385dc5ba2321 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void AnnotateWarns(const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..87e86d8a7e4b2965a16422cd952418aef7e7db5e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h @@ -0,0 +1,12 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CheckStrictFusion(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h new file mode 100644 index 0000000000000000000000000000000000000000..1ee635b667f90a1d56a186650c3bb023ae542660 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h @@ -0,0 +1,144 @@ +/** + * Cache utils in this file is adapted from PyTorch/XLA + * 
https://github.com/pytorch/xla/blob/master/third_party/xla_client/cache.h + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +// Generic key and object cache with LRU expiration policy. The objects of type +// T will be stored as std::shared_ptr and taken and returned as such, by the +// cache API. +template < + typename K, + typename T, + typename H = std::hash, + typename E = std::equal_to> +class Cache { + public: + using TypePtr = std::shared_ptr; + using Element = std::pair; + + explicit Cache(size_t max_size) : max_size_(max_size) {} + + // Adds an object to the cache, unless it already exists. If the cache grows + // beyond the limit set during construction, the oldest used object will be + // removed from the cache. + TypePtr Add(K key, TypePtr object) { + if (!max_size_) { + return object; + } + std::lock_guard slock(lock_); + element_list_.emplace_front(Element(std::move(key), std::move(object))); + auto it = element_list_.begin(); + auto emplace_result = element_map_.emplace(&it->first, it); + if (!emplace_result.second) { + element_list_.erase(it); + DoLRU(emplace_result.first->second); + } else if (element_list_.size() > max_size_) { + Element* last = &element_list_.back(); + element_map_.erase(&last->first); + element_list_.pop_back(); + } + return emplace_result.first->second->second; + } + + // Retrieves the existing object if it exists. If it does, its position in + // the LRU list gets moved to the head of the list. + // Returns nullptr if no object with the specified key is found within the + // cache. + TypePtr Get(const K& key) { + if (!max_size_) { + return nullptr; + } + std::lock_guard slock(lock_); + auto it = element_map_.find(&key); + if (it == element_map_.end()) { + return nullptr; + } + DoLRU(it->second); + return it->second->second; + } + + TypePtr GetLatest() { + std::lock_guard g(lock_); + TORCH_CHECK(!element_list_.empty()); + return element_list_.front().second; + } + + bool Erase(const K& key) { + if (!max_size_) { + return false; + } + std::lock_guard slock(lock_); + auto it = element_map_.find(&key); + if (it == element_map_.end()) { + return false; + } + auto lit = it->second; + element_map_.erase(it); + element_list_.erase(lit); + return true; + } + + void Clear() { + if (!max_size_) { + return; + } + std::lock_guard slock(lock_); + element_map_.clear(); + element_list_.clear(); + } + + int Numel() const { + if (!max_size_) { + return 0; + } + std::lock_guard g(lock_); + TORCH_CHECK(element_map_.size() == element_list_.size()); + return element_map_.size(); + } + + private: + using ElementList = std::list; + + struct Hasher { + size_t operator()(const K* key) const { + return hasher(*key); + } + + H hasher; + }; + + struct Equaler { + bool operator()(const K* k1, const K* k2) const { + return equaler(*k1, *k2); + } + + E equaler; + }; + + using ElementMap = std:: + unordered_map; + + void DoLRU(typename ElementList::iterator it) { + element_list_.splice(element_list_.begin(), element_list_, it); + } + + mutable std::mutex lock_; + const size_t max_size_ = 0; + ElementList element_list_; + ElementMap element_map_; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h new file mode 100644 index 0000000000000000000000000000000000000000..ef4b81e1ca9c5dd7878603f4efcf8f381825dc4b --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace lazy { + +TORCH_API std::function()>& +GetPythonFramesFunction(); + +TORCH_API std::string GetFirstUserFrameInPython(); + +class TORCH_API DebugUtil { + public: + enum GraphFormat { + kText, + kDot, + kBackend, + }; + + static GraphFormat GetDefaultGraphFormat(); + + // Dumps the current Python frame and the IR Graph whose roots are the IR + // values held at the tensors. If indices is not nullptr, it selects the + // indices of the tensors whose graph will be emitted. + static std::string GetTensorsGraphInfo( + c10::ArrayRef tensors, + const std::vector* indices, + GraphFormat format = GetDefaultGraphFormat()); + + // If the environment variable LTC_SAVE_TENSORS_FILE is set to the proper + // output path, an instance of the report returned by GetTensorsGraphInfo() is + // saved. + static void SaveTensorsGraphInfo( + const char* name, + c10::ArrayRef tensors, + const std::vector* indices, + GraphFormat format = GetDefaultGraphFormat()); + + static bool ExperimentEnabled(const std::string& name); +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h new file mode 100644 index 0000000000000000000000000000000000000000..8af7f4fae44ecd7cfec27236d930e129f0f68117 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h @@ -0,0 +1,59 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +/** + * The goal of "dynamic" Nodes is to patch a hole in our tracing. + * Previously, if a user called `sizes` on a Tensor, it would leak out + * of our tracing system, as `sizes` returns a torch.Size or an int. To + * prevent this from happening, we introduce DimensionNode, a new type + * of Node that abstracts the operation of getting the dimensions of a + * Tensor. + * + * Consider the following example: + * ``` + * numel = x.shape()[0] * x.shape()[1] + * ``` + * + * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode), + * and the multiplication of the two SizeNodes will be represented by + * a SizeMul (also a subclass of DimensionNode). Through this, we can + * prevent `numel` from being represented as a Python int and thus + * burned into the Graph. 
+ */ + +class TORCH_API DimensionNode { + public: + virtual bool isSymbolic() const { + return false; + }; + virtual int64_t getDynamicValue() const { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t getStaticValue() const { + TORCH_CHECK(false, "NYI"); + }; + virtual ~DimensionNode() = default; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h new file mode 100644 index 0000000000000000000000000000000000000000..bb6a779555f22d64a63a70a1398614df530715db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h @@ -0,0 +1,238 @@ +/** + * Hash utils in this file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/e0e5f937a0ba8d904f9608137dc8c51ba439df2d/third_party/xla_client/util.h + */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +using size_t = std::size_t; + +class TORCH_API hash_t : public c10::uint128 { + public: + // Swich from typedef hash_t = uint128 to provide explicit casters + hash_t(int8_t val) : uint128(static_cast(val)) {} + hash_t(int16_t val) : uint128(static_cast(val)) {} + hash_t(int32_t val) : uint128(static_cast(val)) {} + hash_t(int64_t val) : uint128(static_cast(val)) {} + hash_t(uint32_t val) : uint128(val) {} + hash_t(uint64_t val) : uint128(val) {} + hash_t(uint128 val) : uint128(val) {} + hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {} + hash_t() : uint128() {} +}; + +// Std* functions use 64-bit hash +size_t TORCH_API StdDataHash(const void* data, size_t size); + +size_t TORCH_API StdHashCombine(uintmax_t a, uintmax_t b); + +// Other functions are all 128-bit +hash_t TORCH_API HashBlock(const void* data, size_t n, const hash_t& seed); + +hash_t TORCH_API DataHash(const void* data, size_t size); + +hash_t TORCH_API HashCombine(const hash_t& a, const hash_t& b); + +size_t TORCH_API HashReduce(const hash_t& a); + +// Returns a string representation of a hash +std::string TORCH_API HashToString(const hash_t& a); + +struct HashReducer { + size_t operator()(const hash_t& value) const { + return HashReduce(value); + } +}; + +static inline hash_t StringHash(const char* data) { + return DataHash(data, std::strlen(data)); +} + +// Automatic templated implementation for 'arithmetic' types +template < + typename T, + typename std::enable_if::value>::type* = nullptr> +hash_t Hash(const T& value) { + return DataHash(&value, sizeof(value)); +} + +// added because on macos builds the vector specialization +// breaks falling through to the templated arithmetic types above +hash_t TORCH_API Hash(const std::vector& value); + +// Specialiazed implementations for proprietary types +static inline hash_t Hash(const c10::ScalarType& value) { + return DataHash(&value, sizeof(value)); +} + +static inline hash_t Hash(const c10::MemoryFormat& value) { + return DataHash(&value, sizeof(value)); +} + +static inline hash_t Hash(const c10::DeviceType& value) { + return DataHash(&value, sizeof(value)); +} + +static inline hash_t Hash(const c10::Device& value) { + return HashCombine(Hash(value.type()), Hash(value.index())); +} + +static inline hash_t Hash(const c10::Layout& value) { + return DataHash(&value, sizeof(value)); +} + +static inline hash_t Hash(const c10::Scalar& value) { + switch (value.type()) { + case c10::ScalarType::ComplexDouble: + 
return Hash(value.toComplexDouble()); + case c10::ScalarType::Double: + return Hash(value.toDouble()); + case c10::ScalarType::Long: + return Hash(value.toLong()); + case c10::ScalarType::Bool: + return Hash(value.toBool()); + default: + TORCH_INTERNAL_ASSERT(false, "Unknown scalar type.", value.type()); + } +} + +static inline hash_t TensorHash(const at::Tensor& tensor) { + at::Tensor ctensor = tensor.contiguous(); + int64_t size = ctensor.numel() * ctensor.element_size(); + switch (ctensor.scalar_type()) { + case at::ScalarType::Bool: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Byte: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Char: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Short: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Int: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Long: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Float: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Double: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::BFloat16: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::Half: + return DataHash(ctensor.const_data_ptr(), size); + case at::ScalarType::ComplexFloat: + return DataHash(ctensor.const_data_ptr>(), size); + case at::ScalarType::ComplexDouble: + return DataHash(ctensor.const_data_ptr>(), size); + default: + TORCH_INTERNAL_ASSERT( + false, "Unsupported scalar type:", ctensor.scalar_type()); + } +} + +static inline hash_t Hash(const std::string& value) { + return DataHash(value.data(), value.size()); +} + +static inline hash_t Hash(const c10::string_view& value) { + return DataHash(value.data(), value.size()); +} + +static inline hash_t Hash(const at::Generator& value) { + return TensorHash(value.get_state()); +} + +// Taken from glibc's implementation of hashing optionals, +// we want to include a contribution to the hash to distinguish +// cases where one or another option was null, but we hope it doesn't +// collide with an actually scalar value. +// +// Use an arbitrary randomly-selected 64-bit integer rather than a +// small constant that we then hash at runtime so we don't have to +// repeatedly hash a constant at runtime. +static const int64_t kNullOpt = 0x8655d738f3678dda; + +// Hashing for c10::optional types contributes to hash +// for optionals with null value, important to distinguish +// between and cases +template +hash_t Hash(const c10::optional& value) { + if (value.has_value()) { + return Hash(value.value()); + } else { + return kNullOpt; + } +} + +// Hashing of containers +// Forward declare to allow hashes of vectors of vectors to work. +template +hash_t ContainerHash(const T& values); + +template +hash_t Hash(const std::vector& values) { + return ContainerHash(values); +} + +// Need a special case for optional? 
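The hashing helpers above are what the lazy backends use to key their node and shape caches: per-field hashes are combined with HashCombine and, where a size_t is needed, folded down with HashReduce. A minimal sketch for a translation unit that includes this header; the function name and the hashed values are illustrative. The optional-container overload the comment above asks about follows.

#include <string>
#include <vector>

// Hypothetical helper: combines several field hashes into one 128-bit key.
void hash_example() {
  using torch::lazy::Hash;
  using torch::lazy::HashCombine;
  using torch::lazy::hash_t;

  std::vector<int64_t> sizes = {2, 3, 4};
  hash_t h = HashCombine(Hash(std::string("aten::relu")), Hash(sizes));
  h = HashCombine(h, Hash(c10::ScalarType::Float));

  // Fold to size_t, e.g. for an std::unordered_map keyed via HashReducer.
  size_t bucket_key = torch::lazy::HashReduce(h);
  (void)bucket_key;
}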
+template +hash_t Hash(const c10::optional>& value) { + if (value.has_value()) { + return ContainerHash(value.value()); + } else { + return kNullOpt; + } +} + +template +hash_t Hash(const std::set& values) { + return ContainerHash(values); +} + +template +hash_t Hash(const std::pair& values) { + return HashCombine(Hash(values.first), Hash(values.second)); +} + +static inline hash_t Hash(const hash_t& value) { + return value; +} + +template +hash_t Hash(c10::ArrayRef values) { + return ContainerHash(values); +} + +template +hash_t ContainerHash(const T& values) { + hash_t h(static_cast(0x85ebca77c2b2ae63)); + for (const auto& value : values) { + h = HashCombine(h, Hash(value)); + } + return h; +} + +// Varargs hashing +template +hash_t MHash() { + return hash_t(static_cast(0x165667b19e3779f9)); +} + +template +hash_t MHash(T value, Targs... Fargs) { + return HashCombine(Hash(value), MHash(Fargs...)); +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ce62f2e51f539d04cf78e05a8a18677c1aca6f7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h @@ -0,0 +1,52 @@ +#pragma once + +#include + +#include + +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API OpKindWrapper { + public: + explicit OpKindWrapper(const char* name) : name_(name) {} + + const OpKind& operator*() const { + return get(); + } + + operator OpKind() const { + return get(); + } + + private: + const OpKind& get() const { + c10::call_once(once_, [this]() { op_kind_ = OpKind::Get(name_); }); + return op_kind_; + } + + const char* name_; + mutable OpKind op_kind_; + mutable c10::once_flag once_; +}; + +const OpKindWrapper ltc_all_to_all("lazy_tensors::all_to_all"); +const OpKindWrapper ltc_cast("lazy_tensors::cast"); +const OpKindWrapper ltc_collective_permute("lazy_tensors::collective_permute"); +const OpKindWrapper ltc_cross_replica_sum("lazy_tensors::cross_replica_sum"); +const OpKindWrapper ltc_device_data("lazy_tensors::device_data"); +const OpKindWrapper ltc_get_dimensions_size( + "lazy_tensors::ltc_get_dimensions_size"); +const OpKindWrapper ltc_moving_average("lazy_tensors::moving_average"); +const OpKindWrapper ltc_nms("lazy_tensors::nms"); +const OpKindWrapper ltc_not_supported("lazy_tensors::not_supported"); +const OpKindWrapper ltc_replication_pad("lazy_tensors::replication_pad"); +const OpKindWrapper ltc_replication_pad_backward( + "lazy_tensors::replication_pad_backward"); +const OpKindWrapper ltc_tensor_data("lazy_tensors::tensor_data"); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h new file mode 100644 index 0000000000000000000000000000000000000000..0f40456e1bf56b3ec24c9a33f6f1f6f41c1b8445 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h @@ -0,0 +1,298 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +C10_DECLARE_bool(ltc_enable_dynamic_shapes); + +namespace torch { +namespace lazy { + +static const hash_t 
kHashSeed(static_cast(0x5a2d296e9)); + +class Node; +struct Output; +struct Value; + +using NodePtr = std::shared_ptr; + +// The Kind of operation a Node can be associated to. +struct TORCH_API OpKind { + OpKind() = default; + explicit OpKind(c10::Symbol op) : op(op) {} + + bool operator==(const OpKind& rhs) const { + return op == rhs.op; + } + bool operator!=(const OpKind& rhs) const { + return !operator==(rhs); + } + bool operator<(const OpKind& rhs) const { + return c10::unique_t(op) < c10::unique_t(rhs.op); + } + + hash_t hash() const; + + std::string ToString() const { + return op.toQualString(); + } + + // Retrieves an existing operation object, or creates a new one. Operations + // that are specific to lazy tensors, should live within the 'lazy_tensors::' + // namespace. + static OpKind Get(const std::string& name); + + c10::Symbol op; +}; + +inline std::ostream& operator<<(std::ostream& stream, const OpKind& op) { + stream << op.ToString(); + return stream; +} + +using OpList = c10::ArrayRef; + +hash_t OperandHashes( + const OpList& operands, + const hash_t& seed, + bool bakeInSizes); +// A node in the graph. Nodes for operations which require extra data to be +// stored for lowering should inherit from this class and add an operation +// specific member there. For example, a constant might create a new +// NodeConstant class (inheriting from Node) with an extra lazy_tensors::Literal +// field, or a tensor value might create a new NodeTensor with a computation +// client data handle in it. +class TORCH_API Node { + public: + static bool enableDynamicShape(); + + // Creates a new node with the given op name. The op is a unique identifier + // for the operation. The num_outputs tells how many outputs a given operation + // generates. + // + // None leaf node's node_hash does not contains shape information always. + // So we pass in the hash value rather than a function. + Node(OpKind op, size_t num_outputs); + + // Construct node with operands and shapes + Node( + OpKind op, + OpList operands, + std::vector&& shapes, + size_t num_outputs = 1); + + // Construct node with operands and shape generated from a function + Node( + OpKind op, + OpList operands, + const std::function& shape_fn, + size_t num_outputs = 1); + + // Construct node with operands and no shape + Node(OpKind op, OpList operands, size_t num_outputs = 1); + + // Construct node with shape and no operands + Node(OpKind op, Shape shape, size_t num_outputs = 1); + + virtual ~Node(); + + const OpKind& op() const { + return op_; + } + + size_t num_outputs() const { + return num_outputs_; + } + + // Retrieves the full shape of the IR Node. + virtual c10::ArrayRef shapes() const; + + virtual const Shape& shape(size_t output_index = 0) const; + + // Add the shape computed by the shape_fn + void addComputedShape(const std::function& shape_fn); + + // Compute the shape using the provided shape_fn if not previously cached + Shape computeShape(const std::function& shape_fn); + + virtual const std::vector& operands() const; + + virtual const Output& operand(size_t i) const; + + // Gets operand at index i if index is valid, or kNullOutput otherwise. 
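Before the nullable_operand accessor documented just above, a short sketch of the OpKind wrapper declared earlier in this header: it is a thin, cheaply copyable wrapper around c10::Symbol that supports comparison, hashing, and printing. The function name and symbol strings are illustrative.

#include <string>

// Hypothetical helper: basic OpKind construction and queries.
void opkind_example() {
  torch::lazy::OpKind relu = torch::lazy::OpKind::Get("aten::relu");
  torch::lazy::OpKind data = torch::lazy::OpKind::Get("lazy_tensors::device_data");

  bool same = (relu == data);               // false: different symbols
  std::string printable = relu.ToString();  // "aten::relu"
  torch::lazy::hash_t h = relu.hash();      // feeds into node hashing
  (void)same;
  (void)printable;
  (void)h;
}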
+ virtual const Output& nullable_operand(size_t i) const; + + // Returns the hash of the dag used to look up the compiled graph + virtual hash_t hash() const = 0; + + // Returns the hash of the dag used to for shape caching + virtual hash_t shapeHash() const = 0; + + const MetaData& metadata() const { + return metadata_; + } + + UserMetaData* user_metadata() const { + return user_metadata_.get(); + } + + std::shared_ptr SetUserMetadata( + std::shared_ptr user_meta) { + std::swap(user_metadata_, user_meta); + return user_meta; + } + + virtual std::string ToString() const; + + private: + // The ID of the operation captured by this node. + OpKind op_; + size_t num_outputs_ = 1; + + // The IR specific metadata attached to the IR node. + MetaData metadata_; + // The IR framework user can attach a user defined metadata object deriving + // from UserMetaData. + std::shared_ptr user_metadata_; + + protected: + // Adds node's index output number as operand. + void AddOperand(NodePtr node, size_t index = 0); + + std::vector shapes_; + // A node holds a real reference to its operands. + std::vector operands_; + // Outputs do not hold references on the nodes, and neither do the uses, since + // otherwise we get into circular reference counting. + std::vector operands_as_outputs_; +}; + +inline std::ostream& operator<<(std::ostream& stream, const Node& node) { + stream << node.ToString(); + return stream; +} + +// Note: Keep this version of NodeCast for smooth PyTorch/XLA migration, and +// clean up once the migration is done. +template +const T* NodeCast(const Node* node, OpKind op) { + if (op != node->op()) { + return nullptr; + } +#ifdef NDEBUG + return static_cast(node); +#else + return &dynamic_cast(*node); +#endif +} + +template +const T* NodeCast(const Node* node) { + if (T::ClassOpKind() != node->op()) { + return nullptr; + } + // TODO: Some IR classes share the same opkind, such as Mean and MeanDim, so + // static_cast is not safe here. Unless we have opkind unique for each class, + // we have to use dynamic_cast here. + return dynamic_cast(node); +} + +// Represents a specific output produced by a node. Since the output of a node +// can be composed by multiple outputs, the node+index coordinates fully qualify +// each single output. +struct TORCH_API Output { + struct Hasher { + size_t operator()(const Output& output) const; + }; + + Output() = default; + explicit Output(const Node* node, size_t index = 0) + : node(node), index(index) {} + + hash_t hash() const; + hash_t shapeHash() const; + + bool operator==(const Output& rhs) const { + return node == rhs.node && index == rhs.index; + } + + // To compare the operands of to-be-constructed node and to-be-reused node + bool operator==(const Value& rhs) const; + + bool operator!=(const Output& rhs) const { + return !operator==(rhs); + } + + const Shape& shape() const { + return node->shape(index); + } + + std::string ToString() const; + + // The node providing the output. + const Node* node{nullptr}; + // The index in the node's output this output refers to. + size_t index{0}; +}; + +inline std::ostream& operator<<(std::ostream& stream, const Output& output) { + stream << output.ToString(); + return stream; +} + +template +using OutputMap = std::unordered_map; + +// Represents an input/operand for a Node object. 
+struct TORCH_API Value { + Value() = default; + /* implicit */ Value(NodePtr&& node, size_t index = 0) + : node(std::move(node)), index(index) {} + /* implicit */ Value(const NodePtr& node, size_t index = 0) + : node(node), index(index) {} + + hash_t hash() const; + hash_t shapeHash() const; + + operator bool() const { + return node != nullptr; + } + + operator Output() const { + return Output(node.get(), index); + } + + const Shape& shape() const { + return node->shape(index); + } + + Node* operator->() const { + return node.get(); + } + + NodePtr node; + size_t index = 0; +}; + +} // namespace lazy +} // namespace torch + +namespace c10 { +// Explicit template instantiation to make ArrayRef work +template class at::ArrayRef; +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h new file mode 100644 index 0000000000000000000000000000000000000000..4b4e1e0749b24f619e2d97f41fba31b24d8fd31f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace lazy { + +class BackendDevice; + +class TORCH_API DumpUtil { + public: + static std::string ToDot(c10::ArrayRef nodes); + + static std::string PostOrderToDot( + c10::ArrayRef post_order, + c10::ArrayRef roots); + + static std::string ToText(c10::ArrayRef nodes); + + static std::string PostOrderToText( + c10::ArrayRef post_order, + c10::ArrayRef roots); + + static std::string ToBackend( + c10::ArrayRef values, + const BackendDevice& device); +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h new file mode 100644 index 0000000000000000000000000000000000000000..ea413fcfb8263ec2ac80ca090b24e0e35f1529ef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace lazy { +struct SourceLocation { + std::string file; + std::string function; + int line = -1; +}; + +TORCH_API void EmitShortFrameInfo( + std::ostream& stream, + const std::vector& frames); + +TORCH_API std::ostream& operator<<( + std::ostream& stream, + const std::vector& frames); + +// The base class for user defined metadata which is possible to attach to IR +// nodes. +struct TORCH_API UserMetaData { + virtual ~UserMetaData() = default; +}; + +struct TORCH_API MetaData { + std::string scope; + std::vector frame_info; +}; + +// TODO(whc) is this going to be used outside of in IR decompositions? +// RAII data structure to be used a stack variable to enter a new IR scope. IR +// scope names will appear in the IR and will help identifying the source of the +// single IR nodes. 
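A short sketch of the RAII usage pattern the comment above describes for the ScopePusher declared just below; the scope name and the traced function are illustrative. Nodes created while the pusher is alive can carry the scope string in MetaData::scope when metadata collection is active (cf. GetMetaDataIfDebugging below).

// Hypothetical helper: names the IR scope for everything traced inside it.
void traced_block() {
  torch::lazy::ScopePusher scope("encoder/layer_0");
  // ... build lazy tensor ops here; their IR nodes can be attributed to
  // "encoder/layer_0" when graphs are dumped ...
}  // scope popped automatically when `scope` is destroyed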
+struct TORCH_API ScopePusher { + explicit ScopePusher(const std::string& name); + ~ScopePusher(); + + static void ResetScopes(); +}; + +TORCH_API MetaData GetMetaDataIfDebugging(); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h new file mode 100644 index 0000000000000000000000000000000000000000..df3d0fd7ac406dce34e0e9ce4cab85ab6e3f130e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace lazy { + +class TORCH_API Util { + public: + // Tracks the emission status of the nodes during the post-order generation. + // It helps tracking loops within the computation graphs. + enum EmitStatus { + kNotEmitted, + kEmitting, + kEmitted, + }; + + using EmissionMap = std::unordered_map; + + // Computes the post order from the given node, without using recursion. The + // emission map can be used as saved state, for multiple separate calls to + // this API. The returned post-order can be empty if the node has already been + // emitted inside the emission map. An error is generated if a loop is + // detected. + static std::vector ComputePostOrder( + const Node* node, + EmissionMap* emap); + + static std::vector ComputePostOrder( + c10::ArrayRef nodes, + EmissionMap* emap); + + // Same as above, but computes the post order on the set of nodes specified as + // argument. + static std::vector ComputePostOrder( + c10::ArrayRef nodes); + + // Retrieves the number of nodes within the graph whose sink are passed in the + // nodes argument. + static size_t GetGraphSize(c10::ArrayRef nodes); +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h new file mode 100644 index 0000000000000000000000000000000000000000..b651ecea24ec334bf5de4591552c9f733f4dc718 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h @@ -0,0 +1,286 @@ +/** + * This file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace lazy { + +struct TORCH_API Sample { + Sample() = default; + Sample(int64_t timestamp_ns, double value) + : timestamp_ns(timestamp_ns), value(value) {} + + int64_t timestamp_ns = 0; + double value = 0; +}; + +using MetricReprFn = std::function; + +// Class used to collect time-stamped numeric samples. The samples are stored in +// a circular buffer whose size can be configured at constructor time. +class TORCH_API MetricData { + public: + // Creates a new MetricData object with the internal circular buffer storing + // max_samples samples. The repr_fn argument allow to specify a function which + // pretty-prints a sample value. + MetricData(MetricReprFn repr_fn, size_t max_samples); + + // Returns the total values of all the samples being posted to this metric. + double Accumulator() const; + + size_t TotalSamples() const; + + void AddSample(int64_t timestamp_ns, double value); + + // Returns a vector with all the current samples, from the oldest to the + // newer. 
If accumulator is not nullptr, it will receive the current value of + // the metrics' accumulator (the sum of all posted values). If total_samples + // is not nullptr, it will receive the count of the posted values. + std::vector Samples(double* accumulator, size_t* total_samples) const; + + std::string Repr(double value) const { + return repr_fn_(value); + } + + void Reset(); + + bool IsValid() const { + return TotalSamples() > 0; + } + + private: + mutable std::mutex lock_; + MetricReprFn repr_fn_; + size_t count_ = 0; + std::vector samples_; + double accumulator_ = 0.0; +}; + +// Counters are a very lightweight form of metrics which do not need to track +// sample time. +class TORCH_API CounterData { + public: + CounterData() : value_(0) {} + + void AddValue(int64_t value) { + value_ += value; + } + + int64_t Value() const { + return value_; + } + + void Reset() { + value_ = 0; + } + + bool IsValid() const { + return value_ > 0; + } + + private: + std::atomic value_; +}; + +class TORCH_API MetricsArena { + public: + static MetricsArena* Get(); + + void ResetCounters(); + void ResetMetrics(); + + // Registers a new metric in the global arena. + void RegisterMetric( + const std::string& name, + MetricReprFn repr_fn, + size_t max_samples, + std::shared_ptr* data); + + void RegisterCounter( + const std::string& name, + std::shared_ptr* data); + + void ForEachMetric( + const std::function& metric_func); + + void ForEachCounter( + const std::function& + counter_func); + + std::vector GetMetricNames(); + + MetricData* GetMetric(const std::string& name); + + std::vector GetCounterNames(); + + CounterData* GetCounter(const std::string& name); + + private: + std::mutex lock_; + std::map> metrics_; + std::map> counters_; +}; + +// Emits the value in a to_string() conversion. +TORCH_API std::string MetricFnValue(double value); +// Emits the value in a humanized bytes representation. +TORCH_API std::string MetricFnBytes(double value); +// Emits the value in a humanized time representation. The value is expressed in +// nanoseconds EPOCH time. +TORCH_API std::string MetricFnTime(double value); + +// The typical use of a Metric is one in which it gets created either in a +// global scope context: +// static Metric* metric = new Metric("RpcCount"); +// Or within a function scope: +// void MyFunction(...) { +// static Metric* metric = new Metric("RpcCount"); +// ... +// metric->AddSample(ts_nanos, some_value); +// } +class TORCH_API Metric { + public: + explicit Metric( + std::string name, + MetricReprFn repr_fn = MetricFnValue, + size_t max_samples = 0); + + const std::string& Name() const { + return name_; + } + + double Accumulator() const; + + void AddSample(int64_t timestamp_ns, double value); + + void AddSample(double value); + + std::vector Samples(double* accumulator, size_t* total_samples) const; + + std::string Repr(double value) const; + + private: + MetricData* GetData() const; + + std::string name_; + MetricReprFn repr_fn_; + size_t max_samples_; + mutable std::shared_ptr data_ptr_; + mutable std::atomic data_; +}; + +// A Counter is a lightweight form of metric which tracks an integer value which +// can increase or decrease. +// A typical use is as: +// static Counter* counter = new Counter("MyCounter"); +// ... 
+// counter->AddValue(+1); +class TORCH_API Counter { + public: + explicit Counter(std::string name); + + void AddValue(int64_t value) { + GetData()->AddValue(value); + } + + int64_t Value() const { + return GetData()->Value(); + } + + private: + CounterData* GetData() const; + + std::string name_; + mutable std::shared_ptr data_ptr_; + mutable std::atomic data_; +}; + +#define TORCH_LAZY_COUNTER(name, value) \ + do { \ + static ::torch::lazy::Counter* __counter = \ + new ::torch::lazy::Counter(name); \ + __counter->AddValue(value); \ + } while (0) + +#define TORCH_LAZY_FN_COUNTER(ns) TORCH_LAZY_COUNTER(c10::str(ns, __func__), 1) + +#define TORCH_LAZY_VALUE_METRIC(name, value) \ + do { \ + static ::torch::lazy::Metric* __metric = \ + new ::torch::lazy::Metric(name, torch::lazy::MetricFnValue); \ + __metric->AddSample(value); \ + } while (0) + +// Creates a report with the current metrics statistics. +TORCH_API std::string CreateMetricReport(); + +// Creates a report with the selected metrics statistics. +TORCH_API std::string CreateMetricReport( + const std::vector& counter_names, + const std::vector& metric_names); + +// Returns the currently registered metric names. Note that the list can grow +// since metrics are usually function intialized (they are static function +// variables). +TORCH_API std::vector GetMetricNames(); + +// Retrieves the metric data of a given metric, or nullptr if such metric does +// not exist. +TORCH_API MetricData* GetMetric(const std::string& name); + +// Returns the currently registered counter names. Note that the list can grow +// since counters are usually function intialized (they are static function +// variables). +TORCH_API std::vector GetCounterNames(); + +// Retrieves the counter data of a given counter, or nullptr if such counter +// does not exist. +TORCH_API CounterData* GetCounter(const std::string& name); + +// Retrieves the current EPOCH time in nanoseconds. +TORCH_API int64_t NowNs(); + +// Scope based utility class TORCH_API to measure the time the code takes within +// a given C++ scope. 
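The counter and metric macros above combine naturally in op implementations. A hypothetical example (the names "lazy::", "MyOp", and "InputBytes" are made up; TORCH_LAZY_TIMED is defined just below):

void MyOp(int64_t input_bytes) {
  TORCH_LAZY_FN_COUNTER("lazy::");  // bumps the counter "lazy::MyOp" by 1
  TORCH_LAZY_TIMED("MyOpTime");     // samples the wall time spent in this scope
  TORCH_LAZY_VALUE_METRIC("InputBytes", static_cast<double>(input_bytes));
  // ... real work ...
}
// Later, e.g. from a debugging hook:
//   std::cout << torch::lazy::CreateMetricReport();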
+class TORCH_API TimedSection { + public: + explicit TimedSection(Metric* metric) : metric_(metric), start_(NowNs()) {} + + ~TimedSection() { + int64_t now = NowNs(); + metric_->AddSample(now, now - start_); + } + + double Elapsed() const { + return 1e-9 * static_cast(NowNs() - start_); + } + + private: + Metric* metric_; + int64_t start_; +}; + +#define TORCH_LAZY_TIMED(name) \ + static torch::lazy::Metric* timed_metric = \ + new torch::lazy::Metric(name, torch::lazy::MetricFnTime); \ + torch::lazy::TimedSection timed_section(timed_metric) + +#define TORCH_LAZY_FN_COUNTER_TIMED_TRACING(ns) \ + TORCH_LAZY_FN_COUNTER(ns); \ + TORCH_LAZY_TIMED("LazyTracing") + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3abb6cb3b10858a13163957c992bba80fe9c2e27 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +TORCH_API NodePtr operator+(const Value& node1, const Value& node2); +TORCH_API NodePtr operator-(const Value& node1, const Value& node2); +TORCH_API NodePtr operator*(const Value& node1, const Value& node2); +TORCH_API NodePtr operator/(const Value& node1, const Value& node2); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..cc5d5bdbe25bc3412d9507dad3da5631d72ae818 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h @@ -0,0 +1,41 @@ +#include + +#include +#include + +namespace torch { +namespace lazy { + +TORCH_API bool StrideIsSupported(c10::ArrayRef stride); + +TORCH_API std::vector GetArrayStridePermutation( + c10::ArrayRef stride); + +TORCH_API Shape MakeDiagonalShape( + const Shape& shape, + int64_t offset, + int64_t dim1, + int64_t dim2); + +TORCH_API Shape +MakePermuteShape(const Shape& source_shape, c10::ArrayRef permutation); + +TORCH_API Shape MakeSelectShape( + const Shape& shape, + int64_t dim, + int64_t start, + int64_t end, + int64_t stride); + +TORCH_API int64_t GetStride(int64_t start, int64_t end, int64_t stride); + +TORCH_API std::vector BuildSqueezedDimensions( + c10::ArrayRef dimensions, + int64_t squeeze_dim); + +TORCH_API std::vector BuildUnsqueezedDimensions( + c10::ArrayRef dimensions, + int64_t squeeze_dim); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h new file mode 100644 index 0000000000000000000000000000000000000000..7a368301035e9fd8861458ef902a4012405a9590 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace lazy { + +TORCH_API std::vector InversePermutation( + c10::ArrayRef input_permutation); + +TORCH_API bool IsPermutation(c10::ArrayRef permutation); + +// Gathers the 
input using the order specified by the permutation. For each i, +// output[i] = dimensions[permutation[i]]. The given permutation must be the +// same size as the input. +template +std::vector PermuteDimensions( + c10::ArrayRef permutation, + const Container& dimensions) { + using T = typename Container::value_type; + TORCH_CHECK( + dimensions.size() == permutation.size(), + "Invalid permutation specified. dimensions.size() != permutation.size() (", + dimensions.size(), + " vs. ", + permutation.size(), + ")"); + TORCH_CHECK( + IsPermutation(permutation), + "Invalid permutation specified. Permutation is not permutation"); + std::vector output(dimensions.size()); + for (const auto i : c10::irange(permutation.size())) { + output[i] = dimensions[permutation[i]]; + } + return output; +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h new file mode 100644 index 0000000000000000000000000000000000000000..1c6b4d5bb3d811e2c31a7f5392362cfd2c528949 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h @@ -0,0 +1,80 @@ +#pragma once + +#include +#include + +#include +#include +#include + +C10_DECLARE_bool(ltc_enable_symbolic_shapes); + +namespace torch { +namespace lazy { + +class TORCH_API Shape { + public: + Shape() = default; + + Shape( + at::ScalarType scalar_type, + c10::ArrayRef sizes, + c10::optional> is_symbolic = c10::nullopt); + + std::string to_string() const; + + c10::ScalarType scalar_type() const { + return scalar_type_; + } + void set_scalar_type(at::ScalarType value) { + scalar_type_ = value; + } + + int64_t dim() const { + return sizes_.size(); + } + c10::ArrayRef sizes() const { + return sizes_; + } + int64_t size(int64_t dim) const { + return sizes_.at(dim); + } + void set_size(int64_t dim, int64_t size) { + sizes_.at(dim) = size; + } + + const c10::optional>& is_symbolic() const { + return is_symbolic_; + } + + // Makes a copy with symbolic dims applied + Shape with_symbolic_dims( + c10::optional> symbolic_dims) const; + + size_t numel() const; + hash_t hash(bool bakeInSizes) const; + + bool operator==(const Shape& other) const; + + private: + c10::ScalarType scalar_type_{c10::ScalarType::Undefined}; + + // Sizes are the upper bound sizes for a tensor, used by XLA. 
+ std::vector sizes_; + // Stores which dimmensions are symbolic + // If nullopt, either it hasn't been initialized or the symbolic + // dimmensions are not calculatable + c10::optional> is_symbolic_ = c10::nullopt; +}; + +TORCH_API std::ostream& operator<<(std::ostream& out, const Shape& shape); + +TORCH_API bool symbolicShapeEnabled(); +// Calculate and applies symbolic shapes onto the +// Shape objects passed to result_shapes +TORCH_API void applySymbolicShapesOnLT( + const char* schema_str, + std::vector args, + std::vector& result_shapes); +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..a8388a0b223576af63a665c230da43336b3be858 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { +// Turn clang-format off, as we rely on the whole signature being on one line +// for codegen. +// clang-format off +TORCH_API std::vector compute_shape__adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API std::vector compute_shape__adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API std::vector compute_shape__adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API std::vector compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API std::vector compute_shape_abs(const at::Tensor & self); +TORCH_API std::vector compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out); +TORCH_API std::vector compute_shape_bernoulli(const at::Tensor & self, c10::optional generator); +TORCH_API std::vector compute_shape_bernoulli(const at::Tensor & self, double p, c10::optional generator); +TORCH_API std::vector compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction); +TORCH_API std::vector compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction); +TORCH_API std::vector compute_shape_cat(at::TensorList tensors, int64_t dim); +TORCH_API std::vector compute_shape_cholesky(const at::Tensor & self, bool upper); +TORCH_API std::vector compute_shape_clamp_min(const at::Tensor & self, const at::Scalar & min); +TORCH_API std::vector compute_shape_clone(const at::Tensor & self, c10::optional memory_format); +TORCH_API std::vector compute_shape_constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value); +TORCH_API std::vector compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups); +TORCH_API std::vector compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, 
at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask); +TORCH_API std::vector compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse); +TORCH_API std::vector compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); +TORCH_API std::vector compute_shape_expand(const at::Tensor & self, at::IntArrayRef size, bool implicit); +TORCH_API std::vector compute_shape_expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit); +TORCH_API std::vector compute_shape_flip(const at::Tensor & self, at::IntArrayRef dims); +TORCH_API std::vector compute_shape_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +TORCH_API std::vector compute_shape_glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); +TORCH_API std::vector compute_shape_grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +TORCH_API std::vector compute_shape_grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); +TORCH_API std::vector compute_shape_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index); +TORCH_API std::vector compute_shape_inverse(const at::Tensor & self); +TORCH_API std::vector compute_shape_isnan(const at::Tensor & self); +TORCH_API std::vector compute_shape_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +TORCH_API std::vector compute_shape_log_sigmoid_forward(const at::Tensor & self); +TORCH_API std::vector compute_shape_logdet(const at::Tensor & self); +TORCH_API std::vector compute_shape_logical_and(const at::Tensor & self, const at::Tensor & other); +TORCH_API std::vector compute_shape_logical_not(const at::Tensor & self); +TORCH_API std::vector compute_shape_logical_or(const at::Tensor & self, const at::Tensor & other); +TORCH_API std::vector compute_shape_logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API std::vector compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API std::vector compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API std::vector compute_shape_max(const at::Tensor & self); +TORCH_API std::vector compute_shape_mean(const at::Tensor & self, c10::optional dtype); +TORCH_API std::vector compute_shape_min(const at::Tensor & self); +TORCH_API std::vector compute_shape_mv(const at::Tensor & self, const at::Tensor & vec); +TORCH_API std::vector compute_shape_native_batch_norm(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps); +TORCH_API std::vector compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array 
output_mask); +TORCH_API std::vector compute_shape_native_dropout(const at::Tensor & input, double p, c10::optional train); +TORCH_API std::vector compute_shape_native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale); +TORCH_API std::vector compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); +TORCH_API std::vector compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask); +TORCH_API std::vector compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API std::vector compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API std::vector compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API std::vector compute_shape_nonzero(const at::Tensor & self); +TORCH_API std::vector compute_shape_normal_functional(const at::Tensor & self, double mean, double std, c10::optional generator); +TORCH_API std::vector compute_shape_random(const at::Tensor & self, c10::optional generator); +TORCH_API std::vector compute_shape_random(const at::Tensor & self, int64_t to, c10::optional generator); +TORCH_API std::vector compute_shape_random(const at::Tensor & self, int64_t from, c10::optional to, c10::optional generator); +TORCH_API std::vector compute_shape_relu(const at::Tensor & self); +TORCH_API std::vector compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats); +TORCH_API std::vector compute_shape_slogdet(const at::Tensor & self); +TORCH_API std::vector compute_shape_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta); +TORCH_API std::vector compute_shape_sort(const at::Tensor & self, int64_t dim, bool descending); +TORCH_API std::vector compute_shape_stack(at::TensorList tensors, int64_t dim); +TORCH_API std::vector compute_shape_std(const at::Tensor & self, bool unbiased); +TORCH_API std::vector compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim); +TORCH_API std::vector compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim); +TORCH_API std::vector compute_shape_sum(const at::Tensor & self, c10::optional dtype); +TORCH_API std::vector compute_shape__to_copy(const at::Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, c10::optional memory_format); +TORCH_API std::vector compute_shape_take(const at::Tensor & self, const at::Tensor & index); +TORCH_API std::vector compute_shape_trace(const at::Tensor & self); +TORCH_API std::vector compute_shape_zero(const at::Tensor & self); +TORCH_API std::vector compute_shape_narrow_copy_symint(const at::Tensor & self, int64_t dim, int64_t start, c10::SymInt length); +TORCH_API std::vector 
compute_shape_hardswish(const at::Tensor & self); +TORCH_API std::vector compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API std::vector compute_shape_selu(const at::Tensor & self); +TORCH_API std::vector compute_shape_uniform(const at::Tensor & self, double from, double to, c10::optional generator); + +// Non-Native ops +TORCH_API std::vector compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type); +TORCH_API std::vector compute_shape_expand(const Output& input0, const std::vector& size, const bool& is_scalar_expand); +TORCH_API std::vector compute_shape_view(const Output& input0, const std::vector& output_sizes); +TORCH_API std::vector compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const c10::optional& stype); + +// View Ops +// (Now that functionalization pass is used, we should kill these in a later PR) +TORCH_API std::vector compute_shape_as_strided_view_update(const Output& target, const Output& input, const std::vector& size, const std::vector& stride, const int64_t& storage_offset); +TORCH_API std::vector compute_shape_as_strided(const Output& input, const std::vector& size, const std::vector& stride, const int64_t& storage_offset); +TORCH_API std::vector compute_shape_diagonal_view_update(const Output& target, const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2); +TORCH_API std::vector compute_shape_diagonal(const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2); +TORCH_API std::vector compute_shape_narrow_view_update(const Output& input, const Output& source, const std::vector& base_indices); +TORCH_API std::vector compute_shape_narrow(const Output& input, const std::vector& base_indices, const std::vector& sizes); +TORCH_API std::vector compute_shape_permute(const Output& input, const std::vector& dims); +TORCH_API std::vector compute_shape_resize(const Output& input, const std::vector& size); +TORCH_API std::vector compute_shape_select_view_update(const Output& target, const Output& source, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride); +TORCH_API std::vector compute_shape_select(const Output& input, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride); +TORCH_API std::vector compute_shape_squeeze(const Output& input, const int& dim); +TORCH_API std::vector compute_shape_unsqueeze(const Output& input, const int& dim); + +TORCH_API std::vector compute_shape_select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index); +TORCH_API std::vector compute_shape_diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2); +TORCH_API std::vector compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); +TORCH_API std::vector compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); +// clang-format on +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..3a15c91c03452d84d9b6b90a27d71b5129dd2de7 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h @@ -0,0 +1,259 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API SymNodeImpl : public c10::SymNodeImpl { + public: + SymNodeImpl(NodePtr ptr) : node_(std::move(ptr)){}; + NodePtr node_; +}; + +class LazyTensor; +using LazyTensorPtr = c10::intrusive_ptr; + +class TORCH_API LazyTensor : public c10::intrusive_ptr_target { + public: + // This is the core lazy tensor data structure where all the tensor data is + // held. The lazy tensor is nothing more than a shared pointer to a Data + // object. + struct Data { + Data(BackendDataPtr handle, BackendDevice device) + : handle(std::move(handle)), + device(std::move(device)), + unique_id(GetNextTensorId()) {} + Data(Value ir_value, BackendDevice device) + : ir_value(std::move(ir_value)), + device(std::move(device)), + unique_id(GetNextTensorId()) {} + Data(at::Tensor tensor_data, BackendDevice device) + : tensor_data(std::move(tensor_data)), + device(std::move(device)), + unique_id(GetNextTensorId()) {} + // TODO(alanwaketan): Remove this ctor. This is a + // temporary ctor to ease XLA LTC migration. It depends on + // XLA's Functionalization integration. + Data(BackendDevice device) + : device(std::move(device)), unique_id(GetNextTensorId()) {} + + virtual ~Data(); + + BackendDataPtr handle; + Value ir_value; + c10::optional tensor_data; + const BackendDevice device; + const int64_t unique_id = 0; + size_t generation = 1; + }; + + static LazyTensorPtr Create( + const at::Tensor& tensor, + const BackendDevice& device); + static LazyTensorPtr Create(Value ir_value, const BackendDevice& device); + static LazyTensorPtr Create(BackendDataPtr handle); + static LazyTensorPtr Create(std::shared_ptr data); + + // The default ctor previously created a null LazyTensor (one with no 'data' + // obj). Creating a null LazyTensor is no longer possible, since the same can + // be achieved by creating a null LazyTensorPtr and it is way too confusing to + // have to check both lazy_tensor_ptr && *lazy_tensor_ptr, so everywhere that + // used to rely on a LazyTensor obj with a null Data can now rely on a null + // LazyTensorPtr instead. + LazyTensor() = delete; + LazyTensor(const LazyTensor&) = default; + LazyTensor(LazyTensor&&) noexcept = default; + + ~LazyTensor() override = default; + + size_t generation() const { + return data()->generation; + } + + // Override it to use your own Shape. + virtual int64_t size(int64_t dim) const; + + // Override it to use your own graph executor. + virtual at::Tensor ToTensor(bool detached); + + void ShallowCopyTo(LazyTensorPtr dest) const; + + // Assigns the tensor value to the lazy tensor. + void SetTensor(at::Tensor tensor); + + void UpdateFromTensor(at::Tensor tensor, bool sync); + void UpdateFromTensorOut(at::Tensor tensor); + void UpdateFromTensorOut(const LazyTensorPtr& tensor); + + const std::shared_ptr& data() const; + + // Override it to use your own type conversion. + virtual at::ScalarType dtype() const; + + MaybeRef shape() const; + + const BackendDevice& GetDevice() const; + int64_t GetUniqueId() const; + + // Fetches the data behind the tensor. If the tensor has a graph defining + // its current value, executes the graph and fetches the data result. 
+ BackendDataPtr GetDataHandle(); + + // Fetches the current value of the data, which can be missing (nullptr) + // in case the tensor has a graph defining its current value, + BackendDataPtr CurrentDataHandle() const; + + void SetDataHandle(BackendDataPtr handle); + void SetDataHandle(BackendDataPtr handle, bool sync); + + // Retrieves the current IR Node, or nullptr in case no active IR Node is + // available. + Value CurrentIrValue() const; + + // Retrieves the IR Node representing this LazyTensor. One will be created if + // missing. Note that although this is a const API, it actually changes the + // internal state ofthe object. + Value GetIrValue() const; + + void SetIrValue(Value ir_value); + void SetInPlaceIrValue(Value ir_value); + + c10::optional CurrentTensorData() const; + + std::vector MakeOutputTensors(NodePtr node) const; + + LazyTensorPtr CopyTensorToDevice(const BackendDevice& device); + + // Applies the queue of operations in preparation for using the data. + // Override it to use your own graph executor. + virtual void ApplyPendingGraph(); + + // Override it to set extra information. + virtual void AssignIrValue(Value ir_value) const; + + protected: + explicit LazyTensor(std::shared_ptr data); + + void SetTensorData(at::Tensor tensor_data); + + // We build a graph accumulating operations, but at a given point we + // need to force a rendering, otherwise the graph can grow without control. + // Think: + // for i in range(0, 100000): + // a = a + b + void TryLimitGraphSize(); + + // Override it to instantiate your own data. + virtual Value GetIrValueForTensor( + const at::Tensor& tensor, + const BackendDevice& device) const; + + Value CreateTensorNode(BackendDataPtr data, bool read_only) const; + + private: + LazyTensor(const at::Tensor& tensor, const BackendDevice& device); + LazyTensor(Value ir_value, const BackendDevice& device); + explicit LazyTensor(BackendDataPtr handle); + + static int64_t GetNextTensorId(); + + std::shared_ptr data_; +}; + +// Utils to convert at::Tensor to LazyTensor, and vice versa. + +// Section 0: c10::Tensorlist ==> lazy::TensorList +// note: GetTensorList is not totally parallel to GetLtcTensor; A TensorList +// skips +// the LazyTensor wrappers, assuming that the list of underlying IR nodes +// is actually more useful for downstream computations. TBD. +TORCH_API torch::lazy::Value GetTensorList(at::ITensorListRef tensors); + +// Section 1: at::Tensor => LazyTensor. +// Extracts the LazyTensor out of an at::Tensor. Returns a null LazyTensor +// if the tensor is not a lazy tensor. +TORCH_API LazyTensorPtr TryGetLtcTensor(const at::Tensor& tensor); + +// Extracts the LazyTensor out of an at::Tensor. Throws an exception +// if the tensor is not a lazy tensor. +TORCH_API LazyTensorPtr GetLtcTensor(const at::Tensor& tensor); + +// Same as above, applied to a list of tensors. +TORCH_API std::vector GetLtcTensors( + c10::ArrayRef tensors); + +// If tensor is a lazy tensor type, returns the LazyTensor embedded within it, +// otherwise creates a new lazy tensor type with tensor as data. +TORCH_API LazyTensorPtr GetOrCreateLtcTensor( + const c10::optional& tensor, + const BackendDevice& device); + +TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber( + const at::Tensor& tensor, + const BackendDevice& device); + +// Section 2: LazyTensor => at::Tensor. +// Creates an ATen tensor from an LazyTensor. 
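A short sketch combining the Section 1 and Section 2 helpers (the Roundtrip function is made up, and the input is assumed to already live on the lazy device):

#include <torch/csrc/lazy/core/tensor.h>

at::Tensor Roundtrip(const at::Tensor& lazy_input) {
  // GetLtcTensor throws if lazy_input is not a lazy tensor;
  // TryGetLtcTensor would return a null LazyTensorPtr instead.
  torch::lazy::LazyTensorPtr lt = torch::lazy::GetLtcTensor(lazy_input);
  // CreateAtenFromLtcTensor (declared just below) wraps the same underlying
  // LazyTensor in a fresh at::Tensor handle.
  return torch::lazy::CreateAtenFromLtcTensor(lt);
}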
+TORCH_API at::Tensor CreateAtenFromLtcTensor(const LazyTensorPtr& ltc_tensor); +TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor); + +// Note [Lazy Tensor Functionalization] +// The functionalization pass is implemented by wrapping all TensorImpl +// objects in C++ with an extra FunctionalTensorWrapper object, +// that knows how to perform functionalization +// +// Certain functions in the aten API serve as entry/exit points for +// functionalization, where we need to perform the wrapping/unwrapping: +// - aten::to.device +// - aten::empty + +// Given a non-lazy tensor, this function creates a lazy tensor on the specified +// (lazy) device. The functionalize_output determines whether or not we should +// wrap the output in a "functional wrapper". +// +// How do you know whether to pass true/false for functionalize_output? +// +// Case 1: nonlazy -> lazy +// If you're implementing a function that takes in nonlazy tensors and returns +// lazy tensors, then you should think of that function as an "entrypoint" to +// functionalization, and use functionalize_output=true Examples include: +// - factory functions (the LTC kernel for at::empty) +// - CPU -> Lazy device converions (the LTC kernel for at::to_device) +// +// Case 2: lazy -> lazy +// If you're implementing a function that takes in lazy tensors and returns +// lazy tensors, +// **but** requires creating lazy tensors internally, +// then you can assume that the current function is running inside of some +// outer context where functionalization is already running, that will take +// care of doing the wrapping for you, and use functionalize_output=true +// Examples include: +// - CPU fallback (takes in lazy tensors, converts to cpu, calls kernel, +// converts returns back to lazy tensors). +TORCH_API at::Tensor to_lazy_tensor( + const at::Tensor& self, + const c10::TensorOptions& options, + at::Device device, + bool non_blocking, + bool functionalize_output); + +template +auto TupleAtenFromLtcTensorsImpl( + const std::vector& tensors, + std::index_sequence) { + return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...); +} + +template +auto TupleAtenFromLtcTensors(const std::vector& tensors) { + return TupleAtenFromLtcTensorsImpl(tensors, std::make_index_sequence{}); +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..6eca2212c08eda493eaca0b330cac15c9c8d52f2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h @@ -0,0 +1,62 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace lazy { + +// Tensor implementation class used to be fed to the at::Tensor. +// Its scope is just to handle an LazyTensor. 
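TupleAtenFromLtcTensors above targets kernels that return several lazy tensors at once. A hedged sketch of a call site (the wrapper is hypothetical and assumes the vector holds at least two results):

std::tuple<at::Tensor, at::Tensor> ToAtenPair(
    const std::vector<torch::lazy::LazyTensorPtr>& results) {
  // Wraps results[0] and results[1] via CreateAtenFromLtcTensor and packs the
  // two at::Tensor handles into a std::tuple.
  return torch::lazy::TupleAtenFromLtcTensors<2>(results);
}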
+class TORCH_API LTCTensorImpl final : public c10::TensorImpl { + public: + explicit LTCTensorImpl(const LazyTensorPtr& tensor); + explicit LTCTensorImpl(const LazyTensor& tensor); + explicit LTCTensorImpl(LazyTensor&& tensor); + + LazyTensorPtr tensor() { + return tensor_; + } + + void set_tensor(const LazyTensorPtr& lazy_tensor); + + void force_refresh_sizes() { + generation_ = 0; + } + + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override; + + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override; + + void shallow_copy_from(const c10::intrusive_ptr& impl) override; + + at::IntArrayRef sizes_custom() const override; + at::IntArrayRef strides_custom() const override; + int64_t numel_custom() const override; + int64_t storage_offset_custom() const override; + int64_t dim_custom() const override; + bool is_contiguous_custom(at::MemoryFormat memory_format) const override; + bool is_strides_like_custom(at::MemoryFormat memory_format) const override; + bool is_non_overlapping_and_dense_custom() const override; + + c10::SymIntArrayRef sym_sizes_custom() const override; + c10::SymIntArrayRef sym_strides_custom() const override; + c10::SymInt sym_numel_custom() const override; + + private: + void setup_size_properties(); + + LazyTensorPtr tensor_; + mutable c10::optional> sym_sizes_; + size_t generation_{0}; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e4e6a1b7f0c26dc8e1e235b0487666fb0b3dfd9e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include + +#include + +#include +#include + +namespace torch { +namespace lazy { + +TORCH_API std::vector ComputeArrayStrides( + c10::ArrayRef sizes); + +TORCH_API std::vector DataHandlesToTensors( + c10::ArrayRef data_handles, + at::ScalarType dest_element_type); + +// Uploads an ATEN tensor data to the device and fetches the corresponding +// device data handle. +TORCH_API BackendDataPtr +TensorToDataHandle(const at::Tensor& tensor, const BackendDevice& device); + +// Retrieves the device data handles by parallel uploading data onto the +// corresponding devices. +TORCH_API std::vector CreateTensorsData( + const std::vector& tensors, + const std::vector& devices); + +// Makes a deep copy of an ATEN tensor. +inline at::Tensor CopyTensor(const at::Tensor& ref) { + return ref.to(ref.options(), /*non_blocking=*/false, /*copy=*/true); +} + +// Same as above, with an additional cast. +inline at::Tensor CopyTensor( + const at::Tensor& ref, + at::ScalarType dest_type, + bool copy = true) { + return ref.to(ref.options().dtype(dest_type), /*non_blocking=*/false, copy); +} + +template +T OptionalOr(const c10::optional& value, T defval) { + return value ? static_cast(*value) : defval; +} + +// Unwraps tensor to target dtype if it's a wrapped number. +inline at::Tensor UnwrapNumber(const at::Tensor& tensor, at::ScalarType dtype) { + return tensor.unsafeGetTensorImpl()->is_wrapped_number() ? 
tensor.to(dtype) + : tensor; +} + +template +at::Scalar MakeIntScalar(T value) { + return at::Scalar(static_cast(value)); +} + +// Routing values to device data maximizes the changes for compilation cache +// hits, but it can prevent the compiler to perform optimizations. So tensor +// values which are within a given set, are routed to constant scalars if this +// API returns true. +TORCH_API bool IsSpecialScalar(const at::Scalar& value); + +// Note: returns a reference instead of a fresh tensor to avoid refcount bumps. +inline const at::Tensor& maybe_unwrap_functional(const at::Tensor& tensor) { + if (at::functionalization::impl::isFunctionalTensor(tensor)) { + return at::functionalization::impl::unsafeGetFunctionalWrapper(tensor) + ->value(); + } else { + return tensor; + } +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h new file mode 100644 index 0000000000000000000000000000000000000000..bfb026d963cc0fa79020963f7d7c49310f41459c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace lazy { + +struct TORCH_API TrieNode { + static size_t GetNextUniqueId() { + static thread_local size_t id_generator = 0; + return id_generator++; + } + + size_t unique_id; + size_t hit_counter; + NodePtr ir_node; + std::list> successors; + + TrieNode() : unique_id(GetNextUniqueId()), hit_counter(0), ir_node(nullptr) {} + explicit TrieNode(NodePtr node) + : unique_id(GetNextUniqueId()), + hit_counter(0), + ir_node(std::move(node)) {} +}; + +class TORCH_API TrieCache { + public: + static TrieCache* Get(); + + TrieNode* Current() const; + // Take an iterator as the input because we want to move the corresponding + // node in the successor list to achieve a LRU caching effect + void SetCurrent(std::list>::iterator& iter); + // Used in MarkStep to indicate the end of one tracing + void ResetCurrent(); + + // Create a new TrieNode for ir_node and insert into the TrieCache + void Insert(NodePtr ir_node); + + // Clear all TrieCache nodes + // TODO: Because we don't expect user to explicitly call this function via + // a Python API, we may need to introduce a threshold on the size of the cache + // to avoid holding tensors for too long. + void Clear(); + + void DumpToDotFile(const std::string& file_name); + + private: + TrieCache(); + + std::shared_ptr root_; + TrieNode* current_; +}; + +template +NodePtr LookupNodeFromTrieCache(Args&&... 
args) { + auto& successors = TrieCache::Get()->Current()->successors; + for (auto it = successors.begin(); it != successors.end(); it++) { + NodePtr ir_node = (*it)->ir_node; + const T* concrete_node = NodeCast(ir_node.get()); + if (concrete_node && + concrete_node->CanBeReused(std::forward(args)...)) { + TORCH_LAZY_COUNTER( + "IrNodeReused_" + c10::demangle((typeid(T).name())), 1); + (*it)->hit_counter++; + TrieCache::Get()->SetCurrent(it); + return ir_node; + } + } + return nullptr; +} + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h new file mode 100644 index 0000000000000000000000000000000000000000..0b156a29eb906399aa2d1e613f4407b5b51c2ef3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h @@ -0,0 +1,56 @@ +/** + * Unique in this file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/unique.h + */ + +#pragma once + +#include + +#include +#include + +namespace torch { +namespace lazy { + +// Helper class to allow tracking zero or more things, which should be forcibly +// be one only thing. +template > +class Unique { + public: + std::pair set(const T& value) { + if (value_) { + TORCH_CHECK(C()(*value_, value), "'", *value_, "' vs '", value); + return std::pair(false, *value_); + } + value_ = value; + return std::pair(true, *value_); + } + + operator bool() const { + return value_.has_value(); + } + operator const T&() const { + return *value_; + } + const T& operator*() const { + return *value_; + } + const T* operator->() const { + return value_.operator->(); + } + + std::set AsSet() const { + std::set vset; + if (value_.has_value()) { + vset.insert(*value_); + } + return vset; + } + + private: + c10::optional value_; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h new file mode 100644 index 0000000000000000000000000000000000000000..8040a023de518476e65c04ff426f1807c19b14c4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +c10::optional TORCH_PYTHON_API GetPythonFrameTop(); + +std::vector TORCH_PYTHON_API GetPythonFrames(); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h new file mode 100644 index 0000000000000000000000000000000000000000..ac0320b9d0ac3cca01ddfc12f01e4a232fdda0df --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h @@ -0,0 +1,7 @@ +#pragma once +#include + +// TODO(whc) unclear if this is useful, has only been tested as true +C10_DECLARE_bool(torch_lazy_ts_tensor_update_sync); + +C10_DECLARE_bool(torch_lazy_ts_cuda); diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h new file mode 100644 index 
0000000000000000000000000000000000000000..aa0ed1eb99321f53cf0f16ef3403bc7abf491738 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h @@ -0,0 +1,85 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +C10_DECLARE_bool(ltc_enable_dynamic_shapes); + +namespace torch { +namespace lazy { + +/** + * The goal of "dynamic" Nodes is to patch a hole in our tracing. + * Previously, if a user called `sizes` on a Tensor, it would leak out + * of our tracing system, as `sizes` returns a torch.Size or an int. To + * prevent this from happening, we introduce DimensionNode, a new type + * of Node that abstracts the operation of getting the dimensions of a + * Tensor. + * + * Consider the following example: + * ``` + * numel = x.shape()[0] * x.shape()[1] + * ``` + * + * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode), + * and the multiplication of the two SizeNodes will be represented by + * a SizeMul (also a subclass of DimensionNode). Through this, we can + * prevent `numel` from being represented as a Python int and thus + * burned into the Graph. + */ + +// Represents the result of calling `size` on a Tensor +class TORCH_API SizeNode : public TsNode, public DimensionNode { + public: + SizeNode(Value input, size_t dim); + int64_t getStaticValue() const override; + bool isSymbolic() const override; + std::string ToString() const override; + size_t dim_ = 0; + torch::lazy::TSOpVector Lower( + std::shared_ptr function, + TSLoweringContext* loctx) const override; +}; + +class TORCH_API SizeAdd : public TsNode, public DimensionNode { + public: + SizeAdd(Value a, Value b); + int64_t getStaticValue() const override; + bool isSymbolic() const override; + std::string ToString() const override; +}; + +class TORCH_API SizeMul : public TsNode, public DimensionNode { + public: + SizeMul(Value a, Value b); + int64_t getStaticValue() const override; + bool isSymbolic() const override; + std::string ToString() const override; +}; + +class TORCH_API SizeDiv : public TsNode, public DimensionNode { + public: + SizeDiv(Value a, Value b); + int64_t getStaticValue() const override; + bool isSymbolic() const override; + std::string ToString() const override; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..1f32a3521ba8a58e73ff83abaa71c034799b2941 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +struct TorchScriptIrBuilder : IrBuilder { + NodePtr MakeDeviceData( + const std::shared_ptr& data) const override { + return DeviceData::Create(data); + } + // TODO: Scalar node is not currently used by ts_backend. Enable reusing + // Scalar node later if needed. 
+ NodePtr MakeScalar(const at::Scalar& value, const at::ScalarType& type) + const override { + return MakeNode(value, type); + } + NodePtr MakeExpand( + const Value& input0, + const std::vector& size, + const bool& is_scalar_expand) const override { + return ReuseOrMakeNode(input0, size, is_scalar_expand); + } + NodePtr MakeCast( + const Value& input0, + const at::ScalarType& dtype, + const c10::optional& stype = + c10::nullopt) const override { + return ReuseOrMakeNode(input0, dtype, stype); + } + NodePtr MakeTensorList(const OpList& inputs) const override { + return ReuseOrMakeNode(inputs); + } + // Generic needs cleanup + NodePtr MakeGeneric( + const OpKind& op, + const OpList& operands, + const Shape& shape, + const size_t& num_outputs = 1, + const hash_t& hash_seed = + static_cast(0x5a2d296e9)) const override { + return MakeNode(op, operands, shape, num_outputs, hash_seed); + } + + // dynamic ir nodes + // TODO: verify if IR node reusing works for Dynamic shape ops + NodePtr MakeSizeNode(const Value& input, size_t dim) const override { + return MakeNode(input, dim); + } + NodePtr MakeSizeAdd(const Value& a, const Value& b) const override { + return MakeNode(a, b); + } + NodePtr MakeSizeMul(const Value& a, const Value& b) const override { + return MakeNode(a, b); + } + NodePtr MakeSizeDiv(const Value& a, const Value& b) const override { + return MakeNode(a, b); + } +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bf663f4ca6b1b78634a15bd65617328e92584239 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +////////////////////////////////////////////////////////////////////////////// +// ATEN operators follows here, listed in alphabetical order. +////////////////////////////////////////////////////////////////////////////// + +void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src); +// Fills the input with the given value. 
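Like copy_ above, fill_ (declared next) is intended to be called from ATen kernel implementations on lazy tensors. A hypothetical in-place zero kernel sketches the pattern:

void MyZero_(at::Tensor& self) {
  // Assumes `self` already lives on the lazy device.
  torch::lazy::LazyTensorPtr self_lt = torch::lazy::TryGetLtcTensor(self);
  if (self_lt) {
    // Records the fill in the lazy IR graph instead of touching storage now.
    torch::lazy::fill_(self_lt, 0);
  }
}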
+void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..7e01724470384497a5fcdc9102d4b06403bdc640 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +struct MaxPool3dAutogradFunctionTS + : public torch::autograd::Function { + static at::Tensor forward( + torch::autograd::AutogradContext* ctx, + at::Tensor self, + at::IntArrayRef kernel_size, + at::IntArrayRef stride, + at::IntArrayRef padding, + at::IntArrayRef dilation, + bool ceil_mode); + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + torch::autograd::variable_list grad_output); +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..d238e8263e57781d09bbc744db5c92f9684ac09a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h @@ -0,0 +1,52 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +class TORCH_API TSData : public torch::lazy::BackendData { + public: + TSData(const at::Scalar& scalar, const torch::lazy::BackendDevice& device) + : torch::lazy::BackendData(device, torch::lazy::Shape(scalar.type(), {})), + scalar(scalar) {} + + TSData( + const at::Tensor& data, + const torch::lazy::Shape& shape, + const torch::lazy::BackendDevice& device) + : torch::lazy::BackendData(device, shape), data_(data) {} + + TSData( + const torch::lazy::Shape& shape, + const torch::lazy::BackendDevice& device) + : torch::lazy::BackendData(device, shape) {} + + Handle GetHandle() override { + return reinterpret_cast(this); + } + + void Assign(const torch::lazy::BackendData& data) override { + data_ = static_cast(data).data_; + } + + bool HasValue() const override { + return data_.defined(); + } + + at::Tensor data() { + return data_; + } + + c10::optional scalar; + + private: + at::Tensor data_; +}; + +TORCH_API torch::lazy::BackendImplInterface* GetTSBackendImpl(); + +TORCH_API void InitTorchScriptBackend(); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h new file mode 100644 index 0000000000000000000000000000000000000000..9f993d6f30290eb5914603d013f49884fcb3ea69 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +bool force_eager_fallback(c10::Symbol op); +void ltc_eager_fallback( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +void ts_eager_fallback( + const c10::OperatorHandle& op, + torch::jit::Stack* stack, + c10::DeviceType 
device_type); + +// The TorchScript backend does not register itself with pytorch dispatcher +// until it is explicitly initialized. This function should only be called +// by the main Torchscript backend init function. +void register_ts_ltc_eager_fallback(); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h new file mode 100644 index 0000000000000000000000000000000000000000..a898dfea654ad0e04bcc5e8bf003430b563f79a3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h @@ -0,0 +1,152 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +using TSOpVector = std::vector; + +class TORCH_API TSComputation : public Computation { + public: + TSComputation(const std::shared_ptr& graph) + : graph_(graph), graph_executor_(graph, "") { + for (torch::jit::Value* input : graph_->inputs()) { + parameter_names_.push_back(input->debugName()); + } + } + + int parameters_size() const override { + return parameter_names_.size(); + } + + const std::vector& parameter_shapes() const override { + throw std::runtime_error( + "TODO(whc) implement TS computation shapes or change interface"); + return parameter_shapes_; + } + + const std::vector& parameter_names() const override { + return parameter_names_; + } + + const Shape& result_shape() const override { + throw std::runtime_error( + "TODO(whc) implement TS computation shapes or change interface"); + return result_shape_; + } + + const std::string to_string() const override { + std::ostringstream oss; + oss << *graph_; + return oss.str(); + } + + std::shared_ptr graph() const { + return graph_; + } + + torch::jit::GraphExecutor& graph_executor() { + return graph_executor_; + } + + private: + std::shared_ptr graph_; + torch::jit::GraphExecutor graph_executor_; + std::vector parameter_names_; + std::vector parameter_shapes_; + Shape result_shape_; +}; + +class TORCH_API TSLoweringContext : public LoweringContext { + public: + TSLoweringContext(const std::string& name, const BackendDevice device); + + TSLoweringContext( + const std::string& name, + BackendDevice device, + c10::ArrayRef post_order, + Util::EmissionMap emit_status); + + size_t AddResult(const Output& output) override { + return AddResult(GetOutputOp(output)); + } + + void AddParameter( + const torch::lazy::Output& output, + size_t index, + const Shape& shape, + const std::string& name) override { + TORCH_INTERNAL_ASSERT(false, "not implemented"); + } + + void Lower(const Node* node); + + ComputationPtr Build() override { + for (torch::jit::Value* output : root_tuple_) { + graph_->block()->registerOutput(output); + } + return std::shared_ptr(new TSComputation(graph_)); + } + + // Retrieves the lowered operation for an output. If the requested output is + // not available yet, the graph behind the output's Node is lowered, and the + // corresponding TS operation returned. + torch::jit::Value* GetOutputOp(const Output& output) { + auto it = emitted_outputs_.find(output); + if (it == emitted_outputs_.end()) { + auto post_order = Util::ComputePostOrder(output.node, &emit_status_); + for (auto node : post_order) { + Lower(node); + } + // At this point the output better be present, otherwise there is an issue + // with the lowering code. 
+ it = emitted_outputs_.find(output); + TORCH_CHECK( + it != emitted_outputs_.end(), + "No TS operation emitted for output: ", + output.ToString()); + } + return it->second; + } + + // Assigns the given TS operation to the specified output. As outputs are + // lowered in a post-order fashion, later nodes should always find their + // operands among the emitted outputs. + void AssignOutputOp(const Output& output, torch::jit::Value* op); + + // If a parameter associated with data has already been declared, it will be + // returned. Otherwise a new one will be created, associated with the tensor + // held in data. + torch::jit::Value* GetParameter(BackendDataPtr data); + + std::shared_ptr graph() const { + return graph_; + } + + private: + struct Parameter { + torch::jit::Value* param{nullptr}; + size_t index = 0; + }; + + size_t AddResult(torch::jit::Value* op) { + root_tuple_.push_back(std::move(op)); + return root_tuple_.size() - 1; + } + + std::shared_ptr graph_; + std::shared_ptr function_; + std::unordered_map parameters_map_; + std::vector root_tuple_; + OutputMap emitted_outputs_; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h new file mode 100644 index 0000000000000000000000000000000000000000..62cc9016f6ffa2e50fb4d19501823eedcc0befde --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h @@ -0,0 +1,106 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +using TSOpVector = std::vector; + +class TORCH_API TsNode : public lazy::Node { + public: + TsNode( + OpKind op, + OpList operands, + std::vector&& shapes, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + TsNode( + OpKind op, + OpList operands, + const std::function& shape_fn, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + TsNode( + OpKind op, + OpList operands, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + TsNode( + OpKind op, + Shape shape, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + ~TsNode() override = default; + + hash_t hash() const override; + + hash_t shapeHash() const override; + + const std::string getPythonStacktrace() const; + + // Lower is a backend-specific method since it returns a backend specific + // type. hence, it is convenient to define it differently per-backend rather + // than at Node API + virtual TSOpVector Lower( + std::shared_ptr function, + TSLoweringContext* loctx) const; + + private: + // The hash of the dag WITH size info. Used for shape caching + hash_t shape_hash_; + // The hash of the dag used to look up the compiled graph by a hash + // in this case, we will use the dag hash WITHOUT size info if dynamic shape + // is enabled and use the dag hash WITH size info otherwise. + hash_t dag_hash_; +}; + +// Note: this OpKind is separate from ltc_ops.h since it would be a circular +// import otherwise, I like leaving TensorList in this file, and I think most of +// ltc_ops special cases will be deleted anyway +const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list"); + +// TensorList represents an at::TensorList which is a vector[Tensor] but is also +// a first-class IValue and can be fed as a single input to a TS program. 
It is +// much easier to handle TensorLists in Lazy Tensor code if they are represented +// as a single Node so there can be more than one TensorList and more than one +// Tensor side-by-side as operands to an op. +// +// Note: shape is undefined for TensorList. We assert in some places that +// #shapes matches #outputs and this stems from +// the fact that currently all IR nodes represent tensors (there is no +// type system for this IR). Becuase of this, TensorList is a bit of a +// hack. +// +// TODO(whc) once Shape() API is moved to Node base, also make it virtual, and +// then implement it as NotImplemented for TensorList, also fixing the assertion +// that would fail. +struct TORCH_API TensorList : public TsNode { + static OpKind ClassOpKind() { + return tensor_list_opkind; + } + + TensorList() = delete; + TensorList(OpList values); + + bool CanBeReused(OpList values) const { + return operands() == std::vector(values.begin(), values.end()); + } + + TSOpVector Lower( + std::shared_ptr function, + TSLoweringContext* loctx) const override; +}; + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h new file mode 100644 index 0000000000000000000000000000000000000000..cf46311ca24b3375f73130ed895429dc3f8dd446 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include + +namespace torch { +namespace lazy { +using TSOpVector = std::vector; + +TORCH_API TSOpVector LowerTSBuiltin( + std::shared_ptr function, + c10::Symbol sym, + const std::vector& arguments, + const std::vector& kwarguments = {}); + +} // namespace lazy +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..d5e58c8f9d874d75154c201a2383afd3e0941b58 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace torch::onnx { + +// The following constants are defined here to avoid breaking Meta's internal +// usage of ONNX which pre-dates ONNX 1.14 and thus does not support FLOAT8: +// cf. 
https://github.com/pytorch/pytorch/pull/106379#issuecomment-1675189340 +// -abock, 2023-08-25 +// +// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FN +constexpr auto TensorProto_DataType_FLOAT8E4M3FN = + static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(17); +// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2 +constexpr auto TensorProto_DataType_FLOAT8E5M2 = + static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(19); + +} // namespace torch::onnx diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h new file mode 100644 index 0000000000000000000000000000000000000000..923aca2097d32d1a6b770b60cd2c9a5b3786b8b8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::onnx { + +void initONNXBindings(PyObject* module); + +} // namespace torch::onnx diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..df887844ff66564662ab4a179911180a73132c37 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h @@ -0,0 +1,20 @@ +#pragma once + +namespace torch::onnx { + +enum class OperatorExportTypes { + ONNX, // Strict ONNX export + ONNX_ATEN, // ONNX With ATen op everywhere + ONNX_ATEN_FALLBACK, // ONNX export with ATen fallback + ONNX_FALLTHROUGH, // Export supported ONNX ops. Pass through unsupported ops. +}; + +enum class TrainingMode { + EVAL, // Inference mode + PRESERVE, // Preserve model state (eval/training) + TRAINING, // Training mode +}; + +constexpr char kOnnxNodeNameAttribute[] = "onnx_name"; + +} // namespace torch::onnx diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/api.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/api.h new file mode 100644 index 0000000000000000000000000000000000000000..8349803af26956155af4e7b54896839e50887132 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/api.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +// There are some components which use these symbols. Until we migrate them +// we have to mirror them in the old autograd namespace. 
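+// Illustrative example (added comment, not part of the original header):
+// legacy call sites such as
+//   torch::autograd::profiler::ProfilerConfig cfg(
+//       torch::autograd::profiler::ProfilerState::KINETO);
+// keep compiling because the using-declarations below re-export the
+// torch::profiler::impl types under the old namespace.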
+namespace torch { +namespace autograd { +namespace profiler { +using torch::profiler::impl::ActivityType; +using torch::profiler::impl::getProfilerConfig; +using torch::profiler::impl::ProfilerConfig; +using torch::profiler::impl::profilerEnabled; +using torch::profiler::impl::ProfilerState; +} // namespace profiler +} // namespace autograd +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/collection.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/collection.h new file mode 100644 index 0000000000000000000000000000000000000000..3678e04bfbdacd430af863f9ffae1d81713bc107 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/collection.h @@ -0,0 +1,661 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +enum class EventType : uint8_t { + TorchOp = 0, + Backend, + Vulkan, + Allocation, + OutOfMemory, + PyCall, + PyCCall, + Kineto +}; + +// ============================================================================ +// == Value (Tensor, Scalar) summary ========================================== +// ============================================================================ +struct TORCH_API RawTensorMetadataBase { + RawTensorMetadataBase() = default; + explicit RawTensorMetadataBase(const at::Tensor& t); + + StorageImplData data_; + c10::ScalarType dtype_{c10::ScalarType::Undefined}; + c10::Layout layout_{c10::Layout::Strided}; + uint32_t dim_{0}; +}; + +// Collected during profiling. +struct TORCH_API RawTensorMetadata : RawTensorMetadataBase { + RawTensorMetadata() = default; + RawTensorMetadata(const RawTensorMetadata&) = default; + RawTensorMetadata(RawTensorMetadata&&) noexcept = default; + RawTensorMetadata& operator=(const RawTensorMetadata&) = default; + RawTensorMetadata& operator=(RawTensorMetadata&&) noexcept = default; + explicit RawTensorMetadata(const at::Tensor& t); + + // Wrap `weak_self_` in `c10::optional` and split device into components to + // keep struct default constructable. (which the std::array initializer needs) + c10::optional weak_self_; + c10::DeviceType device_type_{c10::DeviceType::CPU}; + c10::DeviceIndex device_index_{-1}; +}; + +// Used during post processing. +struct TORCH_API TensorMetadata : public RawTensorMetadataBase { + TensorMetadata( + const RawTensorMetadata& r, + std::vector sizes, + std::vector strides); + + TensorImplAddress impl() const { + return weak_self_.get(); + } + + WeakTensor weak_self_; + c10::Device device_; + std::vector sizes_; + std::vector strides_; + + // Set during `calculateUniqueTensorIDs`. + c10::optional id_; + c10::optional allocation_id_; +}; + +using op_input_t = std::variant< + TensorMetadata, + std::vector, + c10::IValue, + c10::nullopt_t>; + +// ============================================================================ +// == ExtraFields ============================================================= +// ============================================================================ +template +struct ExtraFields; + +struct TorchOpBasicFields { + int64_t sequence_number_{0}; + uint64_t forward_tid_{0}; + at::RecordScope scope_{}; + bool is_async_{false}; + int64_t debug_handle_{0}; + std::string name_; + + // Set in the exit callback. 
+ uint64_t end_tid_{0}; +}; + +using jit_stack_t = std::vector; +using jit_modules_t = std::vector; +using extra_args_t = std::unordered_map; +using extra_meta_t = std::unordered_map; + +struct FallbackPair { + ProfilerVoidEventStub device_event_start_ = nullptr; + ProfilerVoidEventStub device_event_end_ = nullptr; +}; + +template <> +struct ExtraFields : TorchOpBasicFields { + ExtraFields( + TorchOpBasicFields&& f, + uint64_t correlation_id, + c10::time_t end_time_ns, + std::vector&& inputs, + std::vector&& concrete_inputs, + jit_stack_t&& jit_stack, + jit_modules_t&& jit_modules, + extra_args_t&& extra_args, + extra_meta_t&& extra_meta, + FallbackPair&& device_fallback, + bool allow_tf32_cublas, + std::unique_ptr&& perf_event_counters) + : TorchOpBasicFields(std::move(f)), + correlation_id_{correlation_id}, + end_time_ns_{end_time_ns}, + inputs_{std::move(inputs)}, + concrete_inputs_{std::move(concrete_inputs)}, + jit_stack_{std::move(jit_stack)}, + jit_modules_{std::move(jit_modules)}, + extra_args_{std::move(extra_args)}, + extra_meta_{std::move(extra_meta)}, + device_fallback_{std::move(device_fallback)}, + allow_tf32_cublas_{allow_tf32_cublas}, + perf_event_counters_{std::move(perf_event_counters)} {} + uint64_t correlation_id_; + c10::time_t end_time_ns_; + std::vector inputs_; + std::vector concrete_inputs_; + jit_stack_t jit_stack_; + jit_modules_t jit_modules_; + extra_args_t extra_args_; + extra_meta_t extra_meta_; + FallbackPair device_fallback_; + bool allow_tf32_cublas_; + std::unique_ptr perf_event_counters_; +}; + +template <> +struct ExtraFields { + int64_t start_time_us_; + int64_t end_time_us_; + int64_t debug_handle_; + at::RecordScope scope_; + std::string name_; + std::string backend_; + jit_stack_t jit_stack_; + jit_modules_t jit_modules_; +}; + +template <> +struct ExtraFields { + using raw_event_t = std::pair; + std::string name_; + int64_t duration_ns_{0}; + // While building the event tree, we want to report a vulkan event's duration + // as 0 so that its end time doesn't exceed that of its parent cpu op + bool in_tree_building_{false}; +}; + +struct RawAllocation { + c10::approx_time_t start_time_; + void* ptr_; + int64_t alloc_size_; + size_t total_allocated_; + size_t total_reserved_; + c10::DeviceType device_type_; + c10::DeviceIndex device_index_; +}; + +// For performance. +static_assert(c10::is_pod_v, "Non-POD member of RawAllocation."); + +template <> +struct ExtraFields : RawAllocation { + ExtraFields(const RawAllocation& allocation) : RawAllocation(allocation) {} + + c10::Device device() const { + return {device_type_, device_index_}; + } + + c10::optional id_; + c10::optional allocation_id_; +}; + +template <> +struct ExtraFields { + c10::approx_time_t start_time_; + int64_t alloc_size_; + size_t total_allocated_; + size_t total_reserved_; + c10::DeviceType device_type_; + c10::DeviceIndex device_index_; +}; + +// For performance. 
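+// (Added comment, illustrative rationale not stated in the original header:
+// keeping these event structs trivially copyable lets the per-thread
+// collection queues use AppendOnlyList's cheap placement-new / memcpy paths.)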
+static_assert( + c10::is_pod_v>, + "Non-POD member of ExtraFields."); + +struct PyFrameState { + int line_no_; + at::StringView filename_; + at::StringView funcname_; +}; + +template +using strong_t = strong:: + type, strong::hashable>; + +using PyModuleSelf = strong_t; +using PyModuleCls = strong_t; +using PyMethod = strong_t; +using PyOptimizerSelf = strong_t; +using PyOptimizerCls = strong_t; + +struct NNModuleInfo { + struct ParameterInfo { + std::string name_; + TensorMetadata metadata_; + c10::optional grad_metadata_; + }; + + PyModuleSelf self_; + PyModuleCls cls_; + at::StringView cls_name_; + + std::vector parameters_; + // Indicates that `self_` is the kth instance of `cls_` observed. + size_t id_{std::numeric_limits::max()}; +}; + +struct OptimizerInfo { + struct ParameterInfo { + TensorMetadata metadata_; + c10::optional grad_metadata_; + std::vector> state_; + }; + + PyOptimizerSelf self_; + PyOptimizerCls cls_; + at::StringView cls_name_; + + std::vector parameters_; +}; + +struct PyExtraFieldsBase { + PyExtraFieldsBase( + c10::time_t end_time_ns, + size_t python_tid, + PyFrameState caller) + : end_time_ns_{end_time_ns}, + python_tid_{python_tid}, + caller_{std::move(caller)} {} + + c10::time_t end_time_ns_; + size_t python_tid_; + PyFrameState caller_; + + // kth python event observed. (Used by TensorBoard) + size_t id_{std::numeric_limits::max()}; +}; + +template <> +struct ExtraFields : public PyExtraFieldsBase { + struct args_t { + PyFrameState frame_state_; + c10::optional module_info_; + c10::optional optimizer_info_; + }; + + ExtraFields( + c10::time_t end_time_ns, + size_t python_tid, + PyFrameState caller, + args_t args) + : PyExtraFieldsBase(end_time_ns, python_tid, std::move(caller)), + callsite_{std::move(args.frame_state_)}, + module_{std::move(args.module_info_)}, + optimizer_{std::move(args.optimizer_info_)} {} + + PyFrameState callsite_; + c10::optional module_; + c10::optional optimizer_; +}; + +template <> +struct ExtraFields : public PyExtraFieldsBase { + using args_t = at::StringView; + + ExtraFields( + c10::time_t end_time_ns, + size_t python_tid, + PyFrameState caller, + args_t args) + : PyExtraFieldsBase(end_time_ns, python_tid, std::move(caller)), + function_name_{std::move(args)} {} + + at::StringView function_name_; +}; + +template <> +struct ExtraFields { + // Mirrors `libkineto::GenericTraceActivity::Flow`. This information is used + // during post processing to properly embed Kineto events into the broader + // profiler tree structure. End users are not generally expected to use these + // fields directly, but they are available for debugging. + struct Flow { + uint32_t id{0}; + uint32_t type{0}; + uint32_t start{0}; + }; + + std::string name_; + int64_t duration_us_{0}; + uint64_t correlation_id_{0}; + libkineto::ActivityType activity_type_; + Flow flow; + std::weak_ptr linked_activity_{}; +}; + +struct TORCH_API Result : public std::enable_shared_from_this { + template + [[nodiscard]] static std::shared_ptr create(Args... 
args) { + return std::shared_ptr(new Result(std::forward(args)...)); + } + + template + decltype(auto) visit(T&& visitor) { + return std::visit(std::forward(visitor), extra_fields_); + } + + template + decltype(auto) visit(T&& visitor) const { + return std::visit(std::forward(visitor), extra_fields_); + } + + template + void visit_if_base(Fn&& fn) const { + visit([&](const auto& extra_fields) { + using extra_fields_t = typename std::remove_cv_t< + typename std::remove_reference_t>; + + if constexpr (std::is_base_of_v) { + fn(extra_fields); + } + }); + } + + EventType tag() const { + return visit([](const auto& i) { return deduceTag(i); }); + } + + std::string name() const; + libkineto::ActivityType kinetoType() const; + uint64_t correlationID() const; + int64_t endTimeNS() const; + uint64_t endTID() const; + c10::DeviceType deviceType() const; + + int64_t start_time_ns_; + uint64_t start_tid_; + kineto::DeviceAndResource kineto_info_; + std::variant< + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields> + extra_fields_; + + std::weak_ptr parent_; + std::vector> children_; + bool finished_{false}; + + const torch::profiler::impl::kineto::activity_t* kineto_activity_{nullptr}; + + private: + template + Result( + int64_t start_time_ns, + uint64_t start_tid, + kineto::DeviceAndResource kineto_info, + ExtraFields&& extra_fields) + : start_time_ns_{start_time_ns}, + start_tid_{start_tid}, + kineto_info_{kineto_info}, + extra_fields_{std::move(extra_fields)} {} + + template + static EventType deduceTag(const ExtraFields&) { + return E; + } +}; + +struct KinetoObserverContext : public at::ObserverContext { + struct Event { + TorchOpBasicFields basic_fields_; + c10::approx_time_t start_time_; + + // Set in the exit callback. + c10::approx_time_t end_time_{ + std::numeric_limits::min()}; + + bool allow_tf32_cublas_; + std::unique_ptr counters_; + }; + + explicit KinetoObserverContext(Event* event) : event_{event} {} + + Event* event_; + FallbackPair* fallback_{nullptr}; +}; + +constexpr int IO_ENCODER_DEFAULT_BLOCK_SIZE = 1024; + +constexpr int SCALAR_LIST_LENGTH_LIMIT = 30; + +// InputOutputEncoder +// Stores each op_events' shapes and dtypes, and concrete values into a +// contiguous AppendOnlyList so that we no longer create vectors for shapes +// and dtypes on every op. Those vectors can be created during +// post-processing. +// It splits the data into two categories: input shapes and concrete inputs. +class InputOutputEncoder final { + public: + void push(c10::ArrayRef values); + + // Used during post-processing to unpack the encoded data. + // Each method returns a "supplier" lambda which takes no arguments; + // invoking the lambda once will return a list of args that represent + // the inputs for one op. + // The data is split into two streams: "input shapes" and "concrete inputs". + // Note: "auto" only works because these are only used in collection.cpp, + // where they are implemented. + auto getInputShapeGenerator(); + auto getConcreteInputGenerator(); + + bool isSupportedScalarList(const c10::IValue& list_candidate); + + void clear(); + + enum class Tag { + Tensor = 0, + UndefinedTensor, + TensorListBegin, // TODO: generalize to other lists. 
+ ScalarList, + Scalar, + Other, + TERMINATOR + }; + + enum class IOType { Shapes, ConcreteInputs, None }; + + private: + void push(const at::Tensor& t); + + // Implementation detail for getInputShapeGenerator and + // getConcreteInputGenerator + auto getIValueGenerator(const IOType& io_type); + + AppendOnlyList tags_; + AppendOnlyList + tensor_metadata_; + AppendOnlyList tensor_sizes_strides_; + AppendOnlyList ivalues_; +}; + +using perf_profiler_t = torch::profiler::impl::linux_perf::PerfProfiler; + +class TORCH_API ThreadLocalSubqueue { + public: + ThreadLocalSubqueue(const uint64_t tid, ProfilerConfig config); + + std::unique_ptr begin_op(const at::RecordFunction& fn); + + template + void emplace_backend_event(Args&&... args) { + backend_events_.emplace_back(std::forward(args)...); + } + + template + void emplace_vulkan_event(Args&&... args) { + vulkan_events_.emplace_back(std::forward(args)...); + } + + template + void emplace_allocation_event(Args&&... args) { + allocations_.emplace_back(std::forward(args)...); + } + + template + void emplace_ooms_event(Args&&... args) { + ooms_.emplace_back(std::forward(args)...); + } + + template + void emplace_py_call(Args&&... args) { + py_calls_.emplace_back(std::forward(args)...); + } + + uint64_t tid() const { + return tid_; + } + + const kineto::DeviceAndResource& kineto_info() const { + return kineto_info_; + } + + inline void disable_perf_profiler(perf_counters_t& counters) const { + perf_profiler_->Disable(counters); + } + + private: + uint64_t tid_; + ProfilerConfig config_; + kineto::DeviceAndResource kineto_info_; + std::unique_ptr perf_profiler_; + + friend class RecordQueue; + // See `containers.h` for block size benchmarks. + static constexpr size_t BlockSize = 512; + + struct TorchOpStorage { + // NB: This is a destructive operation. + void materialize( + std::vector>& out, + const std::function& time_converter, + const uint64_t tid, + const kineto::DeviceAndResource& kineto_info); + + template + class EventBlock : public std::array { + public: + EventBlock(); + uint64_t correlation_id(const T* ptr) const; + + private: + uint64_t id_start_; + }; + + using event_t = KinetoObserverContext::Event; + class OpList : public AppendOnlyList { + public: + template + std::pair emplace_back(Args&&... args); + static uint64_t correlationID(const OpList::Iterator& e); + } op_events_; + + // report_input_shapes + InputOutputEncoder inputs_outputs_; + + // with_stack (JIT) + AppendOnlyList jit_stack_; + + // with_modules + AppendOnlyList jit_modules_; + + // with_flops + AppendOnlyList extra_args_; + + // report extra metadata, i.e. collective communication meta + AppendOnlyList extra_meta_; + + // ProfilerState::KINETO_GPU_FALLBACK or + // ProfilerState::KINETO_PRIVATEUSE1_FALLBACK + AppendOnlyList device_fallback_; + } torch_ops_; + + // reportBackendEventToActiveKinetoProfiler + AppendOnlyList, BlockSize> backend_events_; + + // _reportVulkanEventToProfiler + AppendOnlyList::raw_event_t, BlockSize> + vulkan_events_; + + // reportMemoryUsage + AppendOnlyList allocations_; + + // reportOOMs + AppendOnlyList, BlockSize> ooms_; + + // with_stack (Python) + AppendOnlyList< + std::pair, + BlockSize> + py_calls_; +}; + +class TORCH_API RecordQueue { + public: + RecordQueue(ProfilerConfig config, std::set activities); + + bool tracePython() const; + ThreadLocalSubqueue* getSubqueue(); + void stop(); + + // NB: This is a destructive operation. 
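+  // (Added comment, illustrative: getRecords() drains the events gathered in
+  // the per-thread sub-queues, so it is expected to be called only once,
+  // after profiling has stopped.)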
+ std::pair< + std::vector>, + std::unique_ptr> + getRecords( + std::function time_converter, + uint64_t start_time_us, + uint64_t end_time_us); + + private: + uint32_t id_; + ProfilerConfig config_; + std::set activities_; + ska::flat_hash_map> + sub_queues_; + std::mutex sub_queue_mutex_; + std::unique_ptr python_tracer_; +}; + +TORCH_API bool get_record_concrete_inputs_enabled(); +TORCH_API void set_record_concrete_inputs_enabled_fn(std::function); +TORCH_API void set_record_concrete_inputs_enabled_val(bool); + +TORCH_API bool get_fwd_bwd_enabled(); +TORCH_API void set_fwd_bwd_enabled_fn(std::function); +TORCH_API void set_fwd_bwd_enabled_val(bool); + +TORCH_API bool get_cuda_sync_enabled(); +TORCH_API void set_cuda_sync_enabled_fn(std::function); +TORCH_API void set_cuda_sync_enabled_val(bool); + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/combined_traceback.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/combined_traceback.h new file mode 100644 index 0000000000000000000000000000000000000000..04c96bb4281323a7cc1861411c2258333852f1a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/combined_traceback.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include + +namespace torch { + +// struct that holds the result of symbolizing multiple tracebacks +// each traceback is a list of indices into all_frames +// (lots of Frames get duplicated across traces) +struct TORCH_API SymbolizedTracebacks { + std::vector all_frames; + // index into all_frames, so that + // it is possible to dedupe frame objects in + // construction of python objects + std::vector> tracebacks; +}; + +struct TORCH_API CapturedTraceback : public c10::GatheredContext { + struct PyFrame { + void* code; // PyCodeObject*, but python headers not present + int lasti; + }; + + static std::shared_ptr gather( + bool python, + bool script, + bool cpp); + CapturedTraceback() = default; + CapturedTraceback(const CapturedTraceback&) = delete; + CapturedTraceback& operator=(const CapturedTraceback&) = delete; + ~CapturedTraceback() override; + + using visitproc = int (*)(void* self, void* arg); + + struct Python { + virtual std::vector gather() = 0; + virtual void release(std::vector& frames) = 0; + virtual void appendSymbolized( + const std::vector& to_symbolize, + SymbolizedTracebacks& st) = 0; + // tp_traverse/tp_clear implementations + virtual int traverse( + std::vector& frames, + visitproc visit, + void* arg) = 0; + virtual int clear(std::vector& frames) = 0; + virtual ~Python() = default; + Python* next_ = nullptr; + }; + // called once by each python interpreter to + // register python stack recording functionality + // p cannot be deleted once added. + static void addPythonUnwinder(Python* p); + + int traversePython(visitproc visit, void* arg); + int clearPython(); + + private: + std::vector frames_; + std::vector cpp_frames_; + std::vector script_frames_; + friend TORCH_API SymbolizedTracebacks + symbolize(const std::vector& to_symbolize); + + // non-owning reference to one of the immortal Python* objects + // registered above. 
+ Python* python_ = nullptr; +}; + +TORCH_API SymbolizedTracebacks +symbolize(const std::vector& to_symbolize); + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/containers.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/containers.h new file mode 100644 index 0000000000000000000000000000000000000000..3de4930ad985709cc702ade8fac3dfe2082749c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/containers.h @@ -0,0 +1,206 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +// ============================================================================ +// == AppendOnlyList ========================================================== +// ============================================================================ +// During profiling, we have a very predictable access pattern: we only +// append to the end of the container. We can specialize and outperform both +// std::vector (which must realloc) and std::deque (which performs a double +// indirection), and this class of operation is sufficiently important to the +// profiling hot path to warrant specializing: +// https://godbolt.org/z/rTjozf1c4 +// https://quick-bench.com/q/mmfuu71ogwaiULDCJyHdKnHZms4 (Prototype #1, +// int) https://quick-bench.com/q/5vWDW6jjdXVdoffev2zst8D09no (Prototype +// #1, int pair) https://quick-bench.com/q/IfEkfAQMeJSNBA52xtMP6Agcl-Q +// (Prototype #2, int pair) +// https://quick-bench.com/q/wJV2lKmuXL4XyGJzcI5hs4gEHFg (Prototype #3, int +// pair) https://quick-bench.com/q/xiO8ZaBEkYRYUA9dFrMuPLlW9fo (Full impl, +// int pair) +// AppendOnlyList has 2x lower emplace overhead compared to more generic STL +// containers. +// +// The optimal value of `ChunkSize` will vary by use case, but testing shows +// that a value of 1024 does a good job amortizing the `malloc` cost of growth. +// Performance drops off for larger values, so testing on a case-by-case basis +// is recommended if performance is absolutely critical. + +template < + typename T, + size_t ChunkSize, + template class block_t = std::array> +class AppendOnlyList { + public: + using array_t = block_t; + static_assert( + std::is_base_of_v, array_t>, + "AppendOnlyList expects raw low level pointer storage."); + static_assert(ChunkSize > 0, "Block cannot be empty."); + + AppendOnlyList() : buffer_last_{buffer_.before_begin()} {} + AppendOnlyList(const AppendOnlyList&) = delete; + AppendOnlyList& operator=(const AppendOnlyList&) = delete; + + size_t size() const { + return n_blocks_ * ChunkSize - (size_t)(end_ - next_); + } + + template + T* emplace_back(Args&&... 
args) { + maybe_grow(); + if constexpr ( + std::is_trivially_destructible_v && + std::is_trivially_destructible_v) { + ::new ((void*)next_) T{std::forward(args)...}; + } else { + *next_ = T{std::forward(args)...}; + } + return next_++; + } + + template + typename std::enable_if< + std::is_same::value && std::is_trivially_copyable::value>::type + copy(c10::ArrayRef src) { + size_t n = src.size(); + if (C10_UNLIKELY(n == 0)) { + return; + } + maybe_grow(); + if (C10_LIKELY(next_ && (next_ + n <= end_))) { + std::memcpy((void*)next_, (void*)src.begin(), n * sizeof(T0)); + next_ += n; + } else { + // We could chunk this into several `memcpy`s, but because we expect this + // fallback to be infrequent (n << ChunkSize) the performance impact is + // negligible. + for (auto i : src) { + emplace_back(i); + } + } + } + + void clear() { + buffer_.clear(); + buffer_last_ = buffer_.before_begin(); + n_blocks_ = 0; + next_ = nullptr; + end_ = nullptr; + } + + struct Iterator { + using iterator_category = std::forward_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = T; + using pointer = T*; + using reference = T&; + + Iterator(std::forward_list& buffer, const size_t size) + : block_{buffer.begin()}, size_{size} {} + + // End iterator. + Iterator() = default; + + bool exhausted() const { + return current_ >= size_; + } + + reference operator*() const { + return *current_ptr(/*checked=*/true); + } + pointer operator->() { + return current_ptr(/*checked=*/true); + } + + // Prefix increment + Iterator& operator++() { + if (!(++current_ % ChunkSize)) { + block_++; + } + return *this; + } + + // Postfix increment + Iterator operator++(int) { + Iterator tmp = *this; + ++(*this); + return tmp; + } + + friend bool operator==(const Iterator& a, const Iterator& b) { + return a.current_ptr() == b.current_ptr(); + } + friend bool operator!=(const Iterator& a, const Iterator& b) { + return a.current_ptr() != b.current_ptr(); + } + + std::pair address() const { + if (current_ >= size_) { + return {nullptr, 0}; + } + return {&(*block_), current_ % ChunkSize}; + } + + private: + T* current_ptr(bool checked = false) const { + auto a = address(); + if (a.first == nullptr) { + TORCH_INTERNAL_ASSERT(!checked, "Invalid access on AppendOnlyList."); + return nullptr; + } + return a.first->data() + a.second; + } + + typename std::forward_list::iterator block_; + size_t current_{0}; + size_t size_{0}; + }; + + Iterator begin() { + return Iterator(buffer_, size()); + } + Iterator end() { + return Iterator(); + } + // TODO: cbegin and cend() + + private: + void maybe_grow() { + if (C10_UNLIKELY(next_ == end_)) { + buffer_last_ = buffer_.emplace_after(buffer_last_); + n_blocks_++; + next_ = buffer_last_->data(); + end_ = next_ + ChunkSize; + } + } + + std::forward_list buffer_; + + // We maintain a pointer to the last element of `buffer_` so that we can + // insert at the end in O(1) time. 
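+  // (Added comment, illustrative invariant: next_ and end_ bracket the unused
+  // tail of the most recently allocated block, which is why size() above is
+  // n_blocks_ * ChunkSize - (end_ - next_).)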
+ size_t n_blocks_{0}; + T* next_{nullptr}; + T* end_{nullptr}; + + protected: + typename std::forward_list::iterator buffer_last_; +}; + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/data_flow.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/data_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..535dcbc8b2ef9b053f7b32bc8ea238d30fa47173 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/data_flow.h @@ -0,0 +1,94 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +// Identity is a complex concept in PyTorch. A Tensor might not have a +// an associated storage, multiple Tensors might share the same underlying +// storage, the storage of a Tensor might change over time, etc. +// +// For the purpose of profiling we're mostly interested in data flow +// analysis. As a result, we can take an expansive view of identity: +// Tensors share an ID if they share a TensorImpl or storage data. +// +// This identity equality is transitive; If Tensors T0 and T1 share a storage +// S0 and T1 later points to a different storage S1 then all Tensors which +// point to either S0 or S1 are considered to have the same identity. (Since +// profiler cannot reason beyond that.) +// +// The profiler will handle lifetime analysis to ensure that identities do +// not run afoul of the ABA problem. This does, however, mean that identities +// can only be assigned when memory profiling is enabled. +using TensorID = strong::type; + +// Uniquely identifies an allocation. (Generally a StorageImpl's data ptr.) +using AllocationID = strong::type< + size_t, + struct StorageID_, + strong::ordered, + strong::regular, + strong::hashable>; + +// We use a Tensor's TensorImpl adress and StorageImpl data start to build the +// data flow graph. We do not hold an owning reference so we wrap them in strong +// types to prevent direct access. +using TensorImplAddress = strong::type< + const c10::TensorImpl*, + struct TensorImplAddress_, + strong::regular, + strong::hashable, + strong::boolean>; + +using StorageImplData = strong::type< + const void*, + struct StorageImplData_, + strong::regular, + strong::hashable, + strong::boolean>; + +// ============================================================================ +// == weak_intrusive_ptr and the ABA problem for TensorImpl* ================== +// ============================================================================ +// Tracking `TensorImpl`s is an important part of identity tracking, because +// a Tensor might change storage; however when it does we want to retain the +// fact that the old and new storage belong to the same logical Tensor. We +// cannot take an owning reference to the Tensor because that would change +// program semantics by extending the lifetime of the Tensor. However if we +// store a raw TensorImpl* pointer the TensorImpl might be deleted and a new +// TensorImpl might be created that reuses the address. (ABA problem) +// +// Fortunately, there is a feature of `c10::intrusive_ptr` that we can use to +// prevent address reuse for the duration of profiling: the weak intrusive ptr. 
+// When a Tensor's refcount reaches zero but there are outstanding weak +// references (`weakcount_ > 0`) it will free the underlying managed resources +// by calling `target_->release_resources()`, but it will not call `delete`. +// (Instead, `delete` is called when the last weak reference is destroyed.) +// This means that we can safely use address identity to track `TensorImpls`. +class WeakTensor { + public: + explicit WeakTensor(const at::Tensor& t) : weak_self_(t.getIntrusivePtr()) {} + + auto get() const { + return TensorImplAddress{weak_self_._unsafe_get_target()}; + } + + private: + c10::weak_intrusive_ptr weak_self_; +}; + +struct Result; + +void calculateUniqueTensorIDs( + std::vector>& sorted_results); + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/events.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/events.h new file mode 100644 index 0000000000000000000000000000000000000000..42642f2afa6c7e6b6fc256fd981cb9c174d1d0fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/events.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace profiler { + +/* A vector type to hold a list of performance counters */ +using perf_counters_t = std::vector; + +/* Standard list of performance events independent of hardware or backend */ +constexpr std::array ProfilerPerfEvents = { + /* + * Number of Processing Elelement (PE) cycles between two points of interest + * in time. This should correlate positively with wall-time. Measured in + * uint64_t. PE can be non cpu. TBD reporting behavior for multiple PEs + * participating (i.e. threadpool). + */ + "cycles", + + /* Number of PE instructions between two points of interest in time. This + * should correlate positively with wall time and the amount of computation + * (i.e. work). Across repeat executions, the number of instructions should + * be more or less invariant. Measured in uint64_t. PE can be non cpu. + */ + "instructions"}; +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/kineto_shim.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/kineto_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..e92cbf003d6a1c664a9699ac4126b595853747ea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/kineto_shim.h @@ -0,0 +1,151 @@ +#pragma once + +#include +#include + +// Skip Kineto dependency on mobile unless explicitly asked for. +// When is it explicitly asked for? +// KinetoEdgeCPUProfiler uses KinetoProfiler for cpu +// event profiling. This has a dependency on cpu only libkineto +#if defined(USE_KINETO) && defined(C10_MOBILE) && \ + !defined(EDGE_PROFILER_USE_KINETO) +#undef USE_KINETO +#endif + +#include + +#include +#include + +#ifdef USE_KINETO +// Forward declarations so we don't have to include `libkineto.h` in a header. 
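+// (Added comment, illustrative: these declarations stay incomplete on
+// purpose; this header only needs them as pointer / smart-pointer members and
+// parameters, and the .cpp files that actually touch libkineto include its
+// full headers.)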
+namespace libkineto { +class GenericTraceActivity; +struct CpuTraceBuffer; +class ActivityTraceInterface; +} // namespace libkineto +#endif + +namespace torch { +namespace profiler { + +#ifdef USE_KINETO +constexpr bool kKinetoAvailable{true}; +#else +constexpr bool kKinetoAvailable{false}; +#endif + +namespace impl { +namespace kineto { + +// ---------------------------------------------------------------------------- +// -- Interface (Does not require Kineto) ------------------------------------- +// ---------------------------------------------------------------------------- +struct DeviceAndResource { + int32_t device; + int32_t resource; +}; +const DeviceAndResource kineto_ids(); + +#ifdef USE_KINETO +using trace_t = libkineto::CpuTraceBuffer; +using interface_trace_t = libkineto::ActivityTraceInterface; +using activity_t = libkineto::GenericTraceActivity; +#else +struct DummyTraceBuffer {}; +struct DummyTraceInterface {}; + +using trace_t = DummyTraceBuffer; +using interface_trace_t = DummyTraceBuffer; +struct activity_t; +#endif // USE_KINETO + +void addMetadata( + activity_t* activity, + const std::string& key, + const std::string& value); + +// Wraps: libkineto::CpuTraceBuffer +struct TraceWrapper { + TraceWrapper(const int64_t start_time, const std::string& name); + TraceWrapper(TraceWrapper&&) = default; + TraceWrapper(const TraceWrapper&) = delete; + ~TraceWrapper(); + + // The caller is expected to hold a mutex when calling `addCPUActivity`. + activity_t* addCPUActivity( + const std::string& name, + const libkineto::ActivityType type, + const DeviceAndResource device_and_resource, + const uint64_t correlation_id, + const int64_t start_time, + const int64_t end_time); + + void transferCpuTrace(int64_t end_time); + + explicit operator bool() const; + + std::unique_ptr& get() { + return cpu_trace_; + } + + private: + std::unique_ptr cpu_trace_; +}; + +// Wraps libkineto::ActivityTraceInterface +struct ActivityTraceWrapper { + explicit ActivityTraceWrapper(std::unique_ptr&& trace); + ActivityTraceWrapper() = default; + ActivityTraceWrapper(ActivityTraceWrapper&&) = default; + ActivityTraceWrapper(const ActivityTraceWrapper&) = delete; + explicit operator bool() const; + void save(const std::string& path); + + const std::unique_ptr& get() { + return trace_; + } + + private: + std::unique_ptr trace_; +#ifdef USE_KINETO + bool saved_ = false; // Kineto's save is destructive +#endif +}; + +using ActivitySet = std::set; +void prepareTrace( + const bool cpuOnly, + const ActivitySet& activities, + const torch::profiler::impl::ExperimentalConfig& config); +void startTrace(); +ActivityTraceWrapper stopTrace(); +void pushCorrelationId(uint64_t correlation_id); +void pushUserCorrelationId(uint64_t correlation_id); +void popCorrelationId(); +void popUserCorrelationId(); +void recordThreadInfo(); + +void logInvariantViolation( + const std::string& assertion, + const std::string& error, + const std::string& profile_id, + const std::string& group_profile_id); + +} // namespace kineto +} // namespace impl +} // namespace profiler + +namespace autograd { +namespace profiler { +c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type); + +TORCH_API void addMetadataJson( + const std::string& key, + const std::string& value); + +TORCH_API void profilerStep(); + +} // namespace profiler +} // namespace autograd +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/observer.h 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/observer.h new file mode 100644 index 0000000000000000000000000000000000000000..da675e0f3dae8690f16957d1ecc2b0f245b40175 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/observer.h @@ -0,0 +1,157 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace profiler { +namespace impl { + +// ---------------------------------------------------------------------------- +// -- Profiler Config --------------------------------------------------------- +// ---------------------------------------------------------------------------- +enum class C10_API_ENUM ActivityType { + CPU = 0, + XPU, // XPU kernels, runtime + CUDA, // CUDA kernels, runtime + MTIA, // MTIA kernels, runtime + NUM_KINETO_ACTIVITIES, // must be the last one +}; + +enum class C10_API_ENUM ProfilerState { + Disabled = 0, + CPU, // CPU-only profiling + CUDA, // CPU + CUDA events + NVTX, // only emit NVTX markers + ITT, // only emit ITT markers + KINETO, // use libkineto + KINETO_GPU_FALLBACK, // use CUDA events when CUPTI is not available + KINETO_PRIVATEUSE1_FALLBACK, // use PrivateUse1 events + KINETO_ONDEMAND, // run the profiler in on-demand mode + NUM_PROFILER_STATES, // must be the last one +}; + +enum class C10_API_ENUM ActiveProfilerType { + NONE = 0, + LEGACY, + KINETO, + NVTX, + ITT +}; + +struct TORCH_API ExperimentalConfig { + ExperimentalConfig( + std::vector profiler_metrics = {}, + bool profiler_measure_per_kernel = false, + bool verbose = false, + std::vector performance_events = {}, + bool enable_cuda_sync_events = false, + bool adjust_timestamps = false); + explicit operator bool() const; + + std::vector profiler_metrics; + bool profiler_measure_per_kernel; + bool verbose; + /* + * List of performance events to be profiled. + * An empty list will disable performance event based profiling altogether. + */ + std::vector performance_events; + /* + * For CUDA profiling mode, enable adding CUDA synchronization events + * that expose CUDA device, stream and event synchronization activities. + * This feature is new and currently disabled by default. + */ + bool enable_cuda_sync_events; + /* + * Controls whether or not timestamp adjustment occurs after profiling. + * The purpose of this is to adjust Vulkan event timelines to align with those + * of their parent CPU events. + * This sometimes requires increasing CPU event durations (to fully contain + * their child events) and delaying CPU event start times (to + * prevent overlaps), so this should not be used unless Vulkan events are + * being profiled and it is ok to use this modified timestamp/duration + * information instead of the original information. 
+ */ + bool adjust_timestamps; +}; + +struct TORCH_API ProfilerConfig { + ProfilerConfig( + ProfilerState state, + bool report_input_shapes = false, + bool profile_memory = false, + bool with_stack = false, + bool with_flops = false, + bool with_modules = false, + ExperimentalConfig experimental_config = ExperimentalConfig()); + + bool disabled() const; + bool global() const; + + ProfilerState state; + ExperimentalConfig experimental_config; + bool report_input_shapes; + bool profile_memory; + bool with_stack; + bool with_flops; + bool with_modules; + + // For serialization + at::IValue toIValue() const; + static ProfilerConfig fromIValue(const at::IValue& profilerConfigIValue); +}; + +// ---------------------------------------------------------------------------- +// -- Profiler base class ----------------------------------------------------- +// ---------------------------------------------------------------------------- +struct TORCH_API ProfilerStateBase : public c10::MemoryReportingInfoBase { + explicit ProfilerStateBase(ProfilerConfig config); + ~ProfilerStateBase() override; + + static ProfilerStateBase* get(bool global); + static ProfilerStateBase* get() { + auto* out = get(/*global=*/true); + return out ? out : get(/*global=*/false); + } + + static void push(std::shared_ptr&& state); + + static std::shared_ptr pop(bool global); + static std::shared_ptr pop() { + auto out = pop(/*global=*/true); + return out ? std::move(out) : pop(/*global=*/false); + } + + const ProfilerConfig& config() const { + return config_; + } + + void setCallbackHandle(at::CallbackHandle handle); + void removeCallback(); + + bool memoryProfilingEnabled() const override { + return config_.profile_memory; + } + + virtual ActiveProfilerType profilerType() = 0; + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::mutex state_mutex_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + ProfilerConfig config_ = ProfilerConfig(ProfilerState::Disabled); + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + at::CallbackHandle handle_ = 0; +}; + +// Note: The following are only for the active *thread local* profiler. +TORCH_API bool profilerEnabled(); +TORCH_API ActiveProfilerType profilerType(); +TORCH_API ProfilerConfig getProfilerConfig(); + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/python_tracer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/python_tracer.h new file mode 100644 index 0000000000000000000000000000000000000000..f05aefecfe16947abd7d1f3645f084cc1fc68f78 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/python_tracer.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +class RecordQueue; +struct Result; +namespace python_tracer { + +using TraceKey = strong::type< + uint64_t, + struct TraceKey_, + strong::regular, + strong::hashable, + strong::ostreamable>; + +struct CompressedEvent { + TraceKey key_; + uint64_t system_tid_; + kineto::DeviceAndResource kineto_info_; + c10::time_t enter_t_; +}; + +/* +Libtorch does not depend on Python (e.g. 
cannot #include ); however +when we call the profiler from libtorch_python we need the profiler to be able +to ingest the data that we collect from the Python tracer. (`PyEval_SetProfile`) + +In order to solve this dependency issue we define a virtual base and a function +to register a getter. The python tracer then implements these functions and +exposes itself by calling `registerTracer` from `torch/csrc/autograd/init.cpp`. +This pattern of registration for faux python dependencies in libtorch is common +in the PyTorch codebase. +*/ +struct TORCH_API PythonTracerBase { + static std::unique_ptr make(RecordQueue* queue); + virtual ~PythonTracerBase() = default; + + virtual void stop() = 0; + virtual std::vector> getEvents( + std::function time_converter, + std::vector& enters, + c10::time_t end_time_ns) = 0; +}; + +using MakeFn = std::unique_ptr (*)(RecordQueue*); +TORCH_API void registerTracer(MakeFn make_tracer); +} // namespace python_tracer +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/vulkan.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/vulkan.h new file mode 100644 index 0000000000000000000000000000000000000000..2b11d5a0e21e5ed83fe4cf8ca1e042325d47073b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/orchestration/vulkan.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { +namespace vulkan { + +// Using function pointer i.e. [std::tuple (*)(int64_t)] +// doesn't work because we need to capture the QueryPool in the lambda context +// https://stackoverflow.com/a/28746827 +using GetShaderNameAndDurationNsFn = + std::function(int64_t)>; +TORCH_API void registerGetShaderNameAndDurationNs( + GetShaderNameAndDurationNsFn get_shader_name_and_duration_ns); + +TORCH_API void deregisterGetShaderNameAndDurationNs(); + +std::tuple getShaderNameAndDurationNs( + const vulkan_id_t& vulkan_id); + +} // namespace vulkan +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf-inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..0dfa45ac6f2be0ac7106d232262f96161961ce0d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf-inl.h @@ -0,0 +1,72 @@ +#pragma once + +#if defined(__ANDROID__) || defined(__linux__) + +#include + +#include +#include + +#include + +#endif /* __ANDROID__ || __linux__ */ + +#include + +namespace torch { +namespace profiler { +namespace impl { +namespace linux_perf { + +/* + * PerfEvent + * --------- + */ + +inline void PerfEvent::Disable() const { +#if defined(__ANDROID__) || defined(__linux__) + ioctl(fd_, PERF_EVENT_IOC_DISABLE, 0); +#endif /* __ANDROID__ || __linux__ */ +} + +inline void PerfEvent::Enable() const { +#if defined(__ANDROID__) || defined(__linux__) + ioctl(fd_, PERF_EVENT_IOC_ENABLE, 0); +#endif /* __ANDROID__ || __linux__ */ +} + +inline void PerfEvent::Reset() const { +#if defined(__ANDROID__) || defined(__linux__) + ioctl(fd_, PERF_EVENT_IOC_RESET, 0); +#endif /* __ANDROID__ || __linux__ */ +} + +/* + * PerfProfiler + * ------------ + */ + +inline uint64_t PerfProfiler::CalcDelta(uint64_t start, uint64_t end) const { + 
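+  // Illustrative walkthrough (added comment, not in the original source): in
+  // an 8-bit analogy, start = 250 with a wrapped reading of end = 5 gives
+  // 5 + (255 - 250) = 10; the branch below applies the same reconstruction to
+  // the 64-bit counter instead of returning a huge unsigned difference.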
if (end < start) { // overflow + return end + (std::numeric_limits::max() - start); + } + // not possible to wrap around start for a 64b cycle counter + return end - start; +} + +inline void PerfProfiler::StartCounting() const { + for (auto& e : events_) { + e.Enable(); + } +} + +inline void PerfProfiler::StopCounting() const { + for (auto& e : events_) { + e.Disable(); + } +} + +} // namespace linux_perf +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf.h new file mode 100644 index 0000000000000000000000000000000000000000..9d5d00cc67d1fdc499646631612499d37b58c4ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace torch { +namespace profiler { +namespace impl { +namespace linux_perf { + +/* + * Maximum number of events supported + * This stems from the hardware limitation on CPU performance counters, and the + * fact that we don't support time multiplexing just yet. + * Time multiplexing involves scaling the counter values proportional to + * the enabled and running time or running the workload multiple times. + */ +constexpr uint8_t MAX_EVENTS = 4; + +struct PerfCounter { + uint64_t value; /* The value of the event */ + uint64_t time_enabled; /* for TIME_ENABLED */ + uint64_t time_running; /* for TIME_RUNNING */ +}; + +/* + * Basic perf event handler for Android and Linux + */ +class PerfEvent { + public: + explicit PerfEvent(std::string& name) : name_(name) {} + + PerfEvent& operator=(PerfEvent&& other) noexcept { + if (this != &other) { + fd_ = other.fd_; + other.fd_ = -1; + name_ = std::move(other.name_); + } + return *this; + } + + PerfEvent(PerfEvent&& other) noexcept { + *this = std::move(other); + } + + ~PerfEvent(); + + /* Setup perf events with the Linux Kernel, attaches perf to this process + * using perf_event_open(2) */ + void Init(); + + /* Stop incrementing hardware counters for this event */ + void Disable() const; + + /* Start counting hardware event from this point on */ + void Enable() const; + + /* Zero out the counts for this event */ + void Reset() const; + + /* Returns PerfCounter values for this event from kernel, on non supported + * platforms this always returns zero */ + uint64_t ReadCounter() const; + + private: + /* Name of the event */ + std::string name_; + + int fd_ = -1; +}; + +class PerfProfiler { + public: + /* Configure all the events and track them as individual PerfEvent */ + void Configure(std::vector& event_names); + + /* Enable events counting from here */ + void Enable(); + + /* Disable counting and fill in the caller supplied container with delta + * calculated from the start count values since last Enable() */ + void Disable(perf_counters_t&); + + private: + uint64_t CalcDelta(uint64_t start, uint64_t end) const; + void StartCounting() const; + void StopCounting() const; + + std::vector events_; + std::stack start_values_; +}; +} // namespace linux_perf +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/stubs/base.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/stubs/base.h new file mode 100644 index 
0000000000000000000000000000000000000000..bac3f5ed3787bb0c19bd89c5af09597070dd1015 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/stubs/base.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include + +#include +#include + +struct CUevent_st; + +namespace torch { +namespace profiler { +namespace impl { + +// ---------------------------------------------------------------------------- +// -- Annotation -------------------------------------------------------------- +// ---------------------------------------------------------------------------- +using ProfilerEventStub = std::shared_ptr; +using ProfilerVoidEventStub = std::shared_ptr; + +struct TORCH_API ProfilerStubs { + virtual void record( + int* device, + ProfilerVoidEventStub* event, + int64_t* cpu_ns) const = 0; + virtual float elapsed( + const ProfilerVoidEventStub* event, + const ProfilerVoidEventStub* event2) const = 0; + virtual void mark(const char* name) const = 0; + virtual void rangePush(const char* name) const = 0; + virtual void rangePop() const = 0; + virtual bool enabled() const { + return false; + } + virtual void onEachDevice(std::function op) const = 0; + virtual void synchronize() const = 0; + virtual ~ProfilerStubs(); +}; + +TORCH_API void registerCUDAMethods(ProfilerStubs* stubs); +TORCH_API const ProfilerStubs* cudaStubs(); +TORCH_API void registerITTMethods(ProfilerStubs* stubs); +TORCH_API const ProfilerStubs* ittStubs(); +TORCH_API void registerPrivateUse1Methods(ProfilerStubs* stubs); +TORCH_API const ProfilerStubs* privateuse1Stubs(); + +using vulkan_id_t = strong::type< + int64_t, + struct _VulkanID, + strong::regular, + strong::convertible_to, + strong::hashable>; + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/action.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/action.h new file mode 100644 index 0000000000000000000000000000000000000000..e1ed407384fc900750404203c16fbfa8b41af8d3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/action.h @@ -0,0 +1,55 @@ +#pragma once +#include +#include + +enum { + A_UNDEFINED = 0x0, + A_REG_PLUS_DATA = 0x1, // exp = REG[reg] + data0 + A_LOAD_CFA_OFFSET = 0x2, // exp = *(cfa + data0) + A_REG_PLUS_DATA_DEREF = 0x3 // exp = *(REG[reg] + data0) +}; + +// register numbers in dwarf info +enum { + D_UNDEFINED = -1, + D_RBP = 6, + D_RSP = 7, + D_RIP = 16, + D_REG_SIZE = 17, +}; + +struct Action { + uint8_t kind = A_UNDEFINED; + int32_t reg = -1; + int64_t data = 0; + static Action undefined() { + return Action{A_UNDEFINED}; + } + static Action regPlusData(int32_t reg, int64_t offset) { + return Action{A_REG_PLUS_DATA, reg, offset}; + } + static Action regPlusDataDeref(int32_t reg, int64_t offset) { + return Action{A_REG_PLUS_DATA_DEREF, reg, offset}; + } + static Action loadCfaOffset(int64_t offset) { + return Action{A_LOAD_CFA_OFFSET, D_UNDEFINED, offset}; + } + + friend std::ostream& operator<<(std::ostream& out, const Action& self) { + switch (self.kind) { + case A_UNDEFINED: + out << "u"; + break; + case A_REG_PLUS_DATA: + out << "r" << (int)self.reg << " + " << self.data; + break; + case A_REG_PLUS_DATA_DEREF: + out << "*(r" << (int)self.reg << " + " << self.data << ")"; + break; + case A_LOAD_CFA_OFFSET: + out << "*(cfa + " << self.data << ")"; + break; + } + return out; + } +}; diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/communicate.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/communicate.h new file mode 100644 index 0000000000000000000000000000000000000000..79c27eaeba7faee58662bcd002326bf65ccb1efc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/communicate.h @@ -0,0 +1,65 @@ +#pragma once +#include +#include +#include +#include +#include + +// helper to open a process with stdin/stdout/stderr streams. +struct Communicate { + Communicate(const char* command, const char** args) { + if (pipe(inpipe_) < 0 || pipe(outpipe_) < 0 || pipe(errpipe_) < 0) { + throw UnwindError("pipe() failed"); + } + pid_t pid = fork(); + if (pid < 0) { + throw UnwindError("fork() failed"); + } else if (pid == 0) { // child process + close(inpipe_[1]); + close(outpipe_[0]); + close(errpipe_[0]); + + dup2(inpipe_[0], STDIN_FILENO); + dup2(outpipe_[1], STDOUT_FILENO); + dup2(errpipe_[1], STDERR_FILENO); + execvp(command, (char* const*)args); + throw UnwindError("failed execvp"); + } else { // parent process + close(inpipe_[0]); + close(outpipe_[1]); + close(errpipe_[1]); + outbuf_.reset( + new __gnu_cxx::stdio_filebuf(inpipe_[1], std::ios::out)); + inbuf_.reset( + new __gnu_cxx::stdio_filebuf(outpipe_[0], std::ios::in)); + errbuf_.reset( + new __gnu_cxx::stdio_filebuf(errpipe_[0], std::ios::in)); + in_.reset(new std::istream(inbuf_.get())); + out_.reset(new std::ostream(outbuf_.get())); + err_.reset(new std::ostream(errbuf_.get())); + } + } + ~Communicate() { + close(inpipe_[1]); + close(outpipe_[0]); + close(errpipe_[0]); + } + std::ostream& out() { + return *out_; + } + std::ostream& err() { + return *err_; + } + std::istream& in() { + return *in_; + } + + private: + int inpipe_[2]; + int outpipe_[2]; + int errpipe_[2]; + std::unique_ptr<__gnu_cxx::stdio_filebuf> outbuf_, inbuf_, errbuf_; + std::unique_ptr in_; + std::unique_ptr out_; + std::unique_ptr err_; +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/dwarf_enums.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/dwarf_enums.h new file mode 100644 index 0000000000000000000000000000000000000000..91af24b34e1f97b02bedba2e59397495c5213719 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/dwarf_enums.h @@ -0,0 +1,46 @@ +#pragma once + +enum { + DW_EH_PE_absptr = 0x00, + DW_EH_PE_omit = 0xff, + /* FDE data encoding. */ + DW_EH_PE_uleb128 = 0x01, + DW_EH_PE_udata2 = 0x02, + DW_EH_PE_udata4 = 0x03, + DW_EH_PE_udata8 = 0x04, + DW_EH_PE_sleb128 = 0x09, + DW_EH_PE_sdata2 = 0x0a, + DW_EH_PE_sdata4 = 0x0b, + DW_EH_PE_sdata8 = 0x0c, + DW_EH_PE_signed = 0x08, + /* FDE flags. 
*/ + DW_EH_PE_pcrel = 0x10, + DW_EH_PE_textrel = 0x20, + DW_EH_PE_datarel = 0x30, + DW_EH_PE_funcrel = 0x40, + DW_EH_PE_aligned = 0x50, + DW_EH_PE_indirect = 0x80, +}; + +enum { + DW_CFA_nop = 0x0, + DW_CFA_advance_loc = 0x01, + DW_CFA_offset = 0x02, + DW_CFA_restore = 0x03, + DW_CFA_advance_loc1 = 0x02, + DW_CFA_advance_loc2 = 0x03, + DW_CFA_advance_loc4 = 0x04, + DW_CFA_restore_extended = 0x06, + DW_CFA_undefined = 0x07, + DW_CFA_register = 0x09, + DW_CFA_remember_state = 0x0a, + DW_CFA_restore_state = 0x0b, + DW_CFA_def_cfa = 0x0c, + DW_CFA_def_cfa_register = 0x0d, + DW_CFA_def_cfa_offset = 0x0e, + DW_CFA_def_cfa_expression = 0xf, + DW_CFA_expression = 0x10, + DW_CFA_offset_extended_sf = 0x11, + DW_CFA_GNU_args_size = 0x2e, + DW_OP_deref = 0x6, +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/eh_frame_hdr.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/eh_frame_hdr.h new file mode 100644 index 0000000000000000000000000000000000000000..9800166675093fd3bb939ea617fa61853f591500 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/eh_frame_hdr.h @@ -0,0 +1,95 @@ +#pragma once +#include +#include + +#include +#include + +// Overview of the format described in +// https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html + +struct EHFrameHdr { + EHFrameHdr(void* base) : base_(base) { + Lexer L(base, base); + version_ = L.read(); + eh_frame_ptr_enc_ = L.read(); + fde_count_enc_ = L.read(); + table_enc_ = L.read(); + if (table_enc_ == DW_EH_PE_omit) { + table_size_ = 0; + } else { + switch (table_enc_ & 0xF) { + case DW_EH_PE_udata2: + case DW_EH_PE_sdata2: + table_size_ = 2; + break; + case DW_EH_PE_udata4: + case DW_EH_PE_sdata4: + table_size_ = 4; + break; + case DW_EH_PE_udata8: + case DW_EH_PE_sdata8: + table_size_ = 8; + break; + case DW_EH_PE_uleb128: + case DW_EH_PE_sleb128: + throw UnwindError("uleb/sleb table encoding not supported"); + break; + default: + throw UnwindError("unknown table encoding"); + } + } + eh_frame_ = (void*)L.readEncodedOr(eh_frame_ptr_enc_, 0); + fde_count_ = L.readEncodedOr(fde_count_enc_, 0); + table_start_ = L.loc(); + } + size_t nentries() const { + return fde_count_; + } + + uint64_t lowpc(size_t i) const { + return Lexer(table_start_, base_) + .skip(2 * i * table_size_) + .readEncoded(table_enc_); + } + void* fde(size_t i) const { + return (void*)Lexer(table_start_, base_) + .skip((2 * i + 1) * table_size_) + .readEncoded(table_enc_); + } + + void* entryForAddr(uint64_t addr) const { + if (!table_size_ || !nentries()) { + throw UnwindError("search table not present"); + } + uint64_t low = 0; + uint64_t high = nentries(); + while (low + 1 < high) { + auto mid = (low + high) / 2; + if (addr < lowpc(mid)) { + high = mid; + } else { + low = mid; + } + } + return fde(low); + } + + friend std::ostream& operator<<(std::ostream& out, const EHFrameHdr& self) { + out << "EHFrameHeader(version=" << self.version_ + << ",table_size=" << self.table_size_ + << ",fde_count=" << self.fde_count_ << ")"; + return out; + } + + private: + void* base_; + void* table_start_; + uint8_t version_; + uint8_t eh_frame_ptr_enc_; + uint8_t fde_count_enc_; + uint8_t table_enc_; + void* eh_frame_ = nullptr; + int64_t fde_count_; + uint32_t table_size_; +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/fde.h 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/fde.h new file mode 100644 index 0000000000000000000000000000000000000000..5e8cc0baee18f128c95296ac49cd34e2d15cb284 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/fde.h @@ -0,0 +1,400 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +struct TableState { + Action cfa; + std::array registers; + friend std::ostream& operator<<(std::ostream& out, const TableState& self) { + out << "cfa = " << self.cfa << "; "; + for (auto r : c10::irange(self.registers.size())) { + if (self.registers.at(r).kind != A_UNDEFINED) { + out << "r" << r << " = " << self.registers.at(r) << "; "; + } + } + return out; + } +}; + +// FDE - Frame Description Entry (Concept in ELF spec) +// This format is explained well by +// https://www.airs.com/blog/archives/460 +// Details of different dwarf actions are explained +// in the spec document: +// https://web.archive.org/web/20221129184704/https://dwarfstd.org/doc/DWARF4.doc +// An overview of how DWARF unwinding works is given in +// https://dl.acm.org/doi/pdf/10.1145/3360572 +// A similar implementation written in rust is: +// https://github.com/mstange/framehop/ + +template +struct FDE { + FDE(void* data, const char* library_name, uint64_t load_bias) + : library_name_(library_name), load_bias_(load_bias) { + Lexer L(data); + auto length = L.read4or8Length(); + void* fde_start = L.loc(); + void* cie_data = (void*)((int64_t)fde_start - L.read()); + Lexer LC(cie_data); + auto cie_length = LC.read4or8Length(); + void* cie_start = LC.loc(); + auto zero = LC.read(); + TORCH_INTERNAL_ASSERT(zero == 0, "expected 0 for CIE"); + auto version = LC.read(); + TORCH_INTERNAL_ASSERT( + version == 1 || version == 3, "non-1 version for CIE"); + augmentation_string_ = LC.readCString(); + if (hasAugmentation("eh")) { + throw UnwindError("unsupported 'eh' augmentation string"); + } + code_alignment_factor_ = LC.readULEB128(); + data_alignment_factor_ = LC.readSLEB128(); + if (version == 1) { + ra_register_ = LC.read(); + } else { + ra_register_ = LC.readULEB128(); + } + // we assume this in the state + TORCH_INTERNAL_ASSERT(ra_register_ == 16, "unexpected number of registers"); + if (augmentation_string_ && *augmentation_string_ == 'z') { + augmentation_length_ = LC.readULEB128(); + Lexer A(LC.loc()); + for (auto ap = augmentation_string_ + 1; *ap; ap++) { + switch (*ap) { + case 'L': + lsda_enc = A.read(); + break; + case 'R': + fde_enc = A.read(); + break; + case 'P': { + uint8_t personality_enc = A.read(); + A.readEncoded(personality_enc); + } break; + case 'S': { + // signal handler + } break; + default: { + throw UnwindError("unknown augmentation string"); + } break; + } + } + } + LC.skip(augmentation_length_); + low_pc_ = L.readEncoded(fde_enc); + high_pc_ = low_pc_ + L.readEncodedValue(fde_enc); + + if (hasAugmentation("z")) { + augmentation_length_fde_ = L.readULEB128(); + } + L.readEncodedOr(lsda_enc, 0); + + cie_begin_ = LC.loc(); + fde_begin_ = L.loc(); + cie_end_ = (void*)((const char*)cie_start + cie_length); + fde_end_ = (void*)((const char*)fde_start + length); + } + + // OP Code implementations + + void advance_raw(int64_t amount) { + auto previous_pc = current_pc_; + current_pc_ += amount; + if (LOG) { + (*out_) << (void*)(previous_pc - load_bias_) << "-" + << (void*)(current_pc_ - load_bias_) << ": " << state() << "\n"; + } + } + + void advance_loc(int64_t amount) { + if (LOG) { + (*out_) << 
"advance_loc " << amount << "\n"; + } + advance_raw(amount * code_alignment_factor_); + } + + void offset(int64_t reg, int64_t offset) { + if (LOG) { + (*out_) << "offset " << reg << " " << offset << "\n"; + } + if (reg > (int64_t)state().registers.size()) { + if (LOG) { + (*out_) << "OFFSET OF BIG REGISTER " << reg << "ignored...\n"; + } + return; + } + state().registers.at(reg) = + Action{A_LOAD_CFA_OFFSET, -1, offset * data_alignment_factor_}; + } + + void restore(int64_t reg) { + if (LOG) { + (*out_) << "restore " << reg << "\n"; + } + if (reg > (int64_t)state().registers.size()) { + if (LOG) { + (*out_) << "RESTORE OF BIG REGISTER " << reg << "ignored...\n"; + } + return; + } + state().registers.at(reg) = initial_state_.registers.at(reg); + } + + void def_cfa(int64_t reg, int64_t off) { + if (LOG) { + (*out_) << "def_cfa " << reg << " " << off << "\n"; + } + last_reg_ = reg; + last_offset_ = off; + state().cfa = Action::regPlusData(reg, off); + } + void def_cfa_register(int64_t reg) { + def_cfa(reg, last_offset_); + } + void def_cfa_offset(int64_t off) { + def_cfa(last_reg_, off); + } + + void remember_state() { + if (LOG) { + (*out_) << "remember_state\n"; + } + state_stack_.push_back(state()); + } + void restore_state() { + if (LOG) { + (*out_) << "restore_state\n"; + } + state_stack_.pop_back(); + } + + void undefined(int64_t reg) { + if (LOG) { + (*out_) << "undefined " << reg << "\n"; + } + state().registers.at(reg) = Action::undefined(); + } + void register_(int64_t reg, int64_t rhs_reg) { + if (LOG) { + (*out_) << "register " << reg << " " << rhs_reg << "\n"; + } + state().registers.at(reg) = Action::regPlusData(reg, 0); + } + + TableState& state() { + return state_stack_.back(); + } + + void dump(std::ostream& out) { + out_ = &out; + out << "FDE(augmentation_string=" << augmentation_string_ + << ", low_pc=" << (void*)(low_pc_ - load_bias_) + << ",high_pc=" << (void*)(high_pc_ - load_bias_) + << ",code_alignment_factor=" << code_alignment_factor_ + << ", data_alignment_factor=" << data_alignment_factor_ + << ", ra_register_=" << ra_register_ << ")\n"; + readUpTo(high_pc_); + out_ = &std::cout; + } + + TableState readUpTo(uint64_t addr) { + if (addr < low_pc_ || addr > high_pc_) { + throw UnwindError("Address not in range"); + } + if (LOG) { + (*out_) << "readUpTo " << (void*)addr << " for " << library_name_ + << " at " << (void*)load_bias_ << "\n"; + } + state_stack_.emplace_back(); + current_pc_ = low_pc_; + // parse instructions... 
+ Lexer LC(cie_begin_); + while (LC.loc() < cie_end_ && current_pc_ <= addr) { + readInstruction(LC); + } + if (current_pc_ > addr) { + return state(); + } + + initial_state_ = state_stack_.back(); + + if (LOG) { + (*out_) << "--\n"; + } + + Lexer L(fde_begin_); + while (L.loc() < fde_end_ && current_pc_ <= addr) { + readInstruction(L); + } + // so that we print the full range in debugging + if (current_pc_ <= addr) { + advance_raw(addr - current_pc_); + } + return state(); + } + + void dumpAddr2Line() { + std::cout << "addr2line -f -e " << library_name_ << " " + << (void*)(low_pc_ - load_bias_) << "\n"; + } + + void readInstruction(Lexer& L) { + uint8_t bc = L.read(); + auto op = bc >> 6; + auto lowbits = bc & 0x3F; + switch (op) { + case 0x0: { + switch (lowbits) { + case DW_CFA_nop: { + return; // nop + } + case DW_CFA_advance_loc1: { + auto delta = L.read(); + return advance_loc(delta); + } + case DW_CFA_advance_loc2: { + auto delta = L.read(); + return advance_loc(delta); + } + case DW_CFA_advance_loc4: { + auto delta = L.read(); + return advance_loc(delta); + } + case DW_CFA_restore_extended: { + auto reg = L.readULEB128(); + return restore(reg); + } + case DW_CFA_undefined: { + auto reg = L.readULEB128(); + return undefined(reg); + } + case DW_CFA_register: { + auto reg = L.readULEB128(); + auto rhs_reg = L.readULEB128(); + return register_(reg, rhs_reg); + } + case DW_CFA_def_cfa: { + auto reg = L.readULEB128(); + auto off = L.readULEB128(); + return def_cfa(reg, off); + } + case DW_CFA_def_cfa_register: { + auto reg = L.readULEB128(); + return def_cfa_register(reg); + } + case DW_CFA_def_cfa_offset: { + auto off = L.readULEB128(); + return def_cfa_offset(off); + } + case DW_CFA_offset_extended_sf: { + auto reg = L.readULEB128(); + auto off = L.readSLEB128(); + return offset(reg, off); + } + case DW_CFA_remember_state: { + return remember_state(); + } + case DW_CFA_restore_state: { + return restore_state(); + } + case DW_CFA_GNU_args_size: { + // GNU_args_size, we do not need to know it.. 
+ L.readULEB128(); + return; + } + case DW_CFA_expression: { + auto reg = L.readULEB128(); + auto len = L.readULEB128(); + auto end = (void*)((uint64_t)L.loc() + len); + auto op = L.read(); + if ((op & 0xF0) == 0x70) { // DW_bregX + auto rhs_reg = (op & 0xF); + auto addend = L.readSLEB128(); + if (L.loc() == end) { + state().registers.at(reg) = + Action::regPlusDataDeref(rhs_reg, addend); + return; + } + } + throw UnwindError("Unsupported dwarf expression"); + } + case DW_CFA_def_cfa_expression: { + auto len = L.readULEB128(); + auto end = (void*)((uint64_t)L.loc() + len); + auto op = L.read(); + if ((op & 0xF0) == 0x70) { // DW_bregX + auto rhs_reg = (op & 0xF); + auto addend = L.readSLEB128(); + if (L.loc() != end) { + auto op2 = L.read(); + if (op2 == DW_OP_deref && L.loc() == end) { // deref + state().cfa = Action::regPlusDataDeref(rhs_reg, addend); + return; + } + } + } + throw UnwindError("Unsupported def_cfa dwarf expression"); + } + default: { + std::stringstream ss; + ss << "unknown op code " << (void*)(uint64_t)lowbits; + throw UnwindError(ss.str()); + } + } + } + case DW_CFA_advance_loc: { + return advance_loc(lowbits); + } + case DW_CFA_offset: { + auto off = L.readULEB128(); + return offset(lowbits, off); + } + case DW_CFA_restore: { + return restore(lowbits); + } + } + } + // used for debug printing + const char* library_name_; + uint64_t load_bias_; + + // parsed from the eh_string data structures: + const char* augmentation_string_ = nullptr; + int64_t augmentation_length_ = 0; + int64_t augmentation_length_fde_ = 0; + + int64_t code_alignment_factor_; + int64_t data_alignment_factor_; + void* cie_data_; + + int64_t ra_register_; + uint8_t lsda_enc = DW_EH_PE_omit; + uint8_t fde_enc = DW_EH_PE_absptr; + uint64_t low_pc_ = UINT64_MAX; + uint64_t high_pc_ = UINT64_MAX; + + void* cie_begin_; + void* fde_begin_; + void* cie_end_; + void* fde_end_; + + // state accumulated while parsing instructions + int64_t last_reg_ = 0; + int64_t last_offset_ = 0; + uint64_t current_pc_; + + TableState + initial_state_; // state after the initial instructions, used by restore + std::vector state_stack_; + + std::ostream* out_ = &std::cout; // for debug dumping + private: + bool hasAugmentation(const char* s) { + return strstr(augmentation_string_, s) != nullptr; + } +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/lexer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..0c1d33abe4e9edbf833ac6410bafcdcd8d8bc62a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/lexer.h @@ -0,0 +1,124 @@ +#pragma once +#include +#include + +#include +#include + +struct Lexer { + Lexer(void* data, void* base = nullptr) + : next_((const char*)data), base_((int64_t)base) {} + + template + T read() { + T result; + memcpy(&result, next_, sizeof(T)); + next_ += sizeof(T); + return result; + } + + // SLEB/ULEB code adapted from LLVM equivalents + int64_t readSLEB128() { + int64_t Value = 0; + unsigned Shift = 0; + uint8_t Byte; + do { + Byte = read(); + uint64_t Slice = Byte & 0x7f; + if ((Shift >= 64 && Slice != (Value < 0 ? 0x7f : 0x00)) || + (Shift == 63 && Slice != 0 && Slice != 0x7f)) { + throw UnwindError("sleb128 too big for int64"); + } + Value |= Slice << Shift; + Shift += 7; + } while (Byte >= 128); + // Sign extend negative numbers if needed. 
+ if (Shift < 64 && (Byte & 0x40)) { + Value |= (-1ULL) << Shift; + } + return Value; + } + + uint64_t readULEB128() { + uint64_t Value = 0; + unsigned Shift = 0; + uint8_t p; + do { + p = read(); + uint64_t Slice = p & 0x7f; + if ((Shift >= 64 && Slice != 0) || Slice << Shift >> Shift != Slice) { + throw UnwindError("uleb128 too big for uint64"); + } + Value += Slice << Shift; + Shift += 7; + } while (p >= 128); + return Value; + } + const char* readCString() { + auto result = next_; + next_ += strlen(next_) + 1; + return result; + } + int64_t readEncoded(uint8_t enc) { + int64_t r = 0; + switch (enc & (~DW_EH_PE_indirect & 0xF0)) { + case DW_EH_PE_absptr: + break; + case DW_EH_PE_pcrel: + r = (int64_t)next_; + break; + case DW_EH_PE_datarel: + r = base_; + break; + default: + throw UnwindError("unknown encoding"); + } + return r + readEncodedValue(enc); + } + int64_t readEncodedOr(uint8_t enc, int64_t orelse) { + if (enc == DW_EH_PE_omit) { + return orelse; + } + return readEncoded(enc); + } + int64_t read4or8Length() { + int64_t length = read(); + if (length == 0xFFFFFFFF) { + length = read(); + } + return length; + } + void* loc() const { + return (void*)next_; + } + Lexer& skip(int64_t bytes) { + next_ += bytes; + return *this; + } + int64_t readEncodedValue(uint8_t enc) { + switch (enc & 0xF) { + case DW_EH_PE_udata2: + return read(); + case DW_EH_PE_sdata2: + return read(); + case DW_EH_PE_udata4: + return read(); + case DW_EH_PE_sdata4: + return read(); + case DW_EH_PE_udata8: + return read(); + case DW_EH_PE_sdata8: + return read(); + case DW_EH_PE_uleb128: + return readULEB128(); + case DW_EH_PE_sleb128: + return readSLEB128(); + default: + throw UnwindError("not implemented"); + } + } + + private: + const char* next_; + int64_t base_; +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwind.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwind.h new file mode 100644 index 0000000000000000000000000000000000000000..69b27f49e5b79988da6f68345cf61db0f802ec89 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwind.h @@ -0,0 +1,40 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace unwind { +// gather current stack, relatively fast. +// gets faster once the cache of program counter locations is warm. +TORCH_API std::vector unwind(); + +struct Frame { + std::string filename; + std::string funcname; + uint64_t lineno; +}; + +// note: symbolize is really slow +// it will launch an addr2line process that has to parse dwarf +// information from the libraries that frames point into. +// Callers should first batch up all the unique void* pointers +// across a number of unwind states and make a single call to +// symbolize. 
+TORCH_API std::vector symbolize(const std::vector& frames); + +// returns path to the library, and the offset of the addr inside the library +TORCH_API c10::optional> libraryFor( + void* addr); + +struct Stats { + size_t hits = 0; + size_t misses = 0; + size_t unsupported = 0; + size_t resets = 0; +}; +Stats stats(); + +} // namespace unwind +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwind_error.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwind_error.h new file mode 100644 index 0000000000000000000000000000000000000000..af2e4dff010903e24ef4d1c5aa81228a81a167a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwind_error.h @@ -0,0 +1,6 @@ +#pragma once +#include + +struct UnwindError : public std::runtime_error { + using std::runtime_error::runtime_error; +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwinder.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwinder.h new file mode 100644 index 0000000000000000000000000000000000000000..1d0a30e2f919fa2b6eada88c817aa327d993bde2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/unwind/unwinder.h @@ -0,0 +1,77 @@ +#pragma once +#include +#include +#include +#include + +struct UnwindState { + int64_t rip, rbp, rsp; +}; + +struct Unwinder { + Unwinder(Action rsp, Action rip, Action rbp) + : kind_(rip.kind == A_UNDEFINED ? END : STANDARD), + reg_(rsp.reg), + off_(rsp.data), + rip_off_(rip.data), + rbp_off_( + rbp.kind == A_UNDEFINED ? std::numeric_limits::max() + : rbp.data), + deref_(rsp.kind == A_REG_PLUS_DATA_DEREF) { + check(rsp.reg == D_RSP || rsp.reg == D_RBP); + check(rip.kind == A_UNDEFINED || rip.kind == A_LOAD_CFA_OFFSET); + if (rsp.kind == A_REG_PLUS_DATA) { + check(rbp.kind == A_LOAD_CFA_OFFSET || rbp.kind == A_UNDEFINED); + } else if (rsp.kind == A_REG_PLUS_DATA_DEREF) { + if (rbp.kind == A_REG_PLUS_DATA_DEREF) { + check(rbp.reg == rsp.reg); + rbp_off_ -= rsp.data; + } else { + check(rbp.kind == A_UNDEFINED); + } + } else { + check(false); + } + } + void check(bool cond) { + if (!cond) { + throw UnwindError("Unwinding actions do not follow supported patterns"); + } + } + bool terminator() const { + return kind_ != STANDARD; + } + bool isUnknown() const { + return kind_ == UNKNOWN; + } + // unwinder representing some pattern unsupported in + // current implementation + static Unwinder unknown() { + return Unwinder(); + } + UnwindState run(const UnwindState& cur) const { + UnwindState r = cur; + r.rsp = (reg_ == D_RSP ? cur.rsp : cur.rbp) + off_; + r.rbp = rbp_off_ == std::numeric_limits::max() + ? 
cur.rbp + // NOLINTNEXTLINE(performance-no-int-to-ptr) + : *(int64_t*)(r.rsp + rbp_off_); + if (deref_) { + // NOLINTNEXTLINE(performance-no-int-to-ptr) + r.rsp = *(int64_t*)r.rsp; + } + // NOLINTNEXTLINE(performance-no-int-to-ptr) + r.rip = *(int64_t*)(r.rsp + rip_off_); + + return r; + } + + private: + Unwinder() : kind_(UNKNOWN), reg_(0), off_(0), rip_off_(0), rbp_off_(0) {} + enum Kind { STANDARD, END, UNKNOWN } kind_; + uint32_t reg_; + int64_t off_; + int64_t rip_off_; + int64_t rbp_off_; + bool deref_{false}; +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/util.h new file mode 100644 index 0000000000000000000000000000000000000000..4b565c691ca047de3ed21fd7ea4a6a6b1557739c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/util.h @@ -0,0 +1,170 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +// TODO: replace with pytorch/rfcs#43 when it is ready. +#define SOFT_ASSERT(cond, ...) \ + [&]() -> bool { \ + if (C10_UNLIKELY(!(cond))) { \ + torch::profiler::impl::logSoftAssert( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + #cond, \ + ::c10::str(__VA_ARGS__)); \ + if (torch::profiler::impl::softAssertRaises()) { \ + TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__); \ + } else { \ + TORCH_WARN(__VA_ARGS__); \ + } \ + return false; \ + } \ + return true; \ + }() + +namespace torch { +namespace profiler { +namespace impl { +TORCH_API bool softAssertRaises(); +TORCH_API void setSoftAssertRaises(c10::optional value); +TORCH_API void logSoftAssert( + const char* func, + const char* file, + uint32_t line, + const char* cond, + const char* args); +TORCH_API inline void logSoftAssert( + const char* func, + const char* file, + uint32_t line, + const char* cond, + ::c10::detail::CompileTimeEmptyString args) { + logSoftAssert(func, file, line, cond, (const char*)args); +} +TORCH_API void logSoftAssert( + const char* func, + const char* file, + uint32_t line, + const char* cond, + const std::string& args); + +using shape = + std::variant, std::vector>>; +constexpr int TENSOR_LIST_DISPLAY_LENGTH_LIMIT = 30; + +std::string getNvtxStr( + const char* name, + int64_t sequence_nr, + const std::vector>& shapes, + at::RecordFunctionHandle op_id = 0, + const std::list>& input_op_ids = + {}); + +struct TORCH_API FileLineFunc { + std::string filename; + size_t line; + std::string funcname; +}; + +TORCH_API std::vector prepareCallstack( + const std::vector& cs); +TORCH_API std::vector callstackStr( + const std::vector& cs); +TORCH_API std::string stacksToStr( + const std::vector& stacks, + const char* delim); +TORCH_API std::vector> inputSizes( + const at::RecordFunction& fn, + const bool flatten_list_enabled = false); +TORCH_API std::string variantShapesToStr(const std::vector& shapes); +TORCH_API std::string shapesToStr( + const std::vector>& shapes); +TORCH_API std::string strListToStr(const std::vector& types); +TORCH_API std::string inputOpIdsToStr( + const std::list>& input_op_ids); +TORCH_API std::string ivalueListToStr(const std::vector& list); +TORCH_API std::vector inputTypes(const at::RecordFunction& fn); + +std::unordered_map TORCH_API +saveExtraArgs(const at::RecordFunction& fn); +std::unordered_map TORCH_API +saveNcclMeta(const at::RecordFunction& fn); + +uint64_t TORCH_API computeFlops( + const std::string& op_name, + const 
std::unordered_map& extra_args); + +std::string shapeToStr(const std::vector& shape); + +template +class TORCH_API GlobalStateManager { + public: + static GlobalStateManager& singleton() { + static GlobalStateManager singleton_; + return singleton_; + } + + static void push(std::shared_ptr&& state) { + if (singleton().state_) { + LOG(WARNING) << "GlobalStatePtr already exists!"; + } else { + singleton().state_ = std::move(state); + } + } + + static auto* get() { + return singleton().state_.get(); + } + + static std::shared_ptr pop() { + auto out = singleton().state_; + singleton().state_.reset(); + return out; + } + + private: + GlobalStateManager() = default; + + std::shared_ptr state_; +}; + +struct HashCombine { + template + size_t operator()(const std::pair& i) { + return c10::get_hash((*this)(i.first), (*this)(i.second)); + } + + template + size_t operator()(const std::tuple& i) { + return c10::get_hash(i); + } + + template + size_t operator()(const T& i) { + return c10::get_hash(i); + } +}; + +} // namespace impl +} // namespace profiler +} // namespace torch + +namespace torch { +namespace autograd { +namespace profiler { +using torch::profiler::impl::computeFlops; +} // namespace profiler +} // namespace autograd +} // namespace torch
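The unwind.h comment above advises collecting raw program-counter addresses with unwind() and resolving them in one batched symbolize() call, because each symbolize() launches an addr2line process. The sketch below illustrates that batching pattern; it assumes the element types stripped from this diff are void* for unwind() and torch::unwind::Frame for symbolize(), and that the header is reachable at the path shown in the diff header.

#include <iostream>
#include <vector>

#include <torch/csrc/profiler/unwind/unwind.h>

int main() {
  // Capture a few raw stacks cheaply; only program-counter addresses are
  // recorded at this stage.
  std::vector<std::vector<void*>> raw_stacks;
  for (int i = 0; i < 3; ++i) {
    raw_stacks.push_back(torch::unwind::unwind());
  }

  // Batch every address into a single list (deduplication omitted for
  // brevity) so the expensive addr2line pass runs only once.
  std::vector<void*> all_pcs;
  for (const auto& stack : raw_stacks) {
    all_pcs.insert(all_pcs.end(), stack.begin(), stack.end());
  }
  std::vector<torch::unwind::Frame> frames = torch::unwind::symbolize(all_pcs);

  for (const auto& f : frames) {
    std::cout << f.funcname << " (" << f.filename << ":" << f.lineno << ")\n";
  }
  return 0;
}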