diff --git a/ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..d42627a4a61a6d715504f6f383f69e1680999440 --- /dev/null +++ b/ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3599b233683cf5c07d6f254c52c98de7eaa4f5e3539175700226804e72b3359e +size 9372 diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h new file mode 100644 index 0000000000000000000000000000000000000000..bf8bb7cdef8c06cd638532c4671cd2875b7d7796 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h @@ -0,0 +1,174 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +class RecvRpcBackward; + +// DistAutogradContext which stores information for a single distributed +// autograd pass on a worker. +class TORCH_API DistAutogradContext { + public: + using GradCallback = std::function; + + explicit DistAutogradContext(int64_t contextId); + + // Retrieves the autograd context id for this context. + int64_t contextId() const; + + // Records a 'send' autograd function for this context with the provided + // message id. + void addSendFunction( + const std::shared_ptr& func, + int64_t autograd_message_id); + + // Records a 'recv' autograd function for this context with the provided + // message id. + void addRecvFunction( + std::shared_ptr& func, + int64_t autograd_message_id); + + // Given an autograd_message_id, retrieve the appropriate send function. + std::shared_ptr retrieveSendFunction( + int64_t autograd_message_id); + + // Return all send functions for this context. + std::unordered_map> sendFunctions() + const; + + // Return all recv functions for this context. + std::unordered_map> recvFunctions() + const; + + // Adds a future message recording an outstanding RPC. + void addOutstandingRpc(const c10::intrusive_ptr& jitFuture); + + // Returns all gradients. + const c10::Dict getGradients() const; + + // This function gives a mutable grad reference to the callback. + // If the callback returns true, it means the grad in the context + // needs to be updated. + void runGradCallbackForVariable( + const torch::autograd::Variable& variable, + GradCallback&& cb); + + DistAutogradContext(const DistAutogradContext&) = delete; + DistAutogradContext& operator=(const DistAutogradContext&) = delete; + DistAutogradContext(DistAutogradContext&&) = delete; + DistAutogradContext& operator=(DistAutogradContext&&) = delete; + + // records the workerID of a node that we sent an RPC to. + // workerIDs are added here when we attach a send function to this autograd + // context + void addKnownWorkerId(const rpc::worker_id_t workerId); + + // Retrieves a set containing the known workerIds for this context + // These are the different workers that this context has sent RPCs to. 
+ std::unordered_set getKnownWorkerIds() const; + + private: + friend class BackwardPassCleanupGuard; + friend class DistEngine; + friend class RecvRpcBackward; + friend class DistAccumulateGradCaptureHook; + + // Record that we would like to accumulate the provided gradient on the given + // variable. + void accumulateGrad( + const torch::autograd::Variable& variable, + const torch::Tensor& grad, + size_t num_expected_refs); + + // Retrieve the GraphTask. + std::shared_ptr retrieveGraphTask(); + + // Set the appropriate graph task for the backward pass. Can be called only + // once. + void setGraphTask(std::shared_ptr graphTask); + + // Resets the graph task to ensure we can run another distributed backward + // pass for the same autograd context. + void resetGraphTask(); + + // Waits for all outstanding RPCs for this context to finish and clears all + // outstanding rpcs held in this context. This should be called only once. + c10::intrusive_ptr clearAndWaitForOutstandingRpcsAsync(); + + void clearOutstandingRpcs(); + + // Record an event to mark the completion of gradient computation. These + // events will later help to properly synchronize gradients consumptions + // in getGradients(). We need these events because backward and + // optimizer.step are separate RPC calls, and will occur on different CUDA + // streams. Without synchronization, it is possible that gradients are + // consumed before they are ready. + void recordGradEvent(c10::Device device); + + const int64_t contextId_; + + // Set containing known worker IDs, used in cleaning up autograd context. + // Whenever a sendRpcBackward is attached to the autograd graph for this + // context, the destination is added here. + std::unordered_set knownWorkerIds_; + + // Map from autograd_message_id to appropriate 'send' autograd function. + std::unordered_map> + sendAutogradFunctions_; + + // Map from autograd_message_id to appropriate 'recv' autograd function. + std::unordered_map> + recvAutogradFunctions_; + + // Gradients accumulated in this context so far. The key is the variable on + // which the gradient needs to be accumulated and the value is the gradient + // that needs to be accumulated on that variable.. + c10::Dict accumulatedGrads_; + + // See comments for recordGradEvent(c10::Device device); + std::unordered_map gradReadyEvents_; + const c10::impl::VirtualGuardImpl impl_; + + // The autograd GraphTask for the backward pass on this node for this context. + std::shared_ptr graphTask_; + + // List of futures for RPCs initiated by this node to propagate gradients to + // other nodes. The distributed autograd engine on this node can return + // successfully only if all these futures are done and are successful. + std::vector> outStandingRpcs_; + + // Lock to protect concurrent modification of the context. + mutable std::mutex lock_; +}; + +using ContextPtr = std::shared_ptr; + +// This class stores a shared_ptr to a DistAutogradContext instance in a +// thread local variable. The instance is given by the call site. The class +// doesn't know the current context. It's just a util class. +class TORCH_API ThreadLocalDistAutogradContext { + public: + // Store 'new_context' to the thread local variable maintained by this class. + explicit ThreadLocalDistAutogradContext(ContextPtr&& new_context); + ~ThreadLocalDistAutogradContext(); + + // Retrieve the stored DistAutogradContext instance. 
+ static ContextPtr getContextPtr(); + + private: + ContextPtr prev_context_ptr_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..6e6678b1289859eb162a96ccd3063d94a9e5e0fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Forward declarations. +class DistAutogradContext; + +// As part of our distributed autograd implementation, whenever we receive an +// RPC from a node, we add a 'RecvRpcBackward' autograd function to the +// autograd graph. This is more or less a placeholder function that is used to +// pass gradients to the remote host during the backward pass. The inputs to the +// RPC function are the inputs to this autograd function. +class TORCH_API RecvRpcBackward : public torch::autograd::Node { + public: + explicit RecvRpcBackward( + const AutogradMetadata& autogradMetadata, + std::shared_ptr autogradContext, + rpc::worker_id_t fromWorkerId, + rpc::DeviceMap deviceMap); + + torch::autograd::variable_list apply( + torch::autograd::variable_list&& grads) override; + + private: + const AutogradMetadata autogradMetadata_; + + // Hold a weak reference to the autograd context to avoid circular + // dependencies with the context (since it holds a reference to + // RecvRpcBackward). + std::weak_ptr autogradContext_; + + // The worker id from which the RPC was received. During the backward pass, + // we need to propagate the gradients to this workerId. + rpc::worker_id_t fromWorkerId_; + + // Device mapping for tensors sent over RPC. + const rpc::DeviceMap deviceMap_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..ff576ace174fdfae29abafdec3678532e03cc29d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// As part of our distributed autograd implementation, whenever we send an RPC +// from one node to another, we add a 'SendRpcBackward' autograd function to the +// autograd graph. This is more or less a placeholder function that is used to +// kickoff the autograd engine on the current worker on the backward pass. The +// edges for this autograd function are the inputs to the RPC method. +// +// During the backward pass, this function is queued for execution in the +// autograd engine which eventually runs the rest of the autograd graph. +struct TORCH_API SendRpcBackward : public torch::autograd::Node { + public: + torch::autograd::variable_list apply( + torch::autograd::variable_list&& inputs) override; + + // SendRpcBackward is actually the root of an autograd graph on the local + // node. 
As a result, it doesn't receive any 'inputs', but rather the RPC + // framework passes gradients over to this function to kickoff local autograd + // computation. + void setGrads(const torch::autograd::variable_list& grads); + + // Retrieve the grads for the function. + const torch::autograd::variable_list& getGrads() const; + + private: + torch::autograd::variable_list grads_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h new file mode 100644 index 0000000000000000000000000000000000000000..1d5aefbd2010a76a05ab90de3c71d096377ecc82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// This structure represents autograd metadata that we need to pass across +// different nodes when we call an RPC which needs autograd computation. +struct TORCH_API AutogradMetadata { + AutogradMetadata(int64_t autogradContextId, int64_t autogradMessageId); + + // autogradContextId_ is a globally unique integer that identifies a + // particular distributed autograd pass. + int64_t autogradContextId; + // autogradMessageId_ is a globally unique integer that identifies a pair + // of send/recv autograd functions. + int64_t autogradMessageId; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h new file mode 100644 index 0000000000000000000000000000000000000000..525790b8c86b4d10f03b6f84a73a31c6a253bef1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Used to request other workers to clean up their autograd context. +class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase { + public: + explicit CleanupAutogradContextReq(int64_t context_id); + // Serialization and deserialization methods. + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + + // Retrieve the context id we are cleaning up with this message. 
+ int64_t getContextId(); + + private: + int64_t context_id_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..5b55fad7190c6efa71020dd024c7da62f58acfd9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Empty response for CleanupAutogradContextReq. Send to acknowledge receipt of +// a CleanupAutogradContextReq. +class TORCH_API CleanupAutogradContextResp : public rpc::RpcCommandBase { + public: + CleanupAutogradContextResp() = default; + // Serialization and deserialization methods. + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h new file mode 100644 index 0000000000000000000000000000000000000000..4bb58f35852137492d3886095242b77245224c5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Used to propagate gradients from one node to another during a distributed +// backwards pass. This RPC call is invoked when we hit a `recv` autograd +// function during backward pass execution. +class TORCH_API PropagateGradientsReq : public rpc::RpcCommandBase { + public: + PropagateGradientsReq( + const AutogradMetadata& autogradMetadata, + std::vector grads, + bool retainGraph = false); + + const AutogradMetadata& getAutogradMetadata(); + + const std::vector& getGrads(); + + // Serialization and deserialization methods. + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + + // Whether or not to retain the autograd graph. + bool retainGraph(); + + private: + AutogradMetadata autogradMetadata_; + std::vector grads_; + bool retainGraph_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..5e2ed0f0e34eb09f0ec029a6491ffd988042516c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Response for the PropagateGradients call. 
Currently, this class is mostly +// just a placeholder and sends an empty message over the wire. The purpose of +// this RPC command is to indicate whether or not the PropagateGradientsReq call +// was successfully or not. +class TORCH_API PropagateGradientsResp : public rpc::RpcCommandBase { + public: + PropagateGradientsResp() = default; + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h new file mode 100644 index 0000000000000000000000000000000000000000..6d0b6111cc88cd5a1df33d334851f8d17e166941 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Represents an RPC that includes autograd information. This class basically +// wraps another `RpcCommandBase` object which represents the actual RPC and has +// additional autograd information associated with that RPC. +class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase { + public: + // Used when we are sending an RPC over the wire. + RpcWithAutograd( + rpc::worker_id_t fromWorkerId, + rpc::MessageType messageType, + const AutogradMetadata& autogradMetadata, + c10::intrusive_ptr wrappedMessage, + rpc::DeviceMap deviceMap = {}); + + // Used when receiving an RPC over the wire. + RpcWithAutograd( + rpc::worker_id_t fromWorkerId, + rpc::MessageType messageType, + const AutogradMetadata& autogradMetadata, + std::unique_ptr wrappedRpc, + rpc::MessageType wrappedMessageType, + std::vector tensors, + rpc::DeviceMap deviceMap = {}); + + c10::intrusive_ptr toMessageImpl() && override; + + static std::unique_ptr fromMessage( + const rpc::Message& message); + + // Retrieves tensors as part of this RPC, which need to be considered for + // autograd computations. + std::vector& tensors(); + + const AutogradMetadata& autogradMetadata() const; + + RpcCommandBase& wrappedRpc(); + + void setWrappedRpc(std::unique_ptr wrappedRpc); + + std::unique_ptr moveWrappedRpc() &&; + + // Message type of the wrapped RPC. + rpc::MessageType wrappedMessageType() const; + + // Retrieve the worker id from which the RPC originated. + rpc::worker_id_t fromWorkerId() const; + + // Retrieve the device map. + const rpc::DeviceMap& deviceMap(); + + private: + // WorkerId from which this RPC originated. This is necessary for knowing + // which worker we need to contact during the backward pass. + rpc::worker_id_t fromWorkerId_; + + // Message type for this call. + rpc::MessageType messageType_; + + AutogradMetadata autogradMetadata_; + + // Since wrappedMessage_ is destructively constructed from wrappedRpc_, + // they are valid exclusively. They are used for different purpose. + // wrappedRpc_ is used while constructing receive rpcWithAutograd; + // wrappedMessage_ is used while constructing send rpcWithAutograd; + + // When receive rpcWithAutograd is constructed fromMessage, it is valid; + // When send rpcWithAutograd is constructed before toMessage, it is nullptr; + std::unique_ptr wrappedRpc_; + + // Serialized message representing wrappedRpc_. 
Used mostly as a cache to + // avoid serializing the request twice. + // When receive rpcWithAutograd is constructed fromMessage, it is nullptr; + // When send rpcWithAutograd is constructed before toMessage, it is valid; + c10::intrusive_ptr wrappedMessage_; + + // message type of the wrappedMessage, this is stored separately since + // wrappedMessage_ is not always guaranteed to be populated. + rpc::MessageType wrappedMessageType_; + + // Tensors part of the wrappedRpc that need to be considered for autograd. + std::vector tensors_; + + // Device mapping for tensors that are sent across an RPC to another node. + rpc::DeviceMap deviceMap_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h new file mode 100644 index 0000000000000000000000000000000000000000..e25728d79194aed9053b2475d48580f735ba53c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h @@ -0,0 +1,62 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +class TORCH_API RpcWithProfilingReq : public rpc::RpcCommandBase { + public: + // For sending RPCs, invoked when client is creating this RPC command. + RpcWithProfilingReq( + rpc::MessageType messageType, + c10::intrusive_ptr wrappedMessage, + torch::autograd::profiler::ProfilerConfig&& profilerConfig, + rpc::ProfilingId profilingKeyId); + + // For receiving an RPC + // Used in fromMessage. + RpcWithProfilingReq( + rpc::MessageType messageType, + std::unique_ptr wrappedRpc, + rpc::MessageType wrappedMessageType, + std::vector tensors, + torch::autograd::profiler::ProfilerConfig&& profilerConfig, + rpc::ProfilingId profilingKeyId); + + // Convert this RPC Command to a Message that can be sent over the wire. + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + + // Retrieve the profiling data that is associated with this command. + torch::autograd::profiler::ProfilerConfig getProfilingConfig() const; + // Retrieve the globally unique profiling ID corresponding to this command. + const rpc::ProfilingId& getProfilingId() const; + // Retrieve the original RPC which this ProfilingRPC wraps. + RpcCommandBase& wrappedRpc(); + // Destructively move the wrapped RPC. 
+ std::unique_ptr moveWrappedRpc() &&; + // Message type of the wrapped RPC + rpc::MessageType wrappedMessageType() const; + void setWrappedRpc(std::unique_ptr wrappedRpc); + + private: + // message type + const rpc::MessageType messageType_; + // wrapped message + c10::intrusive_ptr wrappedMessage_; + std::unique_ptr wrappedRpc_; + rpc::MessageType wrappedMessageType_; + std::vector tensors_; + const torch::autograd::profiler::ProfilerConfig profilerConfig_; + const rpc::ProfilingId profilingKeyId_; +}; +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..fef0055e04be28873dc684bd79b168273e68fe50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { +class TORCH_API RpcWithProfilingResp : public rpc::RpcCommandBase { + public: + // For sending RPCs over the wire + RpcWithProfilingResp( + rpc::MessageType messageType, + c10::intrusive_ptr wrappedMessage, + std::vector profiledEvents, + rpc::ProfilingId profilingId); + + // For receiving RPCs. Used in from message when converting a message received + // over the wire. + RpcWithProfilingResp( + rpc::MessageType messageType, + std::unique_ptr wrappedRpc, + rpc::MessageType wrappedMessageType, + std::vector tensors, + std::vector profiledEvents, + rpc::ProfilingId profilingId); + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + // Retrieve remote Events + std::vector getProfiledEvents() const; + // Retrieve the globally unique profiling ID corresponding to this command. + const rpc::ProfilingId& getProfilingId() const; + // Retrieve the original RPC which this ProfilingRPC wraps. + RpcCommandBase& wrappedRpc(); + // Destructively move the wrapped RPC. + std::unique_ptr moveWrappedRpc() &&; + // Message type of the wrapped RPC + rpc::MessageType wrappedMessageType() const; + // Set the wrapped RPC for this RPC. 
+ void setWrappedRpc(std::unique_ptr wrappedRpc); + + private: + // message type + const rpc::MessageType messageType_; + // wrapped message + c10::intrusive_ptr wrappedMessage_; + std::unique_ptr wrappedRpc_; + rpc::MessageType wrappedMessageType_; + std::vector tensors_; + const std::vector profiledEvents_; + const rpc::ProfilingId profilingId_; +}; +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h new file mode 100644 index 0000000000000000000000000000000000000000..6dc4413cfa50980af4df98bd88c9fd57e86a2a75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Internal system RPC to invoke distributed backward pass on remote nodes when +// 'rref.backward()' is invoked. +class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase { + public: + RRefBackwardReq( + const rpc::RRefId& rrefId, + int64_t autogradContextId, + bool retainGraph = false); + + const rpc::RRefId& getRRefId() const; + + int64_t getAutogradContextId() const; + + bool retainGraph() const; + + // Serialization and deserialization methods. + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); + + private: + const rpc::RRefId rrefId_; + const int64_t autogradContextId_; + const bool retainGraph_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..2ce4d6f3fa84264dee58733070a7c42592f1af53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Response for the RRefBackwardReq. 
+class TORCH_API RRefBackwardResp : public rpc::RpcCommandBase { + public: + RRefBackwardResp() = default; + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const rpc::Message& message); +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp new file mode 100644 index 0000000000000000000000000000000000000000..44888a2442f09207963e6f7a09d9e4c9108b4822 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp @@ -0,0 +1,408 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +constexpr auto kBackendDefaultTimeout = + std::chrono::milliseconds(30 * 60 * 1000); + +namespace c10d { + +class TORCH_API Backend : public torch::CustomClassHolder { + public: + // Backend Options is a base struct that defines the basic options + // when constructing a Backend. Each Backend subclass should + // extend this struct and define its options if it wants to provide more + // config options (beyond basic ones defined here) to end user. + struct TORCH_API Options : torch::CustomClassHolder { + explicit Options( + std::string backend, + std::chrono::milliseconds timeout = kBackendDefaultTimeout) + : timeout(timeout), backend(std::move(backend)) {} + ~Options() override = default; + + std::chrono::milliseconds timeout; + + // backend name + const std::string backend; + }; + + explicit Backend(int rank, int size); + ~Backend() override = 0; + + int getRank() const { + return rank_; + } + + int getSize() const { + return size_; + } + + // Returns an unique opaque ID of this backend that can be used to correlate + // with its collectives. 
+ int64_t getID() const { + return reinterpret_cast(this); + } + + virtual bool supportsSplitting() const { + return false; + } + + virtual void startCoalescing() { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not implement startCoalescing")); + } + + virtual c10::intrusive_ptr endCoalescing() { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not implement endCoalescing")); + } + + // Subclasses must override this method to return the backend name + virtual const std::string getBackendName() const { + TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented."); + }; + + virtual c10::intrusive_ptr broadcast( + std::vector& /* tensors */, + const BroadcastOptions& /* opts */ = BroadcastOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support broadcast")); + } + + virtual c10::intrusive_ptr allreduce( + std::vector& /* tensors */, + const AllreduceOptions& /* opts */ = AllreduceOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support allreduce")); + } + + virtual c10::intrusive_ptr allreduce_sparse( + std::vector& /* tensors */, + const AllreduceOptions& /* opts */ = AllreduceOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allreduce sparse")); + } + + virtual c10::intrusive_ptr allreduce_coalesced( + std::vector& /* tensors */, + const AllreduceCoalescedOptions& /* opts */ = + AllreduceCoalescedOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allreduce_coalesced")); + } + + virtual c10::intrusive_ptr reduce( + std::vector& /* tensors */, + const ReduceOptions& /* opts */ = ReduceOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support reduce")); + } + + virtual c10::intrusive_ptr allgather( + std::vector>& /* outputTensors */, + std::vector& /* inputTensors */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support allgather")); + } + + // Gathers a single tensor inputBuffer into a single buffer outputBuffer that + // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE. + // For implementers of ProcessGroup API and advanced users only. + // Note: this function will be deprecated in near future. + virtual c10::intrusive_ptr _allgather_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support _allgather_base")); + } + + // This function is deprecated and will be moved out of Backend to comms: + // * do not add dependencies on this function, + // * do not implement it in your Backend, implement _allgather_base + // instead. + virtual c10::intrusive_ptr allgather_coalesced( + std::vector>& /* outputTensorLists */, + std::vector& /* inputTensors */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allgather_coalesced")); + } + + // This function is a coalesced version of `allgather_into_tensor` (currently + // still named as `_allgather_base`). Each tensor in the vector corresponds to + // an input/output of one `allgather_into_tensor` operation. 
+ virtual c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& /* outputs */, + std::vector& /* inputs */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support allgather_into_tensor_coalesced")); + } + + virtual c10::intrusive_ptr gather( + std::vector>& /* outputTensors */, + std::vector& /* inputTensors */, + const GatherOptions& /* opts */ = GatherOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support gather")); + } + + virtual c10::intrusive_ptr scatter( + std::vector& /* outputTensors */, + std::vector>& /* inputTensors */, + const ScatterOptions& /* opts */ = ScatterOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support scatter")); + } + + virtual c10::intrusive_ptr reduce_scatter( + std::vector& /* outputTensors */, + std::vector>& /* inputTensors */, + const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support reduce_scatter")); + } + + virtual c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support _reduce_scatter_base")); + } + + // This function is a coalesced version of `reduce_scatter_tensor` (currently + // still named as `_reduce_scatter_base`). Each tensor in the vector + // corresponds to an input/output of one `reduce_scatter_tensor` operation. + virtual c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& /* outputs */, + std::vector& /* inputs */, + const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", + getBackendName(), + " does not support reduce_scatter_tensor_coalesced")); + } + + virtual c10::intrusive_ptr alltoall_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + std::vector& /* outputSplitSizes */, + std::vector& /* inputSplitSizes */, + const AllToAllOptions& /* opts */ = AllToAllOptions()) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support alltoall_base")); + } + + virtual c10::intrusive_ptr alltoall( + std::vector& /* outputTensors */, + std::vector& /* inputTensors */, + const AllToAllOptions& opts = AllToAllOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support alltoall")); + } + + virtual void monitoredBarrier( + const BarrierOptions& /* unused */, + bool /* unused */ = false) { + auto backendName = getBackendName(); + TORCH_CHECK( + false, + c10::str( + "Backend ", + backendName, + " does not support monitoredBarrier, only GLOO supports monitored barrier.")); + } + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + virtual void setSequenceNumberForGroup() { + auto backendName = getBackendName(); + TORCH_CHECK( + false, + c10::str( + "Backend ", + backendName, + " does not yet support sequence numbers.")); + } + + // Retrieves the current sequence number for the whole group, which should be + // in sync. 
If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + virtual uint64_t getSequenceNumberForGroup() { + auto backendName = getBackendName(); + TORCH_CHECK( + false, + c10::str( + "Backend ", + backendName, + " does not yet support sequence numbers.")); + } + + virtual c10::intrusive_ptr send( + std::vector& /* tensors */, + int /* dstRank */, + int /* tag */) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support send")); + } + + virtual c10::intrusive_ptr recv( + std::vector& /* tensors */, + int /* srcRank */, + int /* tag */) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support recv")); + } + + virtual c10::intrusive_ptr recvAnysource( + std::vector& /* tensors */, + int /* tag */) { + TORCH_CHECK( + false, + c10::str( + "Backend ", getBackendName(), " does not support recvAnysource")); + } + + virtual c10::intrusive_ptr barrier( + const BarrierOptions& /* opts */ = BarrierOptions()) { + TORCH_CHECK( + false, + c10::str("Backend ", getBackendName(), " does not support barrier")); + } + + virtual void registerOnCompletionHook( + std::function)>&& hook) { + TORCH_CHECK( + false, + "Only ProcessGrouppNCCL supports onCompletion hook, but got ", + getBackendName(), + " backend."); + } + + virtual void waitForPendingWorks() { + TORCH_CHECK( + false, + "Only ProcessGrouppNCCL supports waitForPendingWorks, but got ", + getBackendName(), + " backend."); + } + + virtual void enableCollectivesTiming() { + TORCH_CHECK( + false, + "Backend ", + getBackendName(), + " is missing implementation of enableCollectivesTiming."); + } + + bool hasHooks() const { + return onCompletionHook_ != nullptr; + } + + // Do not call this directly, use ProcessGroup::setGroupName instead. + void setGroupName(const std::string& name) { + pg_name_ = name; + } + + const std::string& getGroupName() const { + return pg_name_; + } + + // See similar functions in ProcessGroup.hpp for context. + c10::optional getBoundDeviceId() const { + return bound_device_id_; + } + + // Perform an eager connect to the specified device if the backend supports + // it. + virtual void eagerConnectSingleDevice(at::Device device) { + // no-op in the default case; this is an optimization some + // backends may perform + } + + void setBoundDeviceId(c10::optional device) { + if (device) { + TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index"); + } + bound_device_id_ = device; + } + + protected: + // Implementations of this interface need to call this to setup + // appropriate logging etc. + void init(); + + const int rank_; + const int size_; + // Debug level setting. It is parsed once when ProcessGroup is constructed and + // remains the same across use of this process group. 
+ DebugLevel dist_debug_level_; + std::string pg_name_; + + std::function)> onCompletionHook_; + + c10::optional bound_device_id_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FakeProcessGroup.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FakeProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..2736e0e3538d8a95191f45ced1e7c0dd83cd33d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FakeProcessGroup.hpp @@ -0,0 +1,186 @@ +#pragma once + +#include + +namespace c10d { + +class FakeWork : public Work { + public: + bool wait(std::chrono::milliseconds timeout) override { + return true; + } + + c10::intrusive_ptr getFuture() override { + auto fut = c10::make_intrusive(c10::NoneType::get()); + fut->markCompleted(); + return fut; + } +}; + +class FakeProcessGroup : public Backend { + public: + FakeProcessGroup(int rank, int size) : Backend(rank, size) {} + + c10::intrusive_ptr broadcast( + std::vector& /* tensors */, + const BroadcastOptions& /* opts */ = BroadcastOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr allreduce( + std::vector& /* tensors */, + const AllreduceOptions& /* opts */ = AllreduceOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr allreduce_sparse( + std::vector& /* tensors */, + const AllreduceOptions& /* opts */ = AllreduceOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr allreduce_coalesced( + std::vector& /* tensors */, + const AllreduceCoalescedOptions& /* opts */ = + AllreduceCoalescedOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr reduce( + std::vector& /* tensors */, + const ReduceOptions& /* opts */ = ReduceOptions()) override { + return c10::make_intrusive(); + } + + // NOTE [allgather on FakeProcessGroup] + // Assume each rank have the same input tensor so we just copy to the results + // since it's not a real allgather, we simply make this copying logic to let + // some simple validation works (i.e. calling allgather to see if each rank + // have the same tensor or not). + // + // NOTE: in general it's not good form to try to make FakeProcessGroup work + // with real data, but the reasoning here is that we want FakeProcessGroup to + // work with DeviceMesh's init code that have the data validation, which + // makes it worth the tradeoff. 
+ c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& /* opts */ = AllgatherOptions()) override { + for (auto& tensor : outputTensors[0]) { + tensor.copy_(inputTensors[0]); + } + return c10::make_intrusive(); + } + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& /* opts */ = AllgatherOptions()) override { + auto chunks = outputBuffer.chunk(size_); + for (auto& tensor : chunks) { + tensor.copy_(inputBuffer); + } + return c10::make_intrusive(); + } + + c10::intrusive_ptr allgather_coalesced( + std::vector>& /* outputTensorLists */, + std::vector& /* inputTensors */, + const AllgatherOptions& /* opts */ = AllgatherOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const AllgatherOptions& /* opts */ = AllgatherOptions()) override { + for (size_t i = 0; i < outputs.size(); ++i) { + auto chunks = outputs[i].chunk(size_); + for (auto& chunk : chunks) { + chunk.copy_(inputs[i]); + } + } + return c10::make_intrusive(); + } + + c10::intrusive_ptr gather( + std::vector>& /* outputTensors */, + std::vector& /* inputTensors */, + const GatherOptions& /* opts */ = GatherOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr scatter( + std::vector& /* outputTensors */, + std::vector>& /* inputTensors */, + const ScatterOptions& /* opts */ = ScatterOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr reduce_scatter( + std::vector& /* outputTensors */, + std::vector>& /* inputTensors */, + const ReduceScatterOptions& /* opts */ = + ReduceScatterOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + const ReduceScatterOptions& /* opts */ = + ReduceScatterOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& /* outputs */, + std::vector& /* inputs */, + const ReduceScatterOptions& /* opts */ = + ReduceScatterOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr alltoall_base( + at::Tensor& /* outputBuffer */, + at::Tensor& /* inputBuffer */, + std::vector& /* outputSplitSizes */, + std::vector& /* inputSplitSizes */, + const AllToAllOptions& /* opts */ = AllToAllOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr alltoall( + std::vector& /* outputTensors */, + std::vector& /* inputTensors */, + const AllToAllOptions& opts = AllToAllOptions()) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr send( + std::vector& /* tensors */, + int /* dstRank */, + int /* tag */) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr recv( + std::vector& /* tensors */, + int /* srcRank */, + int /* tag */) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr recvAnysource( + std::vector& /* tensors */, + int /* tag */) override { + return c10::make_intrusive(); + } + + c10::intrusive_ptr barrier( + const BarrierOptions& /* opts */ = BarrierOptions()) override { + return c10::make_intrusive(); + } +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp new file mode 100644 
index 0000000000000000000000000000000000000000..0bb0756e061678945521349daba47da9d0a6b696 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include + +#include +#include + +#include + +namespace c10d { + +class TORCH_API FileStore : public Store { + public: + explicit FileStore(std::string path, int numWorkers); + + ~FileStore() override; + + void set(const std::string& key, const std::vector& value) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + int64_t add(const std::string& key, int64_t value) override; + + int64_t getNumKeys() override; + + bool deleteKey(const std::string& key) override; + + bool check(const std::vector& keys) override; + + void wait(const std::vector& keys) override; + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + // Returns the path used by the FileStore. + const std::string& getPath() const noexcept { + return path_; + } + + protected: + int64_t addHelper(const std::string& key, int64_t i); + + std::string path_; + off_t pos_{0}; + + int numWorkers_; + const std::string cleanupKey_; + const std::string refCountKey_; + const std::string regularPrefix_; + const std::string deletePrefix_; + + std::unordered_map> cache_; + + std::mutex activeFileOpLock_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b22fb1ae8faf3f65a1032c8d6fd2f81931d72cf2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace c10d { + +C10_EXPORT void set_thread_isolation_mode(bool enable); + +bool get_thread_isolation_mode(); + +C10_EXPORT void register_process_group( + const std::string& group_name, + c10::intrusive_ptr group); + +C10_EXPORT c10::intrusive_ptr resolve_process_group( + const std::string& group_name); + +C10_EXPORT void unregister_process_group(const std::string& group_name); + +C10_EXPORT void unregister_all_process_groups(); + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b691de302a389ece3dda5a539796c5d080f6073f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp @@ -0,0 +1,61 @@ +#pragma once + +#include + +#include +#include +#include + +#include + +namespace c10d { + +class TORCH_API HashStore : public Store { + public: + ~HashStore() override = default; + + void set(const std::string& key, const std::vector& data) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + void wait(const std::vector& keys) override { + wait(keys, Store::kDefaultTimeout); + } + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + int64_t add(const std::string& 
key, int64_t value) override; + + int64_t getNumKeys() override; + + bool check(const std::vector& keys) override; + + bool deleteKey(const std::string& key) override; + + void append(const std::string& key, const std::vector& value) + override; + + std::vector> multiGet( + const std::vector& keys) override; + + void multiSet( + const std::vector& keys, + const std::vector>& values) override; + + // Returns true if this store support append, multiGet and multiSet + bool hasExtendedApi() const override; + + protected: + std::unordered_map> map_; + std::mutex m_; + std::condition_variable cv_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..15720fcaf140f20cdfcded9d7c36d3df4a543fb7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp @@ -0,0 +1,529 @@ +#pragma once + +#ifdef USE_C10D_NCCL + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 14) +#define NCCL_HAS_COMM_NONBLOCKING +#endif + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 18) +#define NCCL_HAS_COMM_SPLIT +#endif + +// ncclGetLastError() is enabled only for NCCL versions 2.13+ +// ncclRemoteError only exists in NCCL versions 2.13+ +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 13) +#define ENABLE_NCCL_GET_LAST_ERROR +#define NCCL_REMOTE_ERROR +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_GET_LAST_ERROR +#define NCCL_REMOTE_ERROR +#endif + +// Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort() +// and ncclCommGetAsyncError() are not supported in earlier versions. +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 4) +#define ENABLE_NCCL_ERROR_CHECKING +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_ERROR_CHECKING +#endif + +// P2P is enabled only for NCCL versions 2.7+ since ncclSend() +// and ncclRecv() are not supported in earlier versions. +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 7) +#define ENABLE_NCCL_P2P_SUPPORT +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_P2P_SUPPORT +#endif + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 11) +#define ENABLE_NCCL_PREMUL_SUM_SUPPORT +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_PREMUL_SUM_SUPPORT +#endif + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 17) +#define NCCL_HAS_COMM_CTA_CGA +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define NCCL_HAS_COMM_CTA_CGA +#endif + +#if defined(NCCL_REGISTRATION_SUPPORTED) || \ + ((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 19))) +#define NCCL_HAS_COMM_REGISTER +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define NCCL_HAS_COMM_REGISTER +#endif + +// Macro to throw on a non-successful NCCL return value. 
+#define C10D_NCCL_CHECK(cmd, failureReason) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } while (0) + +// Macro to throw on a non-successful NCCL return value for NONBLOCKING calls. +#define C10D_NCCL_CHECK_NONBLOCKING(cmd, failureReason) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess && result != ncclInProgress) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } while (0) + +// Macro to throw on a non-successful NCCL return value, non-blocking. +#define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \ + ncclResult_t result = cmd; \ + auto startTimepoint = std::chrono::steady_clock::now(); \ + while (result == ncclInProgress) { \ + if (nccl_nonblocking_timeout() > 0) { \ + auto currentTimepoint = std::chrono::steady_clock::now(); \ + auto timeElapsed = std::chrono::duration_cast( \ + currentTimepoint - startTimepoint) \ + .count(); \ + if (timeElapsed > nccl_nonblocking_timeout()) { \ + std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + \ + ncclGetErrorWithVersion(result) + "\n" + \ + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } \ + ncclCommGetAsyncError(comm, &result); \ + } \ + if (result != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } + +#define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comm, failureReason) \ + ncclResult_t state = cmd; \ + auto startTimepoint = std::chrono::steady_clock::now(); \ + if (state == ncclInProgress) { \ + do { \ + if (nccl_nonblocking_timeout() > 0) { \ + auto currentTimepoint = std::chrono::steady_clock::now(); \ + auto timeElapsed = std::chrono::duration_cast( \ + currentTimepoint - startTimepoint) \ + .count(); \ + if (timeElapsed > nccl_nonblocking_timeout()) { \ + std::string err = "NCCL timeout in: " + std::string(__FILE__) + \ + ":" + std::to_string(__LINE__) + ", " + \ + ncclGetErrorWithVersion(state) + "\n" + \ + getNcclErrorDetailStr(state, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } \ + ncclCommGetAsyncError(comm->getNcclComm(), &state); \ + } while (state == ncclInProgress); \ + } \ + if (state != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \ + "\n" + getNcclErrorDetailStr(state, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } + +// Macro to print and abort on a non-successful NCCL return value. 
+#define C10D_NCCL_ASSERT(cmd) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess) { \ + std::string err = ncclGetErrorWithVersion(result); \ + fprintf( \ + stderr, \ + "NCCL error in: %s:%d, %s\n", \ + __FILE__, \ + __LINE__, \ + err.c_str()); \ + abort(); \ + } \ + } while (0) + +namespace c10d { + +TORCH_API size_t hashTensors(const std::vector& tensors); +std::string getNcclVersion(); +std::string ncclGetErrorWithVersion(ncclResult_t error); +bool nccl_use_nonblocking(); +int nccl_nonblocking_timeout(); + +// Provides additional detail into NCCL error codes based on when these are +// thrown in the NCCL codebase. +std::string getNcclErrorDetailStr( + ncclResult_t error, + c10::optional processGroupFailureReason = c10::nullopt); + +// Write NCCL debug info to local disk or any storage users define. +// There are some constrains we set for the debug info writer: +// 1. The writer should only be registered once. +// 2. Once registered, users cannot change it including un-register. +// 3. It is recommended to register the customized writer in the trainer setup, +// If users don't register before calling launchAsyncDebugDump, then users +// lose the chance to register (and the default writer will be +// auto-registered). +class TORCH_API DebugInfoWriter { + public: + virtual ~DebugInfoWriter(); + virtual void write(const std::string& ncclTrace); + static DebugInfoWriter& getWriter(int rank); + static void registerWriter(std::unique_ptr writer); + + protected: + DebugInfoWriter(std::string namePrefix, int rank) { + filename_ = c10::str(namePrefix, rank); + } + std::string filename_; + + private: + static std::unique_ptr writer_; + static std::atomic hasWriterRegistered_; +}; + +// RAII wrapper for NCCL communicator +class NCCLComm { + public: + explicit NCCLComm(ncclComm_t ncclComm) + : ncclComm_(ncclComm), + aborted_(false), + ncclAsyncErr_(ncclSuccess), + commFailureReason_(c10::nullopt), + initialized_(false) {} + + NCCLComm() : NCCLComm(nullptr) {} + + ~NCCLComm() noexcept { + // Add lock in this destructor, as aborted_ needs to be read after memory + // barrier here. + std::unique_lock lock(mutex_); + if (ncclComm_ && !aborted_) { +#ifdef ENABLE_NCCL_ERROR_CHECKING + // Use ncclCommAbort instead of ncclCommDestroy here since + // ncclCommDestroy could block forever waiting for work to complete on + // the communicator. 
+ C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_)); +#else + C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_)); +#endif + } + } + + static std::shared_ptr create( + int numRanks, + int rank, + ncclUniqueId commId) { + auto comm = std::make_shared(); + C10D_NCCL_CHECK( + ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank), + c10::nullopt); + comm->ncclId_ = commId; + comm->rank_ = rank; + comm->initialized_ = true; + return comm; + } + +#ifdef NCCL_HAS_COMM_NONBLOCKING + static std::shared_ptr create( + int numRanks, + int rank, + ncclUniqueId commId, + ncclConfig_t& config) { + auto comm = std::make_shared(); + bool isInitialized = false; + if (nccl_use_nonblocking()) { + config.blocking = 0; + LOG(INFO) << "Rank " << rank + << ": creating NCCL communicator in nonblocking mode"; + C10D_NCCL_CHECK_NONBLOCKING( + ncclCommInitRankConfig( + &(comm->ncclComm_), numRanks, commId, rank, &config), + c10::nullopt); + } else { + C10D_NCCL_CHECK( + ncclCommInitRankConfig( + &(comm->ncclComm_), numRanks, commId, rank, &config), + c10::nullopt); + // under blocking mode, comm is initialized after NCCL CHECK + isInitialized = true; + } + comm->ncclId_ = commId; + comm->rank_ = rank; + comm->initialized_ = isInitialized; + return comm; + } +#endif + +#ifdef NCCL_HAS_COMM_SPLIT + static std::shared_ptr split( + NCCLComm* source, + int color_id, + int rank, + ncclConfig_t& config) { + auto comm = std::make_shared(); + C10D_NCCL_CHECK( + ncclCommSplit( + source->ncclComm_, color_id, rank, &(comm->ncclComm_), &config), + c10::nullopt); + ++source->ncclCommSplitCounter_; + comm->rank_ = rank; + return comm; + } +#endif + +#if defined(IS_NCCL_EXP) && defined(NCCL_COMM_DUMP) + std::unordered_map ncclCommDump() { + std::unordered_map dump; + if (isAborted()) { + LOG(INFO) << "Communicator was aborted before trying to dump its state."; + return dump; + } + C10D_NCCL_CHECK(::ncclCommDump(ncclComm_, dump), c10::nullopt); + return dump; + } +#endif + + ncclUniqueId getNcclId() { + return ncclId_; + } + + // Must not be copyable + NCCLComm(const NCCLComm&) = delete; + NCCLComm& operator=(const NCCLComm&) = delete; + + // Do not support move assignment as there is no valid use case + NCCLComm& operator=(NCCLComm&& other) = delete; + + // Move constructable + NCCLComm(NCCLComm&& other) { + // Using other's lock, as it reads other's states + // Can not use this.mutex_, as this object is being constructed. + std::unique_lock lock(other.mutex_); + std::swap(ncclComm_, other.ncclComm_); + std::swap(aborted_, other.aborted_); + std::swap(ncclAsyncErr_, other.ncclAsyncErr_); + std::swap(initialized_, other.initialized_); + } + + ncclComm_t getNcclComm(); + + c10::optional getNcclCommFailureReason() const { + std::unique_lock lock(mutex_); + return commFailureReason_; + } + + void ncclCommAbort( + c10::optional commFailureReason = c10::nullopt) { + std::unique_lock lock(mutex_); +#ifdef ENABLE_NCCL_ERROR_CHECKING + if (aborted_) { + // Should not abort twice. + return; + } + +#ifdef NCCL_HAS_COMM_REGISTER + // Deregister all registered segments before aborting. + for (auto& it : registeredSegmentHandles_) { + void* handle = it.second; + C10D_NCCL_CHECK( + ::ncclCommDeregister(ncclComm_, handle), + c10::str( + "Failed to deregister segment handle ", + handle, + " on ncclComm_ ", + ncclComm_)); + } + registeredSegmentHandles_.clear(); +#endif + + // Set true failure reason if provided by ProcessGroupNCCL (e.g. 
work + // timeout) + commFailureReason_ = commFailureReason; + LOG(INFO) << "Aborting ncclComm_ " << ncclComm_ << " with reason: " + << (commFailureReason ? *commFailureReason + : "No abort reason provided."); +#ifndef NCCL_HAS_COMM_NONBLOCKING + C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_); +#else + C10D_NCCL_CHECK_TIMEOUT( + ::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_); +#endif + aborted_ = true; + ncclComm_ = nullptr; + + // Set an appropriate error so that we avoid using the communicator. + if (ncclAsyncErr_ == ncclSuccess) { + ncclAsyncErr_ = ncclSystemError; + } +#else + // This is a NOOP, if error checks are disabled. + return; +#endif + } + + bool isAborted() const { + std::unique_lock lock(mutex_); + return aborted_; + } + + uint64_t getCommSplitCounter() const { + return ncclCommSplitCounter_; + } + + ncclResult_t checkForNcclError() { + std::unique_lock lock(mutex_); +#ifdef ENABLE_NCCL_ERROR_CHECKING + if (ncclAsyncErr_ != ncclSuccess) { + return ncclAsyncErr_; + } + C10D_NCCL_CHECK( + ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_); + return ncclAsyncErr_; +#else + // Always return success, if error checks are disabled. + return ncclSuccess; +#endif + } + + ncclResult_t registerSegment(void* ptr, size_t size) { + std::unique_lock lock(mutex_); +#ifdef NCCL_HAS_COMM_REGISTER + // We register only segments from cache allocator + // which are guaranteed to be with disjoint addr ranges. Thus, a ptr always + // maps to a unique handle and should not be registered before the current + // ptr is deregistered and freed. + TORCH_CHECK( + registeredSegmentHandles_.count(ptr) == 0, + "Segment with ptr ", + ptr, + " has already been registered on ncclComm_ ", + ncclComm_); + + void* handle; + C10D_NCCL_CHECK( + ncclCommRegister(ncclComm_, ptr, size, &handle), + c10::str( + "Failed to register segment with ptr ", + ptr, + ", size ", + size, + " on ncclComm_ ", + ncclComm_)); + registeredSegmentHandles_[ptr] = handle; + return ncclSuccess; +#else + return ncclInvalidUsage; +#endif + } + + ncclResult_t deregisterSegment(void* ptr) { + std::unique_lock lock(mutex_); +#ifdef NCCL_HAS_COMM_REGISTER + TORCH_CHECK( + registeredSegmentHandles_.count(ptr) == 1, + "Segment with ptr ", + ptr, + " is not registered on ncclComm_ ", + ncclComm_); + + void* handle = registeredSegmentHandles_[ptr]; + C10D_NCCL_CHECK( + ncclCommDeregister(ncclComm_, handle), + c10::str( + "Failed to deregister segment handle ", + handle, + ", with ptr ", + ptr, + " on ncclComm_ ", + ncclComm_)); + registeredSegmentHandles_.erase(ptr); + return ncclSuccess; +#else + return ncclInvalidUsage; +#endif + } + + friend class ProcessGroupNCCL; + + protected: + // a helper function to wait until the communicator is initialized; + void waitUntilInitialized(int timeoutSecs); + ncclComm_t ncclComm_; + // Unique nccl_id for this communicator. + ncclUniqueId ncclId_; + bool aborted_; + uint64_t ncclCommSplitCounter_{0}; + ncclResult_t ncclAsyncErr_; + mutable std::mutex mutex_; + // Rank that this communicator corresponds to. + int rank_; + // Optional reason for communicator failure, provided by ProcessGroupNCCL for + // better error messaging. + c10::optional commFailureReason_; + bool initialized_{false}; +#ifdef NCCL_HAS_COMM_REGISTER + // Stores handlers for tensors registered by NCCL + std::unordered_map registeredSegmentHandles_; +#endif +}; + +// Helper that automatically cleans up premul sums. 
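// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: NCCLComm above and the
// ncclRedOpRAII helper defined right after this sketch share the same RAII
// shape -- own a C-style handle, release it in the destructor, forbid copies,
// and leave a moved-from object inert by swapping. A stripped-down,
// self-contained version with a hypothetical handle type:
#include <utility>

struct DemoHandleGuard {
  using demo_handle_t = void*; // stand-in for ncclComm_t / ncclRedOp_t

  DemoHandleGuard() = default;
  explicit DemoHandleGuard(demo_handle_t h) : handle_(h) {}

  DemoHandleGuard(const DemoHandleGuard&) = delete;
  DemoHandleGuard& operator=(const DemoHandleGuard&) = delete;
  DemoHandleGuard& operator=(DemoHandleGuard&&) = delete;

  // Move leaves `other` holding nullptr, so its destructor becomes a no-op.
  DemoHandleGuard(DemoHandleGuard&& other) noexcept {
    std::swap(handle_, other.handle_);
  }

  ~DemoHandleGuard() {
    if (handle_ != nullptr) {
      releaseDemoHandle(handle_); // hypothetical cleanup, e.g. a *Destroy call
    }
  }

 private:
  static void releaseDemoHandle(demo_handle_t) {} // placeholder cleanup
  demo_handle_t handle_ = nullptr;
};
// ---------------------------------------------------------------------------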
+struct ncclRedOpRAII { + ncclRedOpRAII() = default; + ncclRedOpRAII(ncclRedOp_t op) : op_(op) {} + ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm) + : op_(op), comm_(comm), premul_sum_(true) {} + ncclRedOpRAII(const ncclRedOpRAII&) = delete; + ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete; + ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() { + std::swap(tmp.op_, this->op_); + std::swap(tmp.comm_, this->comm_); + std::swap(tmp.premul_sum_, this->premul_sum_); + } +#if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT) + ~ncclRedOpRAII() { + if (premul_sum_) { + ncclRedOpDestroy(op_, comm_); + } + } +#endif + operator ncclRedOp_t() const { + return op_; + } + ncclRedOp_t op_; + ncclComm_t comm_; + bool premul_sum_ = false; +}; + +} // namespace c10d + +#endif // USE_C10D_NCCL diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cf153a75fff7e9538d6b94c8406dd0447789f28c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp @@ -0,0 +1,176 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { + +class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase { + public: + ParamCommsDebugInfo() = default; + ParamCommsDebugInfo( + int pgId, + int rank, + std::string&& colName, + int inNelems, + int outNelems, + at::ScalarType dType, + std::vector inSplitSizes, + std::vector outSplitSizes, + int globalRankStart, + int globalRankStride, + int worldSize); + + ~ParamCommsDebugInfo() override = default; + + int getProcessGroupId() const { + return pgId_; + } + + int getRank() const { + return rank_; + } + + int getWorldSize() const { + return worldSize_; + } + + int getGlobalRankStart() const { + return globalRankStart_; + } + + int getGlobalRankStride() const { + return globalRankStride_; + } + + const std::string getColumnName() const { + return columnName_; + } + + int getInMessageNelems() const { + return inMessageNelems_; + } + + int getOutMessageNelems() const { + return outMessageNelems_; + } + + at::ScalarType getDType() const { + return dType_; + } + + const std::vector& getInputSplitSizes() const { + return inputSplitSizes_; + } + + const std::vector& getOutputSplitSizes() const { + return outputSplitSizes_; + } + + const std::vector& getGroupRanks() const { + return groupRanks_; + } + + private: + int pgId_{}; + int rank_{}; + int worldSize_{}; + std::string columnName_; + int inMessageNelems_{}; + int outMessageNelems_{}; + at::ScalarType dType_ = at::kByte; + std::vector inputSplitSizes_; + std::vector outputSplitSizes_; + int globalRankStart_; + int globalRankStride_; + std::vector groupRanks_{}; +}; + +#define RECORD_PARAM_COMMS( \ + seq, \ + pgId, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + globalRankStart, \ + globalRankStride, \ + worldSize) \ + auto paramCommsInfo = std::make_shared( \ + pgId, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + globalRankStart, \ + globalRankStride, \ + worldSize); \ + c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \ + std::initializer_list paramList = { \ + c10::IValue(seq), \ + pgId, \ + rank, \ + colName, \ + inSplitSizes, \ + outSplitSizes, \ + globalRankStart, \ + 
globalRankStride, \ + worldSize}; \ + c10::ArrayRef paramInputs(paramList); \ + RECORD_FUNCTION(at::kParamCommsCallName, paramInputs); + +#define RECORD_PARAM_COMMS_DATA( \ + seq, \ + pgId, \ + InputTensors, \ + OutputTensors, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + globalRankStart, \ + globalRankStride, \ + worldSize) \ + auto paramCommsInfo = std::make_shared( \ + pgId, \ + rank, \ + colName, \ + inNelems, \ + outNelems, \ + dType, \ + inSplitSizes, \ + outSplitSizes, \ + globalRankStart, \ + globalRankStride, \ + worldSize); \ + c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \ + std::initializer_list paramList = { \ + c10::IValue(InputTensors), \ + c10::IValue(seq), \ + pgId, \ + rank, \ + colName, \ + inSplitSizes, \ + outSplitSizes, \ + globalRankStart, \ + globalRankStride, \ + worldSize}; \ + c10::ArrayRef paramInputs(paramList); \ + RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \ + at::kParamCommsCallName, \ + paramInputs, \ + std::vector(1, c10::IValue(OutputTensors))); +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a21b98c2e16fa152af74dd0baf842500886321a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp @@ -0,0 +1,743 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +// ************************************************************************* +// PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN +// versions 1.7 and 1.8. +// PLEASE DO NOT ADD ANY DEPENDENCIES. +// SEE RFC: https://github.com/pytorch/pytorch/issues/39662 +// ************************************************************************* + +constexpr auto kProcessGroupDefaultTimeout = + std::chrono::milliseconds(30 * 60 * 1000); + +namespace c10d { + +// ProcessGroup is a base class that captures collective and point to +// point communication in a fixed set of processes. +// +// The functions specified in the class below describe the API alone; +// implementations are provided in subclasses. +// +// Every function that performs I/O is executed asynchronously by a +// thread pool owned by the ProcessGroup (by default). They return an +// object that can be used to wait for completion or error. +// +// The ProcessGroup can instantiate subgroups with fewer or an equal +// number of members. Implementations must take care that multiple +// process groups can be used in parallel and synchronize accordingly. +// +// The ProcessGroup assumes a fixed set of processes. If the set +// changes, existing instances must be destructed and instantiation +// and initialization must start from scratch. For members of the +// process group to find each other (referred to as rendezvous from +// hereon) +// +class TORCH_API ProcessGroup : public torch::CustomClassHolder { + public: + // ProcessGroup Options is a base struct that defines the basic options + // when constructing a ProcessGroup. Each ProcessGroup subclass should + // extend this struct and define its options if it wants to provide more + // config options (beyond basic ones defined here) to end user. 
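// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: per the comment above, a
// backend can extend the base Options struct (declared immediately below)
// with its own knobs. `DemoBackendOptions` and its fields are invented for
// illustration only; real backends define their own option types.
struct DemoBackendOptions : Options {
  DemoBackendOptions()
      : Options(/*backend=*/"demo", /*timeout=*/kProcessGroupDefaultTimeout) {}

  // Hypothetical backend-specific settings.
  bool use_high_priority_streams = false;
  int num_worker_threads = 2;
};
// ---------------------------------------------------------------------------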
+ struct TORCH_API Options : torch::CustomClassHolder { + explicit Options( + std::string backend, + std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout) + : timeout(timeout), backend(std::move(backend)) {} + ~Options() override = default; + + std::chrono::milliseconds timeout; + + // backend name + const std::string backend; + }; + + enum BackendType { + UNDEFINED = 0, + GLOO = 1, + NCCL = 2, + UCC = 3, + MPI = 4, + CUSTOM = 5, + }; + + // Not used, set for backwards compatibility and only used for TypeDef in + // Ops.cpp + explicit ProcessGroup(int rank, int size); + + explicit ProcessGroup( + const c10::intrusive_ptr<::c10d::Store>& store, + int rank, + int size, + c10::intrusive_ptr options); + ~ProcessGroup() override; + + int getRank() const { + return rank_; + } + + int getSize() const { + return size_; + } + + // Returns an unique opaque ID of this process group object. + int64_t getID() const { + return reinterpret_cast(this); + } + + // Returns an unique opaque ID of a backend for the specific backend type + // that can correlate with this process group's collectives. + int64_t getBackendID(BackendType backend_type) const { + return reinterpret_cast(getBackend(backend_type).get()); + } + + virtual const std::string getBackendName() const { + return options_->backend; + }; + + BackendType getBackendType() const { + return backendType_; + }; + + virtual void startCoalescing(c10::DeviceType deviceType) { + // only nccl has implemented startCoalescing so only execute for nccl + // backends + auto backend = getBackend(deviceType); + backend->startCoalescing(); + } + + virtual c10::intrusive_ptr endCoalescing(c10::DeviceType deviceType) { + // only nccl has implemented endCoalescing so only execute for nccl + // backends + auto backend = getBackend(deviceType); + auto work = backend->endCoalescing(); + return work; + } + + virtual c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::broadcast_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t, + bool, + int64_t)>(); + // It's awakward to unbox the opts here and box them again in the custom C++ + // op. But it's also complicated to make opts as a CustomClassHolder. Leave + // it as it is now. 
+ return std::get<1>(op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.rootTensor, + opts.asyncOp, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allreduce_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + const c10::optional& sparse_indices, + int64_t)>(); + + return std::get<1>(op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.sparseIndices, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allreduce_coalesced_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.timeout.count()); + } + + virtual c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.rootRank, + opts.rootTensor, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_", "") + .typed>, + c10::intrusive_ptr>( + const std::vector>&, + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.timeout.count())); + } + + // Gathers a single tensor inputBuffer into a single buffer outputBuffer that + // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE. + // For implementers of ProcessGroup API and advanced users only. + // Note: this function will be deprecated in near future. + virtual c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::_allgather_base_", "") + .typed>( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + bool, + int64_t)>(); + + return std::get<1>(op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.asyncOp, + opts.timeout.count())); + } + + // This function is deprecated and will be moved out of ProcessGroup to comms: + // * do not add dependencies on this function, + // * do not implement it in your ProcessGroup, implement _allgather_base + // instead. 
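// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: the collective wrappers
// above all follow the same shape -- look up the registered c10d op in the
// dispatcher, call it with `this`, and hand back the Work handle. From the
// caller's side that typically reduces to the following (assumes `pg` is an
// already-initialized process group whose backend supports the tensor's
// device; shape and values are placeholders):
inline void demoAllreduce(const c10::intrusive_ptr<::c10d::ProcessGroup>& pg) {
  std::vector<at::Tensor> tensors = {at::ones({8})};
  auto work = pg->allreduce(tensors); // async; returns c10::intrusive_ptr<Work>
  work->wait();                       // block until the collective completes
  // tensors[0] now holds the element-wise sum across all ranks.
}
// ---------------------------------------------------------------------------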
+ virtual c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_coalesced_", "") + .typed( + const std::vector>&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&)>(); + + return op.call( + outputTensorLists, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this)); + } + + // This function is a coalesced version of `allgather_into_tensor` (currently + // still named as `_allgather_base`). Each tensor in the vector corresponds to + // an input/output of one `allgather_into_tensor` operation. + virtual c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "") + .typed( + const at::TensorList, + const at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&)>(); + + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this)); + } + + virtual c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::gather_", "") + .typed( + const std::vector>&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::scatter_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const std::vector>&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + bool, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.asyncOp, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_scatter_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const std::vector>&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.timeout.count())); + } + + virtual c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::_reduce_scatter_base_", "") + .typed>( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + bool, + int64_t)>(); + return std::get<1>(op.call( + 
outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.asyncOp, + opts.timeout.count())); + } + + // This function is a coalesced version of `reduce_scatter_tensor` (currently + // still named as `_reduce_scatter_base`). Each tensor in the vector + // corresponds to an input/output of one `reduce_scatter_tensor` operation. + virtual c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "") + .typed( + const at::TensorList, + const at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.timeout.count()); + } + + virtual c10::intrusive_ptr alltoall_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::alltoall_base_", "") + .typed( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + std::vector, + std::vector, + int64_t)>(); + return op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + outputSplitSizes, + inputSplitSizes, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::alltoall_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.timeout.count())); + } + + virtual void monitoredBarrier( + const BarrierOptions& opts, + bool wait_all_ranks = false) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::monitored_barrier_", "") + .typed&, + const std::vector&, + int64_t, + bool)>(); + // Default to using cpu implementation, monitored barrier is only for GLOO + at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU)); + op.call( + tensor, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.device_ids, + opts.timeout.count(), + wait_all_ranks); + } + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + virtual void setSequenceNumberForGroup() { + auto backendType = getBackendType(); + // TODO: HACK for backend name to get sequence number for that backend. 
+ if (backendType == ProcessGroup::BackendType::GLOO || + backendType == ProcessGroup::BackendType::NCCL || + backendType == ProcessGroup::BackendType::UCC) { + getDefaultBackend()->setSequenceNumberForGroup(); + } else { + TORCH_CHECK( + false, + c10::str( + "ProcessGroup ", + getBackendName(), + " does not yet support sequence numbers.")); + } + } + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + virtual uint64_t getSequenceNumberForGroup() { + auto backendType = getBackendType(); + + // TODO: HACK for backend name to get sequence number for that backend. + if (backendType == ProcessGroup::BackendType::GLOO || + backendType == ProcessGroup::BackendType::NCCL || + backendType == ProcessGroup::BackendType::UCC) { + return getDefaultBackend()->getSequenceNumberForGroup(); + } else { + TORCH_CHECK( + false, + c10::str( + "ProcessGroup ", + getBackendName(), + " does not yet support sequence numbers.")); + } + } + + virtual c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::send", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + dstRank, + tag); + } + + virtual c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::recv_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + srcRank, + tag); + } + + virtual c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::recv_any_source_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + tag); + } + + virtual c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) { + static at::Tensor tensor; + // TODO: if nccl was specified then use it + auto device = opts.device; + if (device.has_value()) { + // set device tensor from argument + tensor = at::empty( + {1}, at::TensorOptions().device(device.value()).dtype(at::kByte)); + } else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) { + // set cuda tensor + tensor = at::empty( + {1}, + at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte)); + } else { + // Default to using cpu implementation + tensor = at::empty( + {1}, + at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte)); + } + + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::barrier", "") + .typed( + at::Tensor, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const std::vector&, + int64_t)>(); + + return op.call( + tensor, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.device_ids, + opts.timeout.count()); + } + + c10::intrusive_ptr getOptions() { + return options_; + } + + bool hasBackends() { + return !deviceTypeToBackendType_.empty(); + } + + void setBackend( + c10::DeviceType deviceType, + BackendType backendType, + const c10::optional>& backend) { + // 
TODO: should we add these entries after the backend setting succeeds? + deviceTypeToBackendType_[deviceType] = backendType; + deviceTypes_.insert(deviceType); + // if the backendType is already set then reuse it for this device + if (backendTypeToBackend_.find(backendType) != + backendTypeToBackend_.end()) { + auto existingBackend = backendTypeToBackend_.at(backendType); + deviceTypeToBackend_[deviceType] = existingBackend; + TORCH_CHECK( + existingBackend->getBoundDeviceId() == + (*backend)->getBoundDeviceId()); + } else { + // check if backend has value + if (backend.has_value()) { + deviceTypeToBackend_[deviceType] = backend.value(); + backendTypeToBackend_[backendType] = backend.value(); + (*backend)->setBoundDeviceId(bound_device_id_); + } + } + } + + c10::intrusive_ptr getDefaultBackend() const { + TORCH_CHECK( + backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(), + "Could not find the default backend type ", + backendType_, + " for Process Group with name ", + getBackendName(), + "."); + return backendTypeToBackend_.at(backendType_); + } + + c10::intrusive_ptr getBackend(c10::DeviceType deviceType); + + c10::intrusive_ptr getBackend(BackendType backendType) const { + TORCH_CHECK( + backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(), + "Could not find backend type ", + backendType, + "."); + return backendTypeToBackend_.at(backendType); + } + + // Return device types supported by this ProcessGroup. + // Note: the return type is `Device` rather than `DeviceType` for the purpose + // of easy comparison at Python level. The `Device` will have default index + // (-1). + std::vector getDeviceTypes() const { + std::vector devices; + devices.reserve(deviceTypes_.size()); + for (auto& dt : deviceTypes_) { + devices.push_back(c10::Device(dt)); + } + return devices; + } + + void registerOnCompletionHook( + std::function)>&& hook) { + getDefaultBackend()->registerOnCompletionHook(std::move(hook)); + } + + void waitForPendingWorks() { + getDefaultBackend()->waitForPendingWorks(); + } + + bool hasHooks() const { + return getDefaultBackend()->hasHooks(); + } + + const std::string& getGroupName() const; + void setGroupName(const std::string& name); + void enableCollectivesTiming(); + + void release_resources() override; + + // ProcessGroups optionally can be "bound" to a specific device. + // Currently this is only for nccl and allows for some opt-in + // optimizations such as automatic use of ncclCommSplit. The device + // is specified in `init_process_group` and eventually makes it + // here and then down into the actual backend instances. + c10::optional getBoundDeviceId() const { + return bound_device_id_; + } + + void setBoundDeviceId(c10::optional device) { + if (device) { + TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index"); + } + bound_device_id_ = device; + } + + protected: + // Implementations of this interface need to call this to setup + // appropriate logging etc. + void init(); + + c10::intrusive_ptr store_; + const int rank_; + const int size_; + const c10::intrusive_ptr options_; + const BackendType backendType_; + + // Debug level setting. It is parsed once when ProcessGroup is constructed and + // remains the same across use of this process group. 
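// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: the send()/recv()
// wrappers earlier in this class provide basic point-to-point semantics. A
// hedged two-rank ping example; tensor shape, tag, and rank layout are
// placeholders:
inline void demoPing(
    const c10::intrusive_ptr<::c10d::ProcessGroup>& pg,
    int rank) {
  std::vector<at::Tensor> buf = {at::zeros({4})};
  if (rank == 0) {
    pg->send(buf, /*dstRank=*/1, /*tag=*/0)->wait();
  } else if (rank == 1) {
    pg->recv(buf, /*srcRank=*/0, /*tag=*/0)->wait();
  }
}
// ---------------------------------------------------------------------------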
+ DebugLevel dist_debug_level_; + + // Backend classes for this ProcessGroup + std::unordered_set deviceTypes_; + std::unordered_map deviceTypeToBackendType_; + std::unordered_map> + deviceTypeToBackend_; + std::unordered_map> + backendTypeToBackend_; + + c10::optional bound_device_id_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7dfa6d6121ac0e44a53a5a41476d80acb6d3dde3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp @@ -0,0 +1,448 @@ +#pragma once + +#ifdef USE_C10D_GLOO + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace c10d { + +constexpr const char* GLOO_BACKEND_NAME = "gloo"; + +// ProcessGroupGloo implements Gloo bindings for c10d. +// +// All functions on this class are expected to be called in the same +// order across processes in the group. This is the only way that we +// can guarantee to match up the same calls across processes. For +// multi-threaded usage of process groups, you can use consider using +// multiple process group instances. +// +// The Gloo algorithms that this class calls into are cached by their +// signature (see description of AlgorithmKey above). This cache works +// as follows: every function call instantiates an AlgorithmKey and +// looks in the cache for existing entries. If there is one, it is +// removed from the cache and returned to the caller. If there are +// none, a new entry is created and returned. If an entry was created +// before, but is still in use, the call will block and wait until the +// entry is returned to the cache. +// +// In the future, we hope to extend this to allow multiple entries per +// key, to enable parallelism for a single key. The number of entries +// per key must always be identical for all processes. This maximum +// number can be automatically tuned, but only if we let a single +// process take charge, and have it broadcast the limits. +// +class TORCH_API ProcessGroupGloo : public Backend { + public: + // AsyncWork is the Gloo specific superclass for asynchronous work items. + // We can split asynchronous work into 3 phases: + // 1) Sanity checks and prepare input (e.g. memcpy) + // 2) Run operation on background thread + // 3) Synchronize with completion on foreground thread + // + // There is state to be shared between these 3 phases and all of this state + // is captured in the AsyncWork class and its derivatives. + // + // Note: while we are porting operations to use new style collectives, there + // is a split between operations using the existing caching approach and + // operations using the new AsyncWork base class. Over time we will port + // all operations and perform needed cleanup. + // + // FIXME: This probably should be called WorkGloo since the work is executed + // in sync mode by a background thread. 
+ class TORCH_API AsyncWork : public Work { + public: + explicit AsyncWork( + std::vector> outputTensors, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr, + const c10::optional>& inputTensors = + c10::nullopt); + + ~AsyncWork() override = default; + + static void execute(c10::intrusive_ptr work); + + virtual void run() = 0; + + std::vector result() override; + + c10::intrusive_ptr getFuture() override; + uint64_t getSequencenumber() const override; + + protected: + friend class ProcessGroupGloo; + + private: + void finishWorkGloo(); + void finishWorkGlooError(std::exception_ptr eptr); + inline void recordAsyncWorkProfilingInfo( + const char* profilingTitle, + const c10::optional>& inputTensors); + + const std::vector> outputTensors_; + c10::intrusive_ptr future_; + std::function recordFunctionBeforeCallback_; + const uint64_t seq_; + }; + + // Wrap c10d store as Gloo store + class TORCH_API GlooStore : public ::gloo::rendezvous::Store { + public: + GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {} + + void setUint(const std::string& key, const std::vector& value) { + store_->set(key, value); + } + + void set(const std::string& key, const std::vector& value) override { + std::vector tmp(value.begin(), value.end()); + store_->set(key, tmp); + } + + std::vector getUint(const std::string& key) { + auto value = store_->get(key); + return value; + } + + std::vector get(const std::string& key) override { + auto value = store_->get(key); + return std::vector(value.begin(), value.end()); + } + + void wait(const std::vector& keys) override { + store_->wait(keys, ::c10d::Store::kDefaultTimeout); + } + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override { + store_->wait(keys, timeout); + } + +#ifdef GLOO_STORE_HAS_STORE_V2 + bool has_v2_support() override { + return store_->hasExtendedApi(); + } + + std::vector> multi_get( + const std::vector& keys) override { + std::vector> res; + for (auto& value : store_->multiGet(keys)) { + res.emplace_back(std::vector(value.begin(), value.end())); + } + return res; + } + + void multi_set( + const std::vector& keys, + const std::vector>& values) override { + std::vector> u_values; + for (auto& value : values) { + u_values.emplace_back(std::vector(value.begin(), value.end())); + } + store_->multiSet(keys, u_values); + } + + void append(const std::string& key, const std::vector& value) + override { + std::vector tmp(value.begin(), value.end()); + return store_->append(key, tmp); + } + + int64_t add(const std::string& key, int64_t value) override { + return store_->add(key, value); + } +#endif + + protected: + c10::intrusive_ptr<::c10d::Store> store_; + }; + + // For send and recv operations there is no need to pass them to the + // thread pool as they are entirely completed by the device thread. + // This work object is used to synchronize completion of the send or + // recv operation. It keeps a reference to the tensor it is + // operating on to prevent it from being deallocated while the + // operation is still in flight. 
+ class TORCH_API SendWork : public Work { + public: + explicit SendWork( + at::Tensor& tensor, + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer, + uint64_t seq); + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + uint64_t getSequencenumber() const override; + + protected: + at::Tensor tensor_; + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_; + const uint64_t seq_; + }; + + class TORCH_API RecvWork : public Work { + public: + explicit RecvWork( + at::Tensor& tensor, + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr); + + int sourceRank() const override; + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + uint64_t getSequencenumber() const override; + + protected: + at::Tensor tensor_; + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_; + int srcRank_; + const uint64_t seq_; + }; + + struct TORCH_API Options : public Backend::Options { + explicit Options( + std::chrono::milliseconds timeout = kBackendDefaultTimeout); + + // return intrusive_ptr of the object + static c10::intrusive_ptr create( + std::chrono::milliseconds timeout = kBackendDefaultTimeout) { + return c10::make_intrusive(timeout); + } + + std::vector> devices; + int threads; + }; + + const std::string getBackendName() const override { + return std::string(GLOO_BACKEND_NAME); + } + + // Helper functions to create a new device object. + // They are static functions on this class to keep them logically + // separate from the rest of the code base (e.g. torch/csrc/distributed). + + // Create new device instance for specific interface. + static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface( + const std::string& interface); + + // Create new device instance for specific hostname or address. + static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname( + const std::string& hostname); + + // Create new device instance. + // It tries to resolve this machine's hostname and bind to that address. + // If that fails (i.e. the hostname doesn't resolve to an address), it + // falls back to binding to the loopback address. + static std::shared_ptr<::gloo::transport::Device> createDefaultDevice(); + + // Create ProcessGroupGloo instance. 
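// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: a hedged example of
// wiring the pieces above together -- a Store for rendezvous, an Options
// object with a default transport device, and the constructor declared just
// below. The FileStore path, rank, and world size are placeholders, and the
// relevant c10d store headers are assumed to be available:
inline c10::intrusive_ptr<::c10d::ProcessGroupGloo> demoMakeGlooPg(
    int rank,
    int size) {
  c10::intrusive_ptr<::c10d::Store> store =
      c10::make_intrusive<::c10d::FileStore>("/tmp/demo_rendezvous", size);
  auto opts = ::c10d::ProcessGroupGloo::Options::create();
  opts->devices.push_back(::c10d::ProcessGroupGloo::createDefaultDevice());
  return c10::make_intrusive<::c10d::ProcessGroupGloo>(store, rank, size, opts);
}
// ---------------------------------------------------------------------------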
+ static c10::intrusive_ptr createProcessGroupGloo( + const c10::intrusive_ptr& store, + int rank, + int size, + std::chrono::milliseconds timeout); + + explicit ProcessGroupGloo( + const c10::intrusive_ptr& store, + int rank, + int size, + c10::intrusive_ptr options = Options::create()); + + ~ProcessGroupGloo() override; + + c10::intrusive_ptr getOptions() { + return options_; + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_sparse( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& output_tensor, + at::Tensor& input_tensor, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& output_lists, + std::vector& input_list, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputs, + std::vector& inputs, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputs, + std::vector>& inputs, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputs, + std::vector>& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputCounts, + std::vector& inputCounts, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + void enableCollectivesTiming() override; + + const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const { + return store_; + } + + // Similar to barrier(), but blocks rank 0 until all other ranks have + // acknowledged that they are alive (through send/recv from rank 0). Rank 0 + // is able to report all failed ranks if waitAllRanks = true, otherwise + // reports the first rank it detected as failed. 
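// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: hedged usage of the
// monitoredBarrier override declared next (described in the comment above).
// With waitAllRanks=true, rank 0 keeps waiting so it can report every
// unresponsive rank rather than only the first one it notices. The timeout
// value is a placeholder:
inline void demoMonitoredBarrier(::c10d::ProcessGroupGloo& pg) {
  ::c10d::BarrierOptions opts;
  opts.timeout = std::chrono::seconds(30);
  pg.monitoredBarrier(opts, /*waitAllRanks=*/true); // throws if ranks time out
}
// ---------------------------------------------------------------------------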
+ void monitoredBarrier( + const BarrierOptions& opts = BarrierOptions(), + bool waitAllRanks = false) override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + int getNumThreads() { + return options_->threads; + } + + protected: + std::unique_ptr<::gloo::rendezvous::Store> store_; + const c10::intrusive_ptr options_; + + // Every Gloo context represents a set of connections to its peers. + // In order to use more than one device (or allow for parallelism on + // a single device), you need multiple contexts. + std::vector> contexts_; + std::vector threads_; + bool stop_; + + // Incremented for every collective we kick off. + // The value is used as tag for collective operations. Collectives are kicked + // off in identical order across processes. Therefore the tag can be used + // to match up operations during concurrent execution. + uint32_t collectiveCounter_; + + // Returns next collective tag to use (uses collectiveCounter_). + uint32_t nextTag(); + + // Returns the context to use for the specified tag. + // With `nextTag` returning an increasing number, this should lead + // to contexts being used in a round-robin fashion. + std::shared_ptr<::gloo::Context> getContext(uint32_t tag); + + // Entrypoint for worker threads. + void runLoop(int workerIndex); + + // Queue work to run on worker thread. + void enqueue(c10::intrusive_ptr work); + + // Keep both a queue of pending work, and a vector with in progress work. + // Both of these can only be mutated when holding the queue lock. + // We keep both around instead of just the queue, so we can grab a weak_ptr + // to all in progress and pending work when executing a barrier. + // When executing a barrier, we need to ensure that all prior work + // has completed before completing itself. + std::deque> workQueue_; + std::vector> workInProgress_; + std::mutex workMutex_; + std::condition_variable workProduceCV_; + std::condition_variable workConsumeCV_; + uint64_t seq_{0}; +}; + +} // namespace c10d + +#endif // USE_C10D_GLOO diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c21cb1cec88adae7bb11006447c15f5a29c3de77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -0,0 +1,1097 @@ +#pragma once + +#if defined(__linux__) +#include +#include +#include +#include +#endif + +#ifdef USE_C10D_NCCL + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace c10d { + +// Control whether or not wait() is blocking or non-blocking. +static std::vector TORCH_NCCL_BLOCKING_WAIT = { + "TORCH_NCCL_BLOCKING_WAIT", + "NCCL_BLOCKING_WAIT"}; + +// Control whether or not we perform Async Error Handling with NCCL. 
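// ---------------------------------------------------------------------------
// Illustrative sketch, not from the upstream header: each of these static
// vectors lists environment-variable aliases for one setting (the preferred
// TORCH_NCCL_* name alongside a legacy NCCL_* name). A simplified, hedged
// illustration of reading such a toggle; the real code goes through the
// getCvar* helpers used later in this header (e.g. in DumpPipe), not raw
// std::getenv, and the precedence and value parsing here are illustrative:
#include <cstdlib>
#include <string>
#include <vector>

inline bool demoEnvFlagSet(const std::vector<std::string>& aliases) {
  for (const auto& name : aliases) {
    if (const char* value = std::getenv(name.c_str())) {
      return std::string(value) == "1" || std::string(value) == "true";
    }
  }
  return false;
}
// e.g. demoEnvFlagSet(TORCH_NCCL_BLOCKING_WAIT)
// ---------------------------------------------------------------------------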
+static std::vector TORCH_NCCL_ASYNC_ERROR_HANDLING = { + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + "NCCL_ASYNC_ERROR_HANDLING"}; + +// Control whether dumping debug info on watchdog +// timeout is enabled. This variable must be set together with +// TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0. +static std::vector TORCH_NCCL_DUMP_ON_TIMEOUT = { + "TORCH_NCCL_DUMP_ON_TIMEOUT"}; + +// Control whether Desync Debug is enabled. This variable must be set +// together with TORCH_NCCL_ASYNC_ERROR_HANDLING. +static std::vector TORCH_NCCL_DESYNC_DEBUG = { + "TORCH_NCCL_DESYNC_DEBUG", + "NCCL_DESYNC_DEBUG"}; + +// Enable recording start-events for all ProcessGroupNCCL collectives, and +// compute accurate collective timing per-collective. (Note: end-events are +// recorded by default. Turn on this flag can increase chances of a watchdog +// hang due to performing a CUDA event query which eventually calls +// cudaEventElapsedTime() API. +static std::vector TORCH_NCCL_ENABLE_TIMING = { + "TORCH_NCCL_ENABLE_TIMING", + "NCCL_ENABLE_TIMING"}; + +// Enable monitoring thread which aborts the process when the ProcessGroupNCCL +// Watchdog thread gets stuck and no heartbeat is detected after +// TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC. This can happen due to calling CUDA/NCCL +// APIs that may hang. It is Useful to prevent jobs being stuck for a prolonged +// time than necessary tying up cluster resources. +static std::vector TORCH_NCCL_ENABLE_MONITORING = { + "TORCH_NCCL_ENABLE_MONITORING"}; + +// Control the watchdog heartbeat timeout period after which the monitoring +// thread will abort the process. +static std::vector TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = { + "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"}; + +// The maximum number of events we store in the flight recorder's ring buffer. +// (One event could be the start or end of a collective, for example). +static std::vector TORCH_NCCL_TRACE_BUFFER_SIZE = { + "TORCH_NCCL_TRACE_BUFFER_SIZE"}; + +// Control how much extra time we will wait for dumping the debugging info +// before we exit and throws timeout exception. +static std::vector TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC = { + "TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC"}; + +// Control the interval inside the watchdog thread to check the coordinated +// signal from other ranks, e.g. to dump the debugging information. +static std::vector TORCH_NCCL_COORD_CHECK_MILSEC = { + "TORCH_NCCL_COORD_CHECK_MILSEC"}; + +// Whether to abort the communicators when users call destroy_process_group(). +// If yes, communicators will be aborted when destroy_process_group is called, +// but not in destructor. +static std::vector TORCH_NCCL_ABORT_IN_DESTROY_PG = { + "TORCH_NCCL_ABORT_IN_DESTROY_PG"}; + +constexpr const char* NCCL_BACKEND_NAME = "nccl"; + +constexpr const char* TIMEOUT_DUMP = "timeout_dump"; + +constexpr const int kWorkStatusUpdatePeriodMs = 10 * 1000; // 10 seconds + +constexpr auto kProcessGroupNCCLDefaultTimeout = + std::chrono::milliseconds(10 * 60 * 1000); + +// NoHandling: do not handle asynchronous NCCL errors +// TearDown: tear down process upon error, see `WorkNCCL::handleException` +// CleanUpOnly: just clean up collectives and abort communicators without +// tearing down process SkipCleanUp: (this is a temporary option and can be +// removed in future) tear down process without cleaning up NCCL communicators. 
+// This should be used as a last resort in case `ncclCommAbort` itself is +// hanging +enum ErrorHandlingMode { + NoHandling = 0, + TearDown = 1, + CleanUpOnly = 2, + SkipCleanUp = 3 +}; + +#define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp) + +#define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly) + +#define PRINT_COLLECTIVE_HASH_SIGNATURE(phase, opType, numel, hashValue) \ + LOG(WARNING) << logPrefix() << "Hash of " << phase << " to NCCL " << opType \ + << " with size " << numel << " is " << hashValue; + +// If set, ProcessGroupNCCL doesn't use recordStream calls to ensure +// caching allocator safety for tensors used on both user-facing and +// internal comm streams. +// Instead, it stashes live references to those tensors until after +// user-facing streams are synced with comm streams. +// See stashed_for_allocator_safety_ below. +static std::vector TORCH_NCCL_AVOID_RECORD_STREAMS = { + "TORCH_NCCL_AVOID_RECORD_STREAMS"}; + +// If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda cache +// allocator so that whenever a tensor is allocated or freed, ProcessGroupNCCL +// can register/deregister the tensor on all available NCCL communicators. +static std::vector TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK = + {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK", + "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"}; + +#if defined(__linux__) +struct DumpPipe { + DumpPipe(int rank) { + std::string fileStem = + getCvarString({"TORCH_NCCL_DEBUG_INFO_PIPE_FILE"}, ""); + if (fileStem.empty() || + getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0) <= 0) { + return; + } + TORCH_CHECK(!fileStem.empty(), "TORCH_NCCL_DEBUG_INFO_TEMP_FILE is empty"); + std::string filename = c10::str(fileStem, rank, ".pipe"); + TORCH_CHECK( + unlink(filename.c_str()) != -1 || errno == ENOENT, + "Error removing existing named pipe ", + filename); + TORCH_CHECK( + mkfifo(filename.c_str(), 0666) != -1, + "Error creating named pipe ", + filename); + fd_ = open(filename.c_str(), O_RDONLY | O_NONBLOCK); + LOG(INFO) << "Pipe file " << filename + << " has been opened, write to it to trigger NCCL Debug Dump."; + TORCH_CHECK(fd_ != -1, "Error opening named pipe ", filename); + } + bool shouldDump() { + if (fd_ == -1) { + return false; + } + char buf[128]; + // non-blocking from O_NONBLOCK above. + // Ignore EINTR because we already will poll this + // again later. + ssize_t bytesRead = read(fd_, &buf, 128); + return bytesRead > 0; + } + ~DumpPipe() { + if (fd_ != -1) { + close(fd_); + } + } + + private: + int fd_ = -1; +}; +#else +struct DumpPipe { + DumpPipe(int rank) {} + bool shouldDump() { + return false; + } +}; +#endif + +// ProcessGroupNCCL implements NCCL bindings for c10d. +// +// All functions of the class are expected to be called in the same order +// across all processes in the process group. This is the only way that we +// can guarantee to match up the same calls among all processes. +// +// All NCCL functions provided by this class are asynchronous functions. More +// specifically, each NCCL call is scheduled on a separate CUDA stream that is +// different from the current CUDA stream. This is for the purpose of +// achieving potentially concurrency and better performance. As a result, +// it is the callers' responsibility to make sure that the CUDA stream their +// code works on needs to wait for the NCCL operation from +// this class. 
+// +// This can be done by calling: +// +// either WorkNCCL::wait() or WorkNCCL::synchronize(), both achieves the same +// functionality and are synonyms. +// +// Also note that WorkNCCL::finishedGPUExecution() is a helper function only +// provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has +// finished execution on the GPU (not just scheduled). +// +// Example on using the NCCL process group +// +// ProcessGroupNCCL pg(store, rank, size); +// std::shared_ptr work = pg.allreduce(tensors); +// +// // At this point, NCCL kernel has already by queued successfully +// // Now, let current stream wait for the NCCL to finish, this function is +// // async operation as well +// +// work->wait() +// +// // Now continue on other work in the current stream. +class TORCH_API ProcessGroupNCCL : public Backend { + public: + class WorkNCCL : public Work, public std::enable_shared_from_this { + public: + friend struct WorkInfo; + + // Constructor takes a list of CUDA devices + WorkNCCL( + at::Device& device, + int rank, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr, + const c10::optional>& inputs = c10::nullopt, + bool desyncDebug = false, + bool enableTiming = false, + DebugLevel distDebugLevel = DebugLevel::Off); + // Copy constructor doing partial copy without outputs_. Cleanup thread + // monitors and removes finished works. However it will deadlock when + // destructs outputs_ tensors who are view tensors in autograd graph. + WorkNCCL(const WorkNCCL& w); + + ~WorkNCCL() override; + + // Checks if the NCCL kernel has started to execute. + bool isStarted(); + + // Checks if request has completed. In this specific case of NCCL, it checks + // if the NCCL operation has completed on the GPU in its own NCCL stream. + // Non-blocking operation. + bool isCompleted() override; + + bool isSuccess() const override; + + // Same as calling synchronize() for NCCL work. + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + // Let current stream wait on the completing of the NCCL work + // Throws on exceptions. Blocking operation, which will wait for work + // completion. + void synchronize() override; + + // Synchronize streams by blocking each on the NCCL stream + void synchronizeStream(); + + // Helper function to handle exception (throw if needed). + void handleException(ErrorHandlingMode asyncErrorHandling); + + // Helper function that checks if the NCCL kernels have finished + // execution on the GPUs + bool finishedGPUExecution(); + + // Get a Future object that will be marked as completed internally. + c10::intrusive_ptr getFuture() override; + + float getDuration() const override; + + uint64_t getSequencenumber() const override; + + const std::string& logPrefix() const; + + // Helper function that sets an exception_ptr on the WorkNCCL object. + void setException(std::exception_ptr exception_ptr); + + // Helper function that returns True if the WorkNCCL object has timed out + // and False otherwise. + // In case of timeout, set exception on the WorkNCCL object. + bool checkTimeout( + c10::optional timeout = c10::nullopt); + + std::vector result() override; + + protected: + // The cached list of CUDA devices to operate on + at::Device device_; + + // The start CUDA event of NCCL operator tracking this work item. These + // start CUDA events are needed by desync debugging if enabled. + std::shared_ptr ncclStartEvent_; + + // The end CUDA event of NCCL operator tracking this work item. 
+ std::shared_ptr ncclEndEvent_; + + // The NCCL communicator used for this work item. + std::shared_ptr ncclComm_; + + // Tensors used for barrier op + at::Tensor barrierTensor_; + + // Clone of blockingWait_ from ProcessGroupNCCL. + bool blockingWait_ = false; + + // Clone of avoidRecordStreams_ from ProcessGroupNCCL. + bool avoidRecordStreams_ = false; + + // Clone of opTimeout_ from ProcessGroupNCCL. + std::chrono::milliseconds opTimeout_; + + // Time point representing when the work started. + std::chrono::time_point workStartTime_; + + // Record the collective sequential number. + uint64_t seq_; + + // Indicates if the nccl start event has been updated to the store trace. + // This will be used by desync debug. + bool startTraceUpdated_{false}; + + // Record collective sizes for debug. We only record the size on the first + // device as multi-device per process is deprecated + size_t numelIn_ = -1; + size_t numelOut_ = -1; + + // Wrapper method for the static checkForNCCLErrors which can be overridden + // for tests. + virtual std::exception_ptr checkForNCCLErrors(); + + friend std::ostream& operator<<( + std::ostream& output, + const WorkNCCL& workNCCL); + + private: + // Helper function for synchronize + void synchronizeInternal(std::chrono::milliseconds timeout); + + // Checks for NCCL errors and sets an appropriate exception_ptr. + void checkAndSetException(); + + // Just checks whether GPU execution has started, without modifying + // exception_ptr. + bool startedGPUExecutionInternal() const; + + // Just checks whether GPU execution has completed, without modifying + // exception_ptr. + bool finishedGPUExecutionInternal() const; + + // Reference to the store so that we can write aborted communicators + // to the store. + c10::intrusive_ptr store_; + + // Store a reference to NCCL collective's outputs, used by result and to + // give a more descriptive message when representing the Work as a string. + std::shared_ptr> outputs_; + + // TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper. + // Stores references to participating non-output tensors (ie inputs, + // flattened intermediates). + // We'll clear this list in synchronizeStream, just after user-facing + // stream(s) are synced with the nccl work stream(s). + // By keeping these refs (as well as outputs_) alive until after the + // collective's work rejoins the user-facing streams, we achieve + // caching allocator safety without any recordStream calls. + // For in-place collectives, some refs stashed here may alias outputs_, + // but that doesn't do any harm. + std::shared_ptr> stashed_for_allocator_safety_; + + // The future returned by getFuture. + c10::intrusive_ptr future_; + + bool timingEnabled_; + // unique id used to tell the trace buffer that this + // work has completed + c10::optional trace_id_; + DebugLevel distDebugLevel_; + friend class ProcessGroupNCCL; + }; + + struct Options : Backend::Options { + // NOTE: timeout in ProcessGroupNCCL::Options denote the timeout for + // operations. This is only used when blockingWait_ is enabled. 
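+    // Illustrative sketch only (names like `store`, `rank`, and `size` are
+    // assumed to exist at the call site, and the exact fields may differ by
+    // version); shows how these options are typically built and handed to
+    // the process group:
+    //
+    //   auto opts = ProcessGroupNCCL::Options::create(
+    //       /*is_high_priority_stream=*/true);
+    //   opts->timeout = std::chrono::minutes(30);
+    //   ProcessGroupNCCL pg(store, rank, size, opts);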
+ explicit Options(bool is_high_priority_stream = false); + + // return intrusive_ptr of the object + static c10::intrusive_ptr create( + bool is_high_priority_stream = false) { + return c10::make_intrusive(is_high_priority_stream); + } + + // Schedule NCCL operations on high priority CUDA streams + bool is_high_priority_stream; + +#ifdef NCCL_HAS_COMM_NONBLOCKING + // Configure ranks + ncclConfig_t config = NCCL_CONFIG_INITIALIZER; +#endif + + // Optional "parent" backend and color to create communicators from + // via `ncclCommSplit` + std::shared_ptr split_from; + int64_t split_color{0}; + std::vector global_ranks_in_group; + }; + + // If you wish to create multiple process groups, each with a potentially + // different rank and size, you can do so by passing a new store instance + // to each one. If you have only a single store object, you can + // use the `c10d::PrefixStore` to derive scoped instances. + // This is also what the Python API in torch.distributed does. + // + // The process group instance keeps a reference to the store because + // it may be used long after the constructor runs. In fact, the constructor + // doesn't create any NCCL communicators. A single NCCL communicator can + // only be used on a specific set of devices, and are therefore created + // on-demand when a collective runs. If another collective is executed later, + // against a different set of devices, the process group creates another NCCL + // communicator. These NCCL communicators are cached and reused if possible. + // + ProcessGroupNCCL( + const c10::intrusive_ptr& store, + int rank, + int size, + c10::intrusive_ptr options = Options::create()); + + // This constructor includes the deprecated `groupName` argument. + // If you have existing code that uses the `groupName`, you can replace + // it by specifying a `c10d::PrefixStore(groupName, store)` for store. 
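+  // For illustration, a sketch of that migration (assumes `store`, `rank`,
+  // `size`, and `groupName` already exist at the call site):
+  //
+  //   // deprecated:
+  //   ProcessGroupNCCL pg(store, rank, size, groupName);
+  //   // preferred, scoping the store by group name instead:
+  //   auto scoped = c10::make_intrusive<PrefixStore>(groupName, store);
+  //   ProcessGroupNCCL pg(scoped, rank, size);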
+ C10_DEPRECATED ProcessGroupNCCL( + const c10::intrusive_ptr& store, + int rank, + int size, + const std::string& groupName, + c10::intrusive_ptr options = Options::create()) + : ProcessGroupNCCL(store, rank, size, options) {} + + ~ProcessGroupNCCL() override; + + uint64_t getUid() { + return static_cast(uid_); + } + + c10::intrusive_ptr getOptions() { + return options_; + } + + const std::string getBackendName() const override { + return std::string(NCCL_BACKEND_NAME); + } + + bool supportsSplitting() const override { + return true; + } + + void startCoalescing() override; + + c10::intrusive_ptr endCoalescing() override; + + // For specifying a composite optype, such as ALLGATHER and REDUCE_SCATTER + c10::intrusive_ptr endCoalescing(OpType optype); + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr _broadcast_oop( + at::Tensor& outputTensors, + at::Tensor& inputTensors, + const BroadcastOptions& opts = BroadcastOptions()); + + c10::intrusive_ptr allreduce_sparse( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr _reduce_oop( + at::Tensor& outputTensors, + at::Tensor& inputTensors, + const ReduceOptions& opts = ReduceOptions()); + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputbuffer, + at::Tensor& inputbuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + void groupStart(); + + void groupEnd(); + + void 
groupEndNonblocking(std::shared_ptr comm); + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + // Unsupported Ops + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + // Return the total number of splits the communicators held by this process + // group have performed. + uint64_t getCommSplitCounter() const; + + void registerOnCompletionHook( + std::function)>&& hook) override; + void waitForPendingWorks() override; + + void enableCollectivesTiming() override; + + // Helper function for iteratively aborting communicators in the provided map + void abortCommsFromMap( + std::unordered_map>& ncclCommsMap, + c10::optional abortReason); + + c10::intrusive_ptr initIntraNodeComm(); + + // Provides an API to abort the ProcessGroup (similar to ncclCommAbort) + // instead of relying on ProcessGroupNCCL destructor. + // return true if abort is successful, otherwise false + bool abort(c10::optional abortReason = c10::nullopt); + + void shutdown(c10::optional reason = c10::nullopt); + + void eagerConnectSingleDevice(at::Device device) override; + + void performNocolorSplit(at::Device device); + + protected: + // Helper that broadcasts nccl unique ID to all ranks through the store + void broadcastUniqueNCCLID( + ncclUniqueId* ncclID, + bool isSingleP2POp, + const std::string& devicesKey, + int p2pRank); + + // Helper that either looks up the cached NCCL communicators or creates + // a new set of NCCL communicators as a cache entry + std::shared_ptr getNCCLComm( + const std::string& deviceKey, + at::Device& device, + OpType opType, + int p2pRank = 0, + bool isSendRecvSelf = false); + + // Wrapper method which can be overridden for tests. + virtual std::exception_ptr checkForNCCLErrors( + std::shared_ptr& ncclComm); + + // Ensure thaht if record is True, the work obj will be enqueued via + // workEnqueue + virtual c10::intrusive_ptr initWork( + at::Device& device, + int rank, + OpType opType, + const char* profilingTitle = nullptr, + const std::vector& inputs = {}, + const std::vector& outputs = {}, + bool record = false); + + // In the timeout case and we will dump debug info such as the NCCL flight + // recorder to storage. Down the road, if we have more complicated or blocking + // operations, we might need to use a side thread to do it. + bool dumpDebuggingInfo(); + + private: + int globalRankStart; + int globalRankStride; + + // Helper that encapsulates work shared across all collective communication + // primitives. 
The callbacks have the following signatures: + // + // ncclResult_t fn(at::Tensor& input, at::Tensor& output, + // ncclComm_t, at::cuda::CUDAStream&); + // void {pre,post}(std::vector); + template + c10::intrusive_ptr collective( + at::Tensor& input, + at::Tensor& output, + Fn fn, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false); + + template + c10::intrusive_ptr collective( + at::Tensor& input, + at::Tensor& output, + Fn fn, + PreProcess pre, + PostProcess post, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false); + + template + c10::intrusive_ptr collectiveCoalesced( + std::vector& input, + std::vector& output, + Fn fn, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false); + + // Helper that encapsulates work shared across point-to-point communication + // primitives. It is the same structure as the helper used for collective + // communication primitives. + template + c10::intrusive_ptr pointToPoint( + at::Tensor& tensor, + Fn fn, + int peer, + OpType opType, + const char* profilingTitle = nullptr); + + template + c10::intrusive_ptr pointToPoint( + at::Tensor& tensor, + Fn fn, + int peer, + OpType opType, + PreProcess pre, + PostProcess post, + const char* profilingTitle); + + c10::intrusive_ptr allreduce_impl( + at::Tensor& tensor, + const AllreduceOptions& opts = AllreduceOptions()); + + // Checks for NCCL errors on each of the communicators and returns an + // appropriate exception_ptr (nullptr if no errors). + static std::exception_ptr checkForNCCLErrorsInternal( + std::shared_ptr& ncclComm); + + // Function that runs as part of a separate thread and checks for errors on + // NCCL communicators. We need a separate thread to check for NCCL errors + // since we can't rely on the user calling certain methods like wait(), + // isCompleted() etc. to detect and remediate errors. In addition to this, we + // need a mechanism to safely abort and remove NCCL communicators from our + // cache. This can be done cleanly by having a thread for the ProcessGroupNCCL + // class. Attempting to modify the communicator cache from the WorkNCCL class + // might run into issues with object lifetime since the ProcessGroupNCCL + // object might get destroyed before the WorkNCCL object. + void ncclCommWatchdog(); + + // Return the CUDA device most likely associated with this backend. + // If we aren't bound to a specific device, there is no strict + // guarantee that this heuristic is the correct assignment of ranks + // to GPUs that Python layers use, but in practice it tends to be. + // Fortunately we don't rely on this for correctness of any tensor + // operations, just for ancillary uses like barriers. + at::Device guessDeviceForRank() const; + + // Destroys initialized NCCL communicators in devNCCLComMap_ given by input + // key. Throws if there are no communicators to destroy. Also removes + // communicators from the cache and clears used device indices. + void destroyNCCLComms(const std::string& devNCCLCommMapKey); + + // Watchdog's inside loop. + // Takes care of cleaning up completed work, and aborting upon failure or + // timeout. 
+ void watchdogHandler(); + + void runHookLoop(); + + // Desync debug helper + void logWorkStart(WorkNCCL& work); + + // Desync debug helper + void logWorkEnd(WorkNCCL& work); + + // Generates a prefix that is unique to this process group and rank, for + // disambiguating logs + std::string createLogPrefix() const; + + // Returns the unique prefix created in createLogPrefix + const std::string& logPrefix() const; + + // Returns the global rank of the device. This function assumes that users + // always create a default global process group(PG) which includes all + // devices. It is called in the constructor of ProcessGroupNCCL, so it always + // return the rank_ of the the very first PG created, aka, default global PG. + const int& globalRank() const; + + // Returns the global ranks of a PG. + const std::vector& groupRanks() const; + + protected: + // Function that runs as part of a separate thread aside from watchdog + // thread because we need to check the heartbeat from watchdog thread + // so that when we get stuck in some NCCL/CUDA calls, + // we can dump the debugging information and abort the process. + virtual void heartbeatMonitor(); + + // Function that directly trigger std::abort so that the whole process + // gets terminated. + virtual void terminateProcess(std::string errMsg); + + // A helper function to wait for a future to complete or timeout. + void waitForFutureOrTimeout( + std::future& fut, + const std::chrono::milliseconds& timeOutMilSec, + const std::string& futDescription, + bool throwException = false); + + // When watchdog timeout, this function will be called and return debug info + // for users. For now we only get information from retrieveDesyncReport. + // We are working on enabling more useful debug information for watchdog + // timeout. + virtual std::string getNCCLWatchdogDebugInfo(); + + static const int64_t kWatchdogThreadSleepMillis; + + // The store is used to broadcast the NCCL unique ID of rank 0. This store + // comes with prefix and it is different across ProcessGroup NCCL instances + // (aka, different ProcessGroups). + c10::intrusive_ptr store_; + + // Reference to the store without prefix so that keys are same across all + // ProcessGroup NCCL instances and (key, value) pairs written to the store are + // global. + c10::intrusive_ptr globalStore_; + + bool storeError_{false}; + + const c10::intrusive_ptr options_; + + // The number of NCCL communicators that have been created during + // the lifetime of this process group. This sequence number is + // used to scope keys used in the store. + uint64_t ncclCommCounter_{0}; + + // The store keys to trace the last NCCL collective kernel CUDA events - start + // event and end event respectively. These are used to do desync root cause + // analysis. + const std::string traceKeyStart_; + const std::string traceKeyEnd_; + + // The NCCL communicator that the process group has cached. + // + // For collective operations: + // The key is a list of GPU devices that an operation is operating on + // The GPU devices are stored in a device sequence and the cache NCCL + // communicator is associated with this GPU device sequence + // + // e.g. If the process group op only uses device 0, then the value of + // the used device string stored (value of the hashmap) would be "0". 
+ // + // If the process group op uses device 0 - 7 and the each tensor of the + // input tensor list is on device, 0, 1, 2, 3, 4, 5, 6, 7 separately, + // then the value of the used device string (key) stored would be + // "0,1,2,3,4,5,6,7" + // + // If the process group op uses device 0 - 7 and the each tensor of the + // input tensor list is on device, 0, 4, 5, 6, 7, 1, 2, 3 separately, + // then the value of the used device string stored would be + // "0,4,5,6,7,1,2,3" + // + // Note that the order of the device for the tensor list matters. + // + // For point-to-point operations: + // The key is a string of my current rank and the peer process rank. + // e.g. If process 1 and process 2 are involved in a point-to-point + // communication, the key will be "1:2" on both processes. Note: this is for + // the scenario where there is only 1 GPU per process. When it comes to + // multiple GPUs per process, this part may need to redesigned. + std::unordered_map> devNCCLCommMap_; + + // The NCCL communicators currently in process of being initialized. + std::unordered_map> + inInitializationCommMap_; + + // Map from ncclUniqueId to appropriate communicator. + std::unordered_map> ncclIdToCommMap_; + + // Mutex to guard maps like devNCCLCommMap_ and ncclIdToCommMap_. + std::mutex mutex_; + + // Heartbeat of watchdog thread. + std::atomic_uint64_t heartbeat_; + + // The time interval used for deciding whether there is no watchdog heartbeat. + int heartbeatTimeoutInSec_; + + // timeout for the dump to finish. + int waitTimeoutDumpInMilSec_; + + // Interval of check coordinated signals in ProcessGroupNCCL from other ranks + // e.g., trigger the dump of the debugging info for timeout when notified. + int coordCheckIntervalMilSec_; + + // Size of ring buffer where we store NCCL Traces for debugging. + int ncclTraceBufferSize_; + + // We gate the heartbeat monitor thread so that we can roll it out gradually. + std::atomic monitorThreadEnabled_; + + // Monitor thread which checks the heartbeat of Watchdog thread. + // If the monitor thread finds there is no heartbeat, it will dump debug info + // and then kill the watchdog thread to avoid hang. + std::thread ncclHeartbeatMonitorThread_; + + // Watchdog thread which looks for errors on the cached NCCL communicators. + std::thread ncclCommWatchdogThread_; + + std::thread onCompletionHookThread_; + + // Whether or not we should terminate the watchdog and workCleanup threads. + std::atomic terminateProcessGroup_; + + // Whether or not we should terminate the heartbeat monitoring threads. + std::atomic terminateHeartbeatMonitorThread_; + + // Whether we are in the shutdown mode when we are trying to get debug info, + // such as desync report. + std::atomic collectiveDebugInfoMode_; + + // Whether there are hooks pending to be fired + std::atomic hasPendingHooks_; + + // This is the signal from watchdog threads to indicate whether the monitor + // thread should dump. Making it static so that it is accessiable from all the + // PGs. With this flag, monitor thread would dump debug info under any one of + // the 3 conditions: 1: this flag is set to true by the watchdog thread when + // it detects a timeout. 
2: timeout signal is received from + // other ranks through tcpstore 3: no heartbeat of watchdog Note that only the + // monitor thread from PG0 should dump the debug info and only once + static std::atomic shouldDump_; + + // Mutex to Guard workMetaList_ + std::mutex workMetaListMutex_; + + // Mutex to Guard monitorWakeUpCV_ + std::mutex monitorMutex_; + + bool writeDebugInfo_ = false; + + // Condition Variable for watchdog thread sleep + std::condition_variable workMetaListCV_; + + // Condition Variable for monitor thread to wake up early + std::condition_variable monitorWakeUpCV_; + + // Vector to Store WorkNCCL pointers + std::list workMetaList_; + + std::chrono::time_point lastWorkListUpdateTime_; + + // Mutex to Guard workMetaList_ + std::mutex completedWorkListMutex_; + + // Condition Variable for watchdog thread sleep + std::condition_variable completedWorkListCV_; + + std::list completedWorkList_; + + // Add Work Pointer to workVector + void workEnqueue(c10::intrusive_ptr); + + // The CUDA streams used by NCCL kernels + std::unordered_map ncclStreams_; + + // The CUDA events used to sync NCCL streams + std::unordered_map ncclEvents_; + + // Device Indexes used for all collectives in this group + std::set usedDeviceIdxs_; + + // Flag to denote if a coalescing groupStart/groupEnd block is active + int coalescing_state_ = 0; + + // Stores device indexes for all collectives run inside a coalescing block + std::vector coalescedDevices_; + + // Stores communicators for all collectives run inside a coalescing block + std::vector> coalescedComms_; + + // map from the key: "group name + pg counter (ID)" to the + // unique NCCL ID count. This needs to be group and pg specific + // + // For each process group, we need a uniform unique NCCL ID counter to ensure + // that NCCL operation in this process group can be completed successfully. + // Since each process group ID belongs to a group name, the key to this map + // is a combination of group name and ProcessGroupNCCL ID. + static std::unordered_map pgUniqueNCCLIDCnt_; + + // map from group name to the pg counter (ID) within that group + // + // For each group with the "group name" (which is the key), we need to + // keep track of a unique process group ID when creating a new + // ProcessGroupNCCL for this "group name". Therefore, the value of this + // map keeps the unique ProcessGroupNCCL's ID for a specific group with + // the "group name". The reason we need a per-group process group ID counter + // is that different group can have different ranks and we need ensure that + // each group has its own uniform process group ID for all its ranks. + static std::unordered_map processGroupCounterMap_; + + // Whether or not wait() and synchronize() are blocking operations that wait + // for the operation to complete. + bool blockingWait_ = false; + + // Whether to abort the communicators when users call destroy_process_group(). + // If yes, communicators will be aborted when destroy_process_group is called, + // but not in destructor. + bool abortInDestroyProcessGroup_ = false; + + // Whether or not to hook the cache allocator to register all allocated + // tensors + bool useTensorRegisterAllocatorHook_ = false; + + // Whether or not the workCleanupThread is used to perform async error + // handling. + ErrorHandlingMode asyncErrorHandling_ = NoHandling; + + // Whether or not to enable timeout root cause analysis. 
+ bool desyncDebug_; + + // Whether or not to dump debug info on timeout + bool dumpOnTimeout_; + + // Whether or not to create start CUDAEvent and enable timing for start + // and end events. Note that enableTiming_ is always true if desyncDebug_ + // is set to true. + std::atomic enableTiming_; + + // Flag to enable the print of hash value of input/output of collectives for + // verification. + std::atomic enableCollecticeHashDebug_; + + // Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set + bool avoidRecordStreams_ = false; + + // Set of communicators that this process group has aborted and their + // ncclUniqueId has been written to the store. We don't need a lock + // for this map since only the watchdog thread accesses this set. The + // set contains the string representation of ncclUniqueId. + std::unordered_set abortedComms_; + + // The number of active ncclGroupStart() calls. This counter will be increased + // by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd() + // is called. + static thread_local uint64_t ncclActiveGroupCounter_; + + // Counting for the sequential number of NCCL collective call. + // (specifically, how many actual kernels we launched, which differs from + // op_id_ when coalescing is enabled) + uint64_t seq_{0}; + + // Incrementing counter for logical operations (collective or p2p) issued on + // the ProcessGroup + uint64_t op_id_{0}; + + // the sequential number of the last colletive enqueued into workMetaList_ + // This is useful for indentifying a rank that has not join a collective + uint64_t lastEnqueuedSeq_; + + // the sequential number of the last colletive completed marked by + // the watchdog thread + uint64_t lastCompletedSeq_; + + std::exception_ptr watchDogException_ = nullptr; + + size_t uid_; + + std::string logPrefix_; + + c10::intrusive_ptr intraNodeComm_; +}; + +TORCH_API std::string dump_nccl_trace(); + +// Gets a mutable reference to a global optional function. Heartbeat Monitor +// will query this function and if available, call it to dump traces. Inside +// fbcode, we store a function here that uses an internal tool for process +// tracing +TORCH_API c10::optional>& get_cpp_trace_dumper(); + +// Similar to get_cpp_trace_dumper, this stores a function defined in +// torch-python layer that lets us check whether the GIL can be acquired, +// helpful for instrumenting in cases where a hang was observed. +typedef bool (*gil_checker_t)(); + +TORCH_API gil_checker_t& get_gil_checker(); +} // namespace c10d + +#endif // USE_C10D_NCCL diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp new file mode 100644 index 0000000000000000000000000000000000000000..8255bceebd6cf2c0d2d4c2b98e0396c1020a3d6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp @@ -0,0 +1,113 @@ +#pragma once + +#include + +#include + +namespace c10d { + +constexpr const char* ROUND_ROBIN_BACKEND_NAME = "round_robin"; + +// ProcessGroupRoundRobin implements simple load balancing. +// +// It is constructed with multiple processes groups. Each call is dispatched to +// one of the specified process groups in a round robin fashion. Each process +// group instance must have the same rank and size. +// +// All functions of the class are expected to be called in the same order +// across all processes in the process group. 
This is the only way that we +// can guarantee to match up the same calls among all processes. +// +class TORCH_API ProcessGroupRoundRobin final : public ProcessGroup { + public: + explicit ProcessGroupRoundRobin( + int rank, + int size, + std::vector> processGroups); + + ~ProcessGroupRoundRobin() override; + + const std::string getBackendName() const override { + return std::string(ROUND_ROBIN_BACKEND_NAME); + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputs, + std::vector& inputs, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputs, + std::vector>& inputs, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputs, + std::vector>& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + private: + std::vector> processGroups_; + std::vector>::const_iterator iterator_; + + // Returns the next ProcessGroup to use. 
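+  // For illustration only, a sketch of the wrap-around behavior this accessor
+  // is expected to provide (the actual definition lives in the corresponding
+  // .cpp file and may differ in details):
+  //
+  //   const c10::intrusive_ptr<ProcessGroup>& next() {
+  //     auto& pg = *iterator_;
+  //     if (++iterator_ == processGroups_.end()) {
+  //       iterator_ = processGroups_.begin();
+  //     }
+  //     return pg;
+  //   }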
+ const c10::intrusive_ptr& next(); +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp new file mode 100644 index 0000000000000000000000000000000000000000..13503ca3f5a9fadd13ec537b52c64b62d2d1ac33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp @@ -0,0 +1,140 @@ +#pragma once + +#ifdef USE_C10D_GLOO + +#include +#include +#include + +namespace c10d { + +class TORCH_API ProcessGroupWrapper : public Backend { + public: + explicit ProcessGroupWrapper( + c10::intrusive_ptr backend, + c10::intrusive_ptr glooBackend); + + const std::string getBackendName() const override; + + c10::intrusive_ptr broadcast( + std::vector& data, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& data, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + // This function is deprecated and will be moved out of ProcessGroup to comms: + // * do not add dependencies on this function, + // * do not implement it in your ProcessGroup, implement _allgather_base + // instead. + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + void monitoredBarrier(const BarrierOptions& opts, bool waitAllRanks = false) + override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + // dont implement this + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. 
+ uint64_t getSequenceNumberForGroup() override; // just call underlying + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const ReduceScatterOptions& opts) override; + + void startCoalescing() override; + + c10::intrusive_ptr endCoalescing() override; + + c10::intrusive_ptr getWrappedPg() const; + + private: + // Underlying process group that actual application collectives will be + // dispatched to + c10::intrusive_ptr backend_; + // Gloo process group responsible for internal coordination such as monitored + // barrier, sequence number checking, collective fingerprint collecting. + c10::intrusive_ptr glooBackend_; + // Conducts several checks to ensure that the underlying collective is well + // formed with the goal of notifying the user about incorrect collective use + // in the application. + void runCollectiveChecks( + OpType op_type, + const std::vector& tensors); +}; +} // namespace c10d + +#endif // USE_C10D_GLOO diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d10f0a6986929949cd08e1ce11b9d51769c4bdc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp @@ -0,0 +1,219 @@ +#pragma once + +#include +#include +#include + +namespace c10d { + +// PyProcessGroup is a pybind11 trampoline class to allow a Python +// class to inherit from torch.distributed.ProcessGroup +class PyProcessGroup : public ProcessGroup { + public: + // PyWork is a pybind11 trampoline class to allow a Python + // class to inherit from torch.distributed.Work + class PyWork : public Work { + public: + PyWork() = default; + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override { + PYBIND11_OVERRIDE( + bool, /* Return type */ + Work, /* Parent class */ + wait, /* Name of function in C++ */ + timeout); + } + + c10::intrusive_ptr getFuture() override { + // We cannot use PYBIND11_OVERRIDE because: + // 1. We have to >MANUALLY< unwrap the PyFutureWrapper and + // 2. 
The python name is get_future + pybind11::gil_scoped_acquire gil; + auto override = + pybind11::get_override(static_cast(this), "get_future"); + + if (override) { + py::object o = override(); + auto futWrapper = + o.cast>(); + return futWrapper->fut; + } + + return Work::getFuture(); + } + }; + + using ProcessGroup::ProcessGroup; + + const std::string getBackendName() const override { + PYBIND11_OVERRIDE_PURE( + std::string, /* Return type */ + ProcessGroup, /* Parent class */ + getBackendName, /* Name of function in C++ */ + ); + } + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allgather, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allgather_into_tensor_coalesced, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allreduce, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allreduce_coalesced, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + barrier, /* Name of function in C++ */ + opts); + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + broadcast, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + reduce_scatter, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + reduce_scatter_tensor_coalesced, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + send, /* Name of function in C++ */ + tensors, + dstRank, + tag); + } + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* 
Return type */ + ProcessGroup, /* Parent class */ + recv, /* Name of function in C++ */ + tensors, + srcRank, + tag); + } +}; + +class TORCH_PYTHON_API PythonOnCompletionHook { + public: + // Wraps a py::object hook and acquires Python GIL in dtor before + // destructing the hook object. + PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {} + + ~PythonOnCompletionHook() { + py::gil_scoped_acquire ag; + hook_.dec_ref(); + // Explicitly set hook_ to nullptr to prevent py::object's dtor + // to decref on the PyObject again. + // See Note [Destructing py::object] in python_ivalue.h + hook_.ptr() = nullptr; + } + + void operator()(std::shared_ptr workInfo) const { + std::exception_ptr eptr; + { + py::gil_scoped_acquire acquire; + try { + hook_(workInfo); + } catch (py::error_already_set& e) { + // py::error_already_set requires GIL to destruct, take + // special care. + eptr = std::make_exception_ptr(std::runtime_error(e.what())); + e.restore(); + PyErr_Clear(); + } catch (std::exception& e) { + eptr = std::current_exception(); + } + } + // No more Python-related stuff at this point, i.e., this + // exception can be captured and handled by PG backend. + if (eptr) + std::rethrow_exception(eptr); + } + + private: + py::object hook_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b3a649659af4c40b675f837e897443b7b3df6f01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp @@ -0,0 +1,73 @@ + +#pragma once + +#include + +#include + +namespace c10d { + +// `RankLocal` maintains a unique instance of T for each non-autograd thread. +// For non-autograd threads, `RankLocal::get()` functions similar to +// thread_local. For autograd threads, `RankLocal::get()` returns the +// instance of T corresponding to the enqueuing non-autograd thread. The +// mechanism allows for rank-specific context shared between forward and +// backward. It works for both the one-rank-per-process and one-rank-per-thread +// scenarios. +// +// NOTE: RankLocal doesn't make the underlying objects thread-safe. +template +class RankLocal { + public: + RankLocal(const RankLocal&) = delete; + RankLocal& operator=(const RankLocal&) = delete; + + static T& get() { + // Fast path: non-autograd threads can simply return + // the object reference cached in TLS. + if (cached_ != nullptr) { + return *cached_; + } + const auto node = torch::autograd::get_current_node(); + auto fwd_thread_id = node == nullptr ? at::RecordFunction::currentThreadId() + : node->thread_id(); + // Optimistically acquire the read lock first, since most likely we are in + // an autograd thread and the object has already been constructed. 
+ { + std::shared_lock read_lock(lock_); + auto it = thread_id_to_rank_local_.find(fwd_thread_id); + if (it != thread_id_to_rank_local_.end()) { + // Cache for non-autograd threads + if (node == nullptr) { + cached_ = &it->second; + } + return it->second; + } + } + + std::unique_lock write_lock(lock_); + auto [it, _] = thread_id_to_rank_local_.try_emplace(fwd_thread_id); + // Cache for non-autograd threads + if (node == nullptr) { + cached_ = &it->second; + } + return it->second; + } + + private: + RankLocal(){}; + thread_local static T* cached_; + static std::unordered_map thread_id_to_rank_local_; + static std::shared_mutex lock_; +}; + +template +thread_local T* RankLocal::cached_ = nullptr; + +template +std::unordered_map RankLocal::thread_id_to_rank_local_; + +template +std::shared_mutex RankLocal::lock_; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3c0ae960ff7ca74aefd5a17037565e95d1bf76a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp @@ -0,0 +1,101 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace c10d { + +// callback function will be given arguments (optional oldValue, +// optional newValue) +using WatchKeyCallback = + std::function, c10::optional)>; + +class TORCH_API Store : public torch::CustomClassHolder { + public: + static constexpr std::chrono::milliseconds kDefaultTimeout = + std::chrono::seconds(300); + static constexpr std::chrono::milliseconds kNoTimeout = + std::chrono::milliseconds::zero(); + + Store() : timeout_(kDefaultTimeout) {} + + explicit Store(const std::chrono::milliseconds& timeout) + : timeout_(timeout) {} + + Store(const Store&) = default; + Store(Store&&) noexcept = default; + + ~Store() override = default; + + void set(const std::string& key, const std::string& value); + + virtual void set( + const std::string& key, + const std::vector& value) = 0; + + std::string compareSet( + const std::string& key, + const std::string& currentValue, + const std::string& newValue); + + virtual std::vector compareSet( + const std::string& key, + const std::vector& currentValue, + const std::vector& newValue) { + TORCH_INTERNAL_ASSERT(false, "Not implemented."); + } + + std::string get_to_str(const std::string& key); + + virtual std::vector get(const std::string& key) = 0; + + virtual int64_t add(const std::string& key, int64_t value) = 0; + + virtual bool deleteKey(const std::string& key) = 0; + + virtual bool check(const std::vector& keys) = 0; + + virtual int64_t getNumKeys() = 0; + + virtual void wait(const std::vector& keys) = 0; + + virtual void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) = 0; + + virtual const std::chrono::milliseconds& getTimeout() const noexcept; + + virtual void setTimeout(const std::chrono::milliseconds& timeout); + + // watchKey() is deprecated and no longer supported. 
+ virtual void watchKey( + const std::string& /* unused */, + WatchKeyCallback /* unused */) { + TORCH_CHECK(false, "watchKey is deprecated, no implementation support it."); + } + + virtual void append( + const std::string& key, + const std::vector& value); + + virtual std::vector> multiGet( + const std::vector& keys); + + virtual void multiSet( + const std::vector& keys, + const std::vector>& values); + + // Returns true if this store support append, multiGet and multiSet + virtual bool hasExtendedApi() const; + + protected: + std::chrono::milliseconds timeout_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3919b494376be8f333b989292f3de9cf46fa124f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp @@ -0,0 +1,164 @@ +#pragma once + +#include +#include +#include + +#include + +namespace c10d { +namespace detail { + +class TCPServer; + +class TCPClient; + +struct SocketAddress { + std::string host{}; + std::uint16_t port{}; +}; + +class Counter { + public: + void update(double val); + std::unordered_map observe() const; + + double mean() const noexcept { + return mean_; + } + int64_t count() const noexcept { + return count_; + } + double variance() const noexcept { + return m2_ / count_; + } + double sample_variance() const noexcept { + return m2_ / (count_ - 1); + } + + private: + int64_t count_ = 0; + double mean_ = 0; + double m2_ = 0; +}; + +} // namespace detail + +struct TCPStoreOptions { + static constexpr std::uint16_t kDefaultPort = 29500; + + std::uint16_t port = kDefaultPort; + bool isServer = false; + c10::optional numWorkers = c10::nullopt; + bool waitWorkers = true; + std::chrono::milliseconds timeout = Store::kDefaultTimeout; + + // A boolean value indicating whether multiple store instances can be + // initialized with the same host:port pair. + bool multiTenant = false; + + // If specified, and if isServer is true, the underlying TCPServer will take + // over the bound socket associated to this fd. This option is useful to avoid + // port assignment races in certain scenarios. + c10::optional masterListenFd = c10::nullopt; + + // A boolean value indicating whether to use the experimental libUV backend. 
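+  // Illustrative sketch of constructing a server-side TCPStore with these
+  // options (the host string and worker count below are placeholders, not
+  // defaults):
+  //
+  //   TCPStoreOptions opts;
+  //   opts.isServer = true;
+  //   opts.numWorkers = 4;
+  //   opts.useLibUV = true;
+  //   auto store = c10::make_intrusive<TCPStore>("127.0.0.1", opts);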
+ bool useLibUV = false; +}; + +class TORCH_API TCPStore : public Store { + public: + explicit TCPStore(std::string host, const TCPStoreOptions& opts = {}); + + [[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore( + const std::string& masterAddr, + std::uint16_t masterPort, + c10::optional numWorkers = c10::nullopt, + bool isServer = false, + const std::chrono::milliseconds& timeout = kDefaultTimeout, + bool waitWorkers = true); + + ~TCPStore() override; + + void set(const std::string& key, const std::vector& value) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + int64_t add(const std::string& key, int64_t value) override; + + bool deleteKey(const std::string& key) override; + + bool check(const std::vector& keys) override; + + int64_t getNumKeys() override; + + void wait(const std::vector& keys) override; + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + void append(const std::string& key, const std::vector& value) + override; + + std::vector> multiGet( + const std::vector& keys) override; + + void multiSet( + const std::vector& keys, + const std::vector>& values) override; + + bool hasExtendedApi() const override; + + // Waits for all workers to join. + void waitForWorkers(); + + // Returns the hostname used by the TCPStore. + const std::string& getHost() const noexcept { + return addr_.host; + } + + // Returns the port used by the TCPStore. + std::uint16_t getPort() const noexcept { + return addr_.port; + } + + std::unordered_map> + collectClientCounters() const noexcept; + + bool isLibUvBackend() const noexcept { + return usingLibUv_; + } + + // note(xilunwu): this function is only for internal testing + void _splitSet(const std::string& key, const std::vector& data); + + private: + int64_t incrementValueBy(const std::string& key, int64_t delta); + + void validate(void); + + std::vector doGet(const std::string& key); + + void doWait( + c10::ArrayRef keys, + std::chrono::milliseconds timeout); + + detail::SocketAddress addr_; + std::shared_ptr server_; + std::unique_ptr client_; + c10::optional numWorkers_; + + const std::string initKey_ = "init/"; + const std::string keyPrefix_ = "/"; + std::mutex activeOpLock_; + std::unordered_map clientCounters_; + bool usingLibUv_ = false; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..51532a68635d1918f98d6bee12bf6386236e1073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h @@ -0,0 +1,723 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +namespace c10d { + +/* Trace Utils Related to TORCH_NCCL_DESYNC_DEBUG */ + +inline std::string getTraceStartKey(const std::string& pgName, int rank) { + return pgName + "_" + std::to_string(rank) + "_trace_start"; +} + +inline std::string getTraceEndKey(const std::string& pgName, int rank) { + return pgName + "_" + std::to_string(rank) + "_trace_end"; +} + +inline bool traceUpdate( + c10::intrusive_ptr& store, + const std::string& key, + uint64_t seq, + const std::string& col) { + std::vector value(col.size() + 
sizeof(seq) + 1); + memcpy(value.data(), &seq, sizeof(seq)); + memcpy(value.data() + sizeof(seq), col.data(), col.size()); + try { + store->set(key, value); + return true; + } catch (...) { + LOG(ERROR) << "Store is down while updating #" << seq << " with key " + << key; + return false; + } + return true; +} + +enum TraceDebugEvent { + kEventStart, + kEventEnd, +}; +// >> +using TraceMap = + std::map>>; + +inline std::string ranksToString(const std::vector& ranks) { + std::string str; + for (int rank : ranks) { + if (str.empty()) { + str = std::to_string(rank); + } else { + str += ", " + std::to_string(rank); + } + } + return str; +} + +inline std::string ranksFromTrace( + const std::vector>& items) { + std::string ranks; + for (auto& p : items) { + if (ranks.empty()) { + ranks = std::to_string(p.first); + } else { + ranks += ", " + std::to_string(p.first); + } + } + return ranks; +} + +inline std::string analyzeMissingRanks(const std::vector& missingRanks) { + return c10::str( + "\n\t - To our best knowledge, ranks [", + ranksToString(missingRanks), + "] are the lagging ranks that caused this timeout. " + "They never joined any collectives"); +} + +inline std::string analyzeLaggingRanks(const TraceMap& traceMap) { + uint64_t lagSeq = traceMap.begin()->first; + std::vector startRanks; + std::vector endRanks; + for (auto& p : traceMap.begin()->second) { + if (p.second.second == kEventStart) { + startRanks.push_back(p.first); + } else { + endRanks.push_back(p.first); + } + } + std::string report = + "\n\t - To our best knowledge, the lagging/dead/mismatched ranks " + "that caused the desync are:"; + if (startRanks.size()) { + report += c10::str( + "\n\t - [", + ranksToString(startRanks), + "] joined but didn't finish collective #", + lagSeq, + " (count from 1)"); + } + if (endRanks.size()) { + report += c10::str( + "\n\t [", + ranksToString(endRanks), + "] finished collective #", + lagSeq, + ", but didn't join collective #", + lagSeq + 1, + " (count from 1)"); + } + return report; +} + +inline std::string dumpSnapshot(TraceMap& traceMap) { + std::string report = "\n\t - Snapshot of ranks' latest states:"; + for (auto& tracePair : traceMap) { + uint64_t seq = tracePair.first; + std::map>& subMap = + tracePair.second; + + std::unordered_map> collectivesStart; + std::unordered_map> collectivesEnd; + for (auto& p : subMap) { + int rank = p.first; + const std::string& col = p.second.first; + if (p.second.second == kEventStart) { + collectivesStart[col].push_back(rank); + } else { + collectivesEnd[col].push_back(rank); + } + } + + if (collectivesStart.size()) { + report += c10::str("\n\t #", seq, " started ranks:"); + for (auto& mapPair : collectivesStart) { + report += c10::str( + "\n\t [", + ranksToString(mapPair.second), + "] started ", + mapPair.first); + } + } + if (collectivesEnd.size()) { + report += c10::str("\n\t #", seq, " finished ranks:"); + for (auto& mapPair : collectivesEnd) { + report += c10::str( + "\n\t [", + ranksToString(mapPair.second), + "] finished ", + mapPair.first); + } + } + } + return report; +} + +inline bool parseTraceValue( + c10::intrusive_ptr& store, + const std::string& key, + uint64_t& seq, + std::string& col) { + try { + std::vector traceValue = store->get(key); + memcpy(&seq, traceValue.data(), sizeof(seq)); + std::string colName((char*)traceValue.data() + sizeof(seq)); + col = colName; + return true; + } catch (...) 
{ + LOG(ERROR) << "Store is down while getting key " << key; + return false; + } + return true; +} + +inline std::string retrieveDesyncReport( + c10::intrusive_ptr& store, + const std::string& pgName, + int myRank, + int worldSize) { + std::string report; + + uint64_t thisSeq; + std::string thisCol; + + std::vector missingRanks; + TraceMap traceMap; + + for (const auto rank : c10::irange(worldSize)) { + // Build traceMapStart. + uint64_t seqStart; + { + std::string traceKeyStart = getTraceStartKey(pgName, rank); + if (!store->check({traceKeyStart})) { + missingRanks.push_back(rank); + continue; + } + std::string col; + if (!parseTraceValue(store, traceKeyStart, seqStart, col)) { + return report; + } + traceMap[seqStart].emplace(rank, std::make_pair(col, kEventStart)); + if (rank == myRank) { + thisSeq = seqStart; + thisCol = std::move(col); + } + } + + // Build traceMapEnd. + { + std::string traceKeyEnd = getTraceEndKey(pgName, rank); + if (!store->check({traceKeyEnd})) { + continue; + } + uint64_t seq; + std::string col; + if (!parseTraceValue(store, traceKeyEnd, seq, col)) { + return report; + } + if (seq == seqStart) { + traceMap[seq][rank].second = kEventEnd; + } + } + } + + TORCH_INTERNAL_ASSERT( + !missingRanks.empty() || !traceMap.empty(), + "Trace shouldn't be empty while enabled GLOO_ASYNC_TIMEOUT_DEBUG"); + TORCH_INTERNAL_ASSERT( + !thisCol.empty(), + "Timeout rank [", + myRank, + "] must have collective tracking iteam in c10::Store trace"); + TORCH_INTERNAL_ASSERT( + traceMap[thisSeq][myRank].second == kEventStart, + "Timeout rank [", + myRank, + "] last trace item must be kEventStart. thisSeq = ", + thisSeq, + ", col = ", + thisCol); + + report += c10::str( + "\n\t - [", myRank, "] Timeout at collective: ", thisCol, ", #", thisSeq); + + if (!missingRanks.empty()) { + report += analyzeMissingRanks(missingRanks); + } else { + report += analyzeLaggingRanks(traceMap); + report += dumpSnapshot(traceMap); + } + + return report; +} + +/* Trace Utils Related to Flight Recorder */ + +/* Note: this is only used by PGNCCL (could be generalized in an ideal world but + * wasn't done that way, so isn't expected to be fully general at the moment) */ + +#ifdef USE_C10D_NCCL + +/* Helper used by work::getDuration() and nccl flight recorder */ +float getDurationFromEvent( + at::cuda::CUDAEvent& ncclStartEvent, + at::cuda::CUDAEvent& ncclEndEvent) { + TORCH_CHECK( + ncclEndEvent.query(), + "getDuration can only be called after work is succeeded.") + return ncclStartEvent.elapsed_time(ncclEndEvent); +} + +DebugInfoWriter::~DebugInfoWriter() = default; + +void DebugInfoWriter::write(const std::string& ncclTrace) { + // Open a file for writing. The ios::binary flag is used to write data as + // binary. + std::ofstream file(filename_, std::ios::binary); + + // Check if the file was opened successfully. + if (!file.is_open()) { + LOG(ERROR) << "Error opening file for writing NCCLPG debug info: " + << filename_; + return; + } + + file.write(ncclTrace.data(), ncclTrace.size()); + LOG(INFO) << "Finished writing NCCLPG debug info to " << filename_; +} + +DebugInfoWriter& DebugInfoWriter::getWriter(int rank) { + if (writer_ == nullptr) { + std::string fileNamePrefix = getCvarString( + {"TORCH_NCCL_DEBUG_INFO_TEMP_FILE"}, "/tmp/nccl_trace_rank_"); + // Using std::unique_ptr here to auto-delete the writer object + // when the pointer itself is destroyed. 
+ std::unique_ptr writerPtr( + new DebugInfoWriter(fileNamePrefix, rank)); + DebugInfoWriter::registerWriter(std::move(writerPtr)); + } + return *writer_; +} + +void DebugInfoWriter::registerWriter(std::unique_ptr writer) { + TORCH_CHECK_WITH( + DistBackendError, + hasWriterRegistered_.load() == false, + "debugInfoWriter already registered"); + hasWriterRegistered_.store(true); + writer_ = std::move(writer); +} + +std::unique_ptr DebugInfoWriter::writer_ = nullptr; +std::atomic DebugInfoWriter::hasWriterRegistered_(false); + +inline std::string pickle_str(const c10::IValue& v) { + std::vector result; + { + auto writer = [&](const char* data, size_t size) { + result.insert(result.end(), data, data + size); + }; + torch::jit::Pickler pickler( + writer, nullptr, nullptr, nullptr, nullptr, false); + pickler.protocol(); + pickler.pushIValue(v); + pickler.stop(); + } + return std::string(result.begin(), result.end()); +} + +inline std::string get_python_cpp_trace() { + // usage: + // LOG(INFO) << "stacktrace: " + // << get_python_cpp_trace(); + // warn: might be slow in getting cpp traces + // because of slow/broken addr2line + // in different system libs + std::shared_ptr tb = + torch::CapturedTraceback::gather( + /*python=*/true, /*script=*/true, /*cpp=*/true); + torch::SymbolizedTracebacks s_tbs = torch::symbolize({tb.get()}); + const auto& s_tb = s_tbs.tracebacks.at(0); + std::stringstream oss; + for (auto idx : c10::irange(s_tb.size())) { + auto frame_id = s_tb[idx]; + const auto& frame = s_tbs.all_frames.at(frame_id); + oss << "#" << idx << " " << frame.funcname << " from " << frame.filename + << ":" << frame.lineno << std::endl; + } + return oss.str(); +} + +inline c10::Dict new_dict() { + return c10::Dict( + c10::AnyType::get(), c10::AnyType::get()); +} + +inline c10::List new_list() { + return c10::List(c10::AnyType::get()); +} + +struct NCCLTraceBuffer { + static NCCLTraceBuffer* get() { + // intentionally leak on exit + // because this will hold python state that may get destructed + static NCCLTraceBuffer* instance = new NCCLTraceBuffer(); + return instance; + } + NCCLTraceBuffer() { + max_entries_ = getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0); + capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false); + enabled_ = max_entries_ > 0; + pg_id_to_ranks_ = {}; + } + using Event = at::cuda::CUDAEvent; + struct Entry { + size_t id_; // incremented id in the trace buffer + // used to figure out where in the circular entries + // buffer this entry will be located to + // update state information + size_t pg_id_; + + // Both seq_id_ and op_id_ are per_pg incrementing counters + // seq_id refers to actual kernel launches (e.g. 1 per coalesced group) + // op_id refers to logical operations (e.g. one per op inside coalesced + // group) + size_t seq_id_; + size_t op_id_; + std::string profiling_name_; + + std::shared_ptr traceback_; + // we borrow pointers to start_ and end_ so we can query the state + // on reporting. However, once the event is completed, the call + // to `complete` will clear these. + Event *start_, *end_; + + // timestamp when the entry was created, likely close to the time the work + // was 'enqueued'- not necessarily started + c10::time_t time_created_; + c10::optional duration_; + + // timestamp when our CPU threads discovered that the kernel started. + // will always be _after_ it actually started, and can be very late + // if the watchdog thread got stuck on CUDA APIs. 
+ c10::optional time_discovered_started_; + + // timestamp when our CPU threads discovered that the kernel completed. + // will always be _after_ it actually complated, and can be the same time + // as the discovery of the start if the watchdog thread is stuck on CUDA + // APIs + c10::optional time_discovered_completed_; + + // size information for input/output tensors + c10::SmallVector input_dims_; + c10::SmallVector output_dims_; + c10::SmallVector sizes_; // flattened from inputs, outputs + bool retired_ = false; // is this work entry no longer in the workMetaList_? + // a retired but not completed event has timed out + }; + + bool enabled_ = false; + bool capture_cpp_stack_ = false; + std::mutex mutex_; + std::vector entries_; + size_t max_entries_ = 0; + size_t next_ = 0; + size_t id_ = 0; + std::map> pg_id_to_ranks_; + + c10::optional record( + size_t pg_id, + size_t seq_id, + size_t op_id, + std::string profiling_name, + const std::vector& inputs, + const std::vector& outputs, + Event* start, + Event* end) { + if (!enabled_) { + return c10::nullopt; + } + auto traceback = + torch::CapturedTraceback::gather(true, true, capture_cpp_stack_); + std::lock_guard guard(mutex_); + + auto te = Entry{ + id_, + pg_id, + seq_id, + op_id, + std::move(profiling_name), + std::move(traceback), + std::move(start), + std::move(end), + c10::getTime()}; + + for (const auto& input : inputs) { + c10::IntArrayRef sizes = input.sizes(); + te.input_dims_.push_back(sizes.size()); + te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end()); + } + + for (const auto& output : outputs) { + c10::IntArrayRef sizes = output.sizes(); + te.output_dims_.push_back(sizes.size()); + te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end()); + } + + if (entries_.size() < max_entries_) { + entries_.emplace_back(std::move(te)); + } else { + entries_[next_++] = std::move(te); + if (next_ == max_entries_) { + next_ = 0; + } + } + return id_++; + } + + void record_pg_ranks(size_t pg_id, std::vector ranks) { + if (!enabled_) { + return; + } + std::lock_guard guard(mutex_); + pg_id_to_ranks_[pg_id] = ranks; + } + + void update_state(Entry& r) { + if (r.start_ != nullptr) { + bool started = r.start_->query(); + if (started && !r.time_discovered_started_) { + r.time_discovered_started_ = c10::getTime(); + } + } + if (r.end_ != nullptr) { + bool completed = r.end_->query(); + if (completed && !r.time_discovered_completed_) { + r.time_discovered_completed_ = c10::getTime(); + } + } + } + + std::vector dump_entries() { + std::lock_guard guard(mutex_); + std::vector result; + result.reserve(entries_.size()); + result.insert(result.end(), entries_.begin() + next_, entries_.end()); + result.insert(result.end(), entries_.begin(), entries_.begin() + next_); + // query any remaining events + for (auto& r : result) { + update_state(r); + r.start_ = r.end_ = nullptr; + } + return result; + } + + /* + Mark an Event as completed and free its events. + + This is called by the watchdog thread, and is asynchronous from the + perspective of the main thread. + + compute_duration defaults to true since retire_id is only called in the + watchdog thread, which is currently a place we call cuda APIs which may hang, + but care should be taken to avoid computing duration in any function that must + never hang. (timing must also be enabled for compute_duration - see + TORCH_NCCL_ENABLE_TIMING). 
+ */ + void retire_id(c10::optional id, bool compute_duration = true) { + if (!enabled_ || !id) { + return; + } + + bool can_compute_duration = false; + Event* startEvent = nullptr; + Event* endEvent = nullptr; + c10::optional duration = c10::nullopt; + + std::unique_lock guard(mutex_); + + Entry* entry = &entries_.at(*id % max_entries_); + if (entry->id_ == *id) { + update_state(*entry); + + if (compute_duration) { + can_compute_duration = entry->time_discovered_completed_.has_value() && + entry->start_ && entry->end_; + startEvent = entry->start_; + endEvent = entry->end_; + } + } + + if (can_compute_duration) { + // Compute duration without without holding the lock, because + // cudaEventDuration() can hang, and we need to acquire the lock before we + // can dump(), which we never want to block. + guard.unlock(); + duration = getDurationFromEvent(*startEvent, *endEvent); + guard.lock(); + + // Refresh the entry pointer, see if the entry has been overwritten + entry = &entries_.at(*id % max_entries_); + if (entry->id_ != *id) { + LOG(INFO) + << "retire_id abandoned for id " << *id + << ", event was overwritten while waiting to compute duration."; + return; + } + if (duration.has_value()) { + entry->duration_ = duration.value(); + } + } + + entry->retired_ = true; + entry->start_ = entry->end_ = nullptr; + } + + std::string dump( + const c10::optional>>& ncclDumpMap) { + auto result = dump_entries(); + auto entries = new_list(); + c10::IValue entries_key = "entries"; + c10::IValue nccl_comm_key = "nccl_comm_state"; + c10::IValue version_key = "version"; + // Update whenever changing contents or formatting of the dump + // (minor when adding fields, major when changing existing fields) + c10::IValue version_val = "1.4"; + c10::IValue pg_config_key = "pg_config"; + c10::IValue record_id_key = "record_id"; + c10::IValue pg_id_key = "pg_id"; + c10::IValue seq_id_key = "seq_id"; + c10::IValue op_id_key = "op_id"; + c10::IValue profiling_name_key = "profiling_name"; + c10::IValue input_sizes_key = "input_sizes"; + c10::IValue output_sizes_key = "output_sizes"; + c10::IValue time_created_key = "time_created_ns"; + c10::IValue duration_key = "duration_ms"; + + c10::IValue frames_key = "frames"; + c10::IValue state_key = "state"; + c10::IValue line_key = "line"; + c10::IValue name_key = "name"; + c10::IValue filename_key = "filename"; + c10::IValue retired_key = "retired"; + c10::IValue time_discovered_started_key = "time_discovered_started_ns"; + c10::IValue time_discovered_completed_key = "time_discovered_completed_ns"; + + std::vector tracebacks; + for (auto& e : result) { + tracebacks.push_back(e.traceback_.get()); + } + torch::SymbolizedTracebacks stracebacks = torch::symbolize(tracebacks); + std::vector all_frames; + for (const auto& f : stracebacks.all_frames) { + auto d = new_dict(); + d.insert(name_key, f.funcname); + d.insert(filename_key, f.filename); + d.insert(line_key, int64_t(f.lineno)); + all_frames.emplace_back(std::move(d)); + } + + for (auto i : c10::irange(result.size())) { + auto& e = result.at(i); + auto& tb = stracebacks.tracebacks.at(i); + auto dict = new_dict(); + dict.insert(record_id_key, int64_t(e.id_)); + dict.insert(pg_id_key, int64_t(e.pg_id_)); + dict.insert(seq_id_key, int64_t(e.seq_id_)); + dict.insert(op_id_key, int64_t(e.op_id_)); + dict.insert(profiling_name_key, e.profiling_name_); + dict.insert(time_created_key, int64_t(e.time_created_)); + if (e.duration_) { + dict.insert(duration_key, *e.duration_); + } + + auto it = e.sizes_.begin(); + auto read_sizes = 
[&](const c10::SmallVector& dims) { + auto sizes = new_list(); + for (auto dim : dims) { + auto arg_sizes = new_list(); + for (auto i : c10::irange(dim)) { + (void)i; + arg_sizes.push_back(*it++); + } + sizes.push_back(arg_sizes); + } + return sizes; + }; + + dict.insert(input_sizes_key, read_sizes(e.input_dims_)); + dict.insert(output_sizes_key, read_sizes(e.output_dims_)); + if (e.time_discovered_completed_.has_value()) { + dict.insert(state_key, "completed"); + } else if (e.time_discovered_started_.has_value()) { + dict.insert(state_key, "started"); + } else { + dict.insert(state_key, "scheduled"); + } + + dict.insert( + time_discovered_started_key, + e.time_discovered_started_.has_value() + ? int64_t(*e.time_discovered_started_) + : c10::IValue()); + dict.insert( + time_discovered_completed_key, + e.time_discovered_completed_.has_value() + ? int64_t(*e.time_discovered_completed_) + : c10::IValue()); + dict.insert(retired_key, e.retired_); + + auto frames = new_list(); + for (int64_t frame : tb) { + frames.push_back(all_frames.at(frame)); + } + dict.insert(frames_key, frames); + entries.push_back(dict); + } + auto pg_config = new_dict(); + for (const auto& [pg_id, ranks] : pg_id_to_ranks_) { + auto pg_ranks = new_list(); + for (const auto& rank : ranks) { + pg_ranks.push_back(static_cast(rank)); + } + pg_config.insert(static_cast(pg_id), pg_ranks); + } + + // convert ncclDumpMap into a dictionary + auto per_comm_dict = new_dict(); + if (ncclDumpMap.has_value()) { + for (const auto& [ncclId, ncclDump] : ncclDumpMap.value()) { + auto inner_dict = new_dict(); + for (const auto& [key, value] : ncclDump) { + inner_dict.insert(key, value); + } + per_comm_dict.insert(ncclId, inner_dict); + } + } + + auto dict = new_dict(); + dict.insert(entries_key, entries); + dict.insert(version_key, version_val); + if (per_comm_dict.size() > 0) { + dict.insert(nccl_comm_key, per_comm_dict); + } + dict.insert(pg_config_key, pg_config); + + return pickle_str(dict); + } +}; + +#endif +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp new file mode 100644 index 0000000000000000000000000000000000000000..953cec8a1bc36e8550b026e49ef4d2b4fab76e75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp @@ -0,0 +1,58 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include + +namespace c10d { + +#define RECORD_COMMS_TRACE( \ + _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \ + do { \ + if (torch_ucc_config.enable_comms_logger) { \ + _comms_tracer->recordComms( \ + opTypeToString(_opType), \ + (uintptr_t)_work.get(), \ + _rank, \ + _comm_size, \ + _inTensors, \ + _outTensors); \ + } \ + } while (0) + +// interfaces to collect communication traces +class TORCH_API CommTraceLogger : public torch::CustomClassHolder { + private: + std::vector comms_trace_; + std::vector curBlocks_; /* unused */ + std::vector curOutSplitSizes_; + std::vector curInSplitSizes_; + int curRoot_ = -1; + unsigned long seqnum = 0; + + public: + void setCurBlock(const std::string& name); /* unused */ + void popBlock(); /* unused */ + // record root info if applicable, e.g., broadcast, gather, scatter + void recordOptionalInfo(int root = -1); + // record input/output splits of Alltoallv + void recordOptionalInfo( + const std::vector& outputSplitSizes = {}, + const std::vector& inputSplitSizes = {}); + // 
record essential comms information + void recordComms( + const std::string& collName, + const uintptr_t workReq = 0, + const int rank = -1, + const int world_size = -1, + const std::vector& inputTensors = {}, + const std::vector& outputTensor = {}); + // return collected comms traces + std::vector& getCommsTrace() { + return comms_trace_; + } +}; + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a44e2de86ef7dc2477d59cbf221f477b00cc8370 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp @@ -0,0 +1,187 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include +#include +#include + +namespace c10d { + +// Macro to generate the error message on a non-successful UCC return value. +#define TORCH_UCC_GET_ERROR_MSG(_err, _error_msg, _result) \ + do { \ + _err = c10::str( \ + "[", \ + std::string(__FILE__), \ + ":", \ + std::to_string(__LINE__), \ + "] ", \ + logger->getLogPrefix(), \ + _error_msg, \ + ", error code ", \ + _result, \ + ": ", \ + ucc_status_string(_result), \ + ", system error code ", \ + errno); \ + } while (0) + +// Macro to throw on a non-successful UCC return value. +#define TORCH_UCC_CHECK(_cmd, _error_msg) \ + do { \ + ucc_status_t result = _cmd; \ + if (result != UCC_OK) { \ + std::string err; \ + TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \ + TORCH_CHECK(false, err); \ + } \ + } while (0) + +// Macro and throw on a non-successful UCC return value and free its request. +#define TORCH_UCC_CHECK_REQUEST(_request, _cmd, _error_msg) \ + do { \ + ucc_status_t result = _cmd; \ + if (result != UCC_OK) { \ + std::string err; \ + TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \ + if (_request != nullptr) { \ + ucc_collective_finalize(_request); \ + } \ + TORCH_CHECK(false, err); \ + } \ + } while (0) + +// Macros to print logs with unified format +#define TORCH_UCC_LOG_ERROR(_phase, _msg) \ + LOG(ERROR) << logger->getLogPrefix(_phase) << "[ERROR] " << _msg; +#define TORCH_UCC_LOG_INFO(_phase, _msg) \ + LOG(INFO) << logger->getLogPrefix(_phase) << "[INFO] " << _msg; +#define TORCH_UCC_LOG_DEBUG(_phase, _msg) \ + VLOG(1) << logger->getLogPrefix(_phase) << "[DEBUG] " << _msg; + +enum torch_ucc_phase_t { + TORCH_UCC_UNKNOWN = -1, + TORCH_UCC_INIT, + TORCH_UCC_HEALTH_CHECK, + TORCH_UCC_READY, + TORCH_UCC_COLL_POST, + TORCH_UCC_COLL_PROGRESS, + TORCH_UCC_FINALIZE, +}; + +const std::map ucc_phase_map = { + {TORCH_UCC_UNKNOWN, "UNKNOWN"}, + {TORCH_UCC_INIT, "INIT"}, + {TORCH_UCC_HEALTH_CHECK, "HEALTH_CHECK"}, + {TORCH_UCC_READY, "READY"}, + {TORCH_UCC_COLL_POST, "COLL_POST"}, + {TORCH_UCC_COLL_PROGRESS, "COLL_PROGRESS"}, + {TORCH_UCC_FINALIZE, "FINALIZE"}, +}; + +class CommTraceLogger; + +class TORCH_API ProcessGroupUCCLogger : public torch::CustomClassHolder { + public: + ProcessGroupUCCLogger(); + ProcessGroupUCCLogger(std::string log_prefix, torch_ucc_phase_t phase); + + std::string getLogPrefix(torch_ucc_phase_t phase = TORCH_UCC_UNKNOWN); + void setLogPrefix(std::string log_prefix); + inline void setPhase(torch_ucc_phase_t phase) { + local_phase = phase; + } + + void initCommsTracer(); + void flushComms(int rank, int world_size); + std::shared_ptr trace_generator = nullptr; + + protected: + std::string log_prefix; + torch_ucc_phase_t local_phase = TORCH_UCC_UNKNOWN; + 
bool initialized_CommTraceLogger = false; +}; + +struct torch_ucc_oob_coll_info_t { + c10::intrusive_ptr store; + uint32_t comm_id; + int rank; + int size; + void* rbuf; + size_t msglen; + std::string getKey(std::string key) { + return std::to_string(comm_id) + key; + } +}; + +class CommBase { + public: + CommBase(const c10::intrusive_ptr& logger_) + : logger(logger_) {} + virtual void progress() = 0; + virtual void free_request(ucc_coll_req_h request) = 0; + virtual ~CommBase() {} + c10::intrusive_ptr logger; +}; +class CommUCC : public CommBase { + public: + ucc_lib_h lib{nullptr}; + ucc_context_h context{nullptr}; + + public: + void progress() override; + CommUCC( + std::shared_ptr oob, + const c10::intrusive_ptr& logger); + void free_request(ucc_coll_req_h request) override; + ~CommUCC(); +}; + +ucc_status_t oob_allgather( + void* sbuf, + void* rbuf, + size_t msglen, + void* coll_info, + void** req); + +ucc_status_t oob_allgather_test(void* req); + +ucc_status_t oob_allgather_free(void* req); + +// trim: remove spaces before and after the string view +// implementation borrowed from https://stackoverflow.com/a/17976541 +inline c10::string_view trim(c10::string_view s) { + auto wsfront = std::find_if_not( + s.begin(), s.end(), [](int c) { return std::isspace(c); }); + auto wsback = std::find_if_not(s.rbegin(), s.rend(), [](int c) { + return std::isspace(c); + }).base(); + return ( + wsback <= wsfront ? "" : s.substr(wsfront - s.begin(), wsback - wsfront)); +} + +inline std::string tolower(c10::string_view s) { + std::string result; + result.reserve(s.size()); + for (auto c : s) { + result.push_back(std::tolower(c)); + } + return result; +} + +inline std::vector parse_list(std::string list) { + std::vector result; + list = tolower(trim(list)); + while (!list.empty()) { + const auto end_pos = list.find_first_of(','); + const auto token = trim(list.substr(0, end_pos)); + result.push_back(std::string(token)); + list = (end_pos != c10::string_view::npos) ? 
list.substr(end_pos + 1) : ""; + } + return result; +} + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ffce091b6c5f0841de4ca514911bc64ed3ce30d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace c10d { +namespace tcputil { + +#define CONNECT_SOCKET_OFFSET 2 + +inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) { + return ::poll(fds, nfds, timeout); +} + +inline void addPollfd( + std::vector& fds, + int socket, + short events) { + fds.push_back({.fd = socket, .events = events}); +} + +inline struct ::pollfd getPollfd(int socket, short events) { + struct ::pollfd res = {.fd = socket, .events = events}; + return res; +} + +} // namespace tcputil +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a4a65a263d22297f04d51a4747030187bb24ed04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp @@ -0,0 +1,731 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#include +typedef SSIZE_T ssize_t; +#pragma comment(lib, "Ws2_32.lib") +#else +#include +#include +#include +#include +#include +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10d { + +TORCH_API size_t getTensorsNumel(const std::vector& tensors); + +// Retrieve tensor shapes from a given tensor. +TORCH_API std::vector getTensorShapes( + const std::vector& tensors); + +// Use -2 to represent unset state of env vars +#define C10D_ENV_NOT_SET -2 + +// Turns at::IntArrayRef into "(1, 2, 3, 4)". 
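+// A minimal usage sketch (illustrative, not part of the upstream header):
+//
+//   at::Tensor t = at::zeros({1, 2, 3, 4});
+//   std::string s = toString(t.sizes());  // "(1, 2, 3, 4)"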
+inline std::string toString(at::IntArrayRef l) { + std::stringstream ss; + ss << "("; + for (const auto i : c10::irange(l.size())) { + if (i > 0) { + ss << ", "; + } + ss << l[i]; + } + ss << ")"; + return ss.str(); +} + +inline std::string toString(const c10::Layout& layout) { + std::stringstream ss; + ss << layout; + return ss.str(); +} + +inline void assertSameType( + const at::DeprecatedTypeProperties& type, + const std::vector& tensors) { + for (const auto i : c10::irange(tensors.size())) { + if (!tensors[i].options().type_equal(type.options())) { + const std::string expected = type.toString(); + const std::string actual = tensors[i].toString(); + throw std::invalid_argument( + "mixed types (" + expected + " and " + actual + ")"); + } + } +} + +inline std::vector split( + char separator, + const std::string& string) { + std::vector pieces; + std::stringstream ss(string); + std::string item; + while (std::getline(ss, item, separator)) { + pieces.push_back(std::move(item)); + } + return pieces; +} + +inline std::string getCvarString( + const std::vector& env, + const char* def) { + const char* ret = def; + + if (env.empty()) { + TORCH_CHECK(false, "No environment variables passed"); + return ret; + } + + /* parse environment variable in reverse order, so the early + * versions of a variable get higher priority than the latter + * versions of the same variable */ + for (int i = env.size() - 1; i >= 0; i--) { + const char* val = std::getenv(env[i].c_str()); + if (val == nullptr) { + continue; + } else if (i) { + TORCH_WARN( + "Environment variable " + env[i] + " is deprecated; use " + env[0] + + " instead"); + } + + ret = val; + } + + return ret; +} + +inline int getCvarInt(const std::vector& env, int def) { + int ret = def; + + if (env.empty()) { + TORCH_CHECK(false, "No environment variables passed"); + return ret; + } + + /* parse environment variable in reverse order, so the early + * versions of a variable get higher priority than the latter + * versions of the same variable */ + for (int i = env.size() - 1; i >= 0; i--) { + char* val = std::getenv(env[i].c_str()); + if (val == nullptr) { + continue; + } else if (i) { + TORCH_WARN( + "Environment variable " + env[i] + " is deprecated; use " + env[0] + + " instead"); + } + + try { + ret = std::stoi(val); + } catch (std::exception& e) { + TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]); + } + } + + return ret; +} + +inline bool getCvarBool(const std::vector& env, bool def) { + bool ret = def; + + if (env.empty()) { + TORCH_CHECK(false, "No environment variables passed"); + return ret; + } + + /* parse environment variable in reverse order, so the early + * versions of a variable get higher priority than the latter + * versions of the same variable */ + for (int i = env.size() - 1; i >= 0; i--) { + char* val_ = std::getenv(env[i].c_str()); + if (val_ == nullptr) { + continue; + } else if (i) { + TORCH_WARN( + "Environment variable " + env[i] + " is deprecated; use " + env[0] + + " instead"); + } + + std::string val = std::string(val_); + for (auto& x : val) { + x = std::tolower(x); + } + + if (val == "y" || val == "yes" || val == "1" || val == "t" || + val == "true") { + ret = true; + } else if ( + val == "n" || val == "no" || val == "0" || val == "f" || + val == "false") { + ret = false; + } else { + TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]); + return ret; + } + } + + return ret; +} + +inline void assertSameSizes( + const at::IntArrayRef& sizes, + const std::vector& tensors) { + 
for (const auto i : c10::irange(tensors.size())) { + if (!tensors[i].sizes().equals(sizes)) { + const auto expected = toString(sizes); + const auto actual = toString(tensors[i].sizes()); + throw std::invalid_argument( + "mixed sizes (" + expected + " and " + actual + ")"); + } + } +} + +inline void assertSameSizeAndType(const std::vector& tensors) { + // Ensure we have at least one tensor + if (tensors.empty()) { + throw std::invalid_argument("argument is empty"); + } + + // Ensure all tensors have identical type and shape + auto options = tensors[0].options(); + auto sizes = tensors[0].sizes(); + for (const auto i : c10::irange(1, tensors.size())) { + if (!tensors[i].options().type_equal(options)) { + const auto expected = toString(options); + const auto actual = toString(tensors[i].options()); + throw std::invalid_argument( + "argument contains mixed types (" + expected + " and " + actual + + ")"); + } + if (!tensors[i].sizes().equals(sizes)) { + const auto expected = toString(sizes); + const auto actual = toString(tensors[i].sizes()); + throw std::invalid_argument( + "argument contains mixed sizes (" + expected + " and " + actual + + ")"); + } + } +} + +inline void assertTypeMatch( + std::function fn, + const at::DeprecatedTypeProperties& type, + const at::ArrayRef tensors, + size_t index) { + if (!tensors[index].options().type_equal(type.options())) { + fn("invalid tensor type at index " + std::to_string(index) + " (expected " + + type.toString() + ", got " + tensors[index].toString() + ")"); + } +} + +inline void assertTypeMatch( + std::function fn, + const at::TensorOptions& options, + const at::ArrayRef tensors, + size_t index) { + if (!tensors[index].options().type_equal(options)) { + fn("invalid tensor type at index " + std::to_string(index) + " (expected " + + toString(options) + ", got " + toString(tensors[index].options()) + ")"); + } +} + +inline void assertSizesMatch( + std::function fn, + const at::IntArrayRef& sizes, + const at::ArrayRef tensors, + size_t index) { + if (tensors[index].sizes() != sizes) { + fn("invalid tensor size at index " + std::to_string(index) + " (expected " + + toString(sizes) + ", got " + toString(tensors[index].sizes()) + ")"); + } +} + +inline void assertLayoutMatch( + std::function fn, + const c10::Layout& expected, + const at::ArrayRef tensors, + size_t index) { + const auto& actual = tensors[index].layout(); + if (actual != expected) { + fn("invalid tensor layout at index " + std::to_string(index) + + " (expected " + toString(expected) + ", got " + toString(actual) + ")"); + } +} + +inline void assertLayoutMatch( + std::function fn, + const at::ArrayRef tensors) { + const auto& layout = tensors[0].layout(); + for (const auto i : c10::irange(1, tensors.size())) { + assertLayoutMatch(fn, layout, tensors, i); + } +} + +inline void assertNonEmpty( + std::function fn, + const at::ArrayRef tensors) { + if (tensors.empty()) { + fn("requires non-empty tensor list"); + } +} + +inline void assertSingleElement( + std::function fn, + const at::ArrayRef tensors) { + if (tensors.size() != 1) { + fn("requires a single-element tensor list"); + } +} + +inline void assertSingleElementInput( + std::function fn, + const at::ArrayRef tensors) { + if (tensors.size() != 1) { + fn("requires a single-element input tensor list"); + } +} + +inline void assertSingleElementOutput( + std::function fn, + const at::ArrayRef tensors) { + if (tensors.size() != 1) { + fn("requires a single-element output tensor list"); + } +} + +inline void assertRootRank( + std::function fn, + 
int rank, + int size) { + if (rank < 0 || rank >= size) { + fn("invalid root rank: " + std::to_string(rank)); + } +} + +inline void assertRootTensor( + std::function fn, + int rank, + int size) { + if (rank < 0 || rank >= size) { + fn("invalid root tensor: " + std::to_string(rank)); + } +} + +inline void assertDense( + std::function fn, + const at::ArrayRef tensors) { + const auto& layout = tensors[0].layout(); + if (layout != at::kStrided) { + fn("only supports dense tensors"); + } +} + +inline void assertCPU( + std::function fn, + const at::ArrayRef tensors) { + const auto& device = tensors[0].device(); + if (device.type() != at::kCPU) { + fn("only supports CPU tensors"); + } +} + +inline void assertSameDevice( + std::function fn, + const at::ArrayRef tensors) { + if (tensors.size() < 2) { + return; + } + const auto& device = tensors[0].device(); + for (const auto i : c10::irange(1, tensors.size())) { + if (tensors[i].device() != device) { + fn("tensors should be on the same device"); + } + } +} + +inline void assertTypeAndSizesMatch( + std::function fn, + const at::ArrayRef tensors, + const at::DeprecatedTypeProperties& type, + const at::IntArrayRef& sizes) { + for (const auto i : c10::irange(tensors.size())) { + assertTypeMatch(fn, type, tensors, i); + assertSizesMatch(fn, sizes, tensors, i); + } +} + +inline void assertTypeAndSizesMatch( + std::function fn, + const at::ArrayRef tensors, + const at::TensorOptions& options, + const at::IntArrayRef& sizes) { + for (const auto i : c10::irange(tensors.size())) { + assertTypeMatch(fn, options, tensors, i); + assertSizesMatch(fn, sizes, tensors, i); + } +} + +inline void assertTypeAndSizesMatch( + std::function fn, + const at::ArrayRef tensors) { + const auto& options = tensors[0].options(); + const auto sizes = tensors[0].sizes(); + assertTypeAndSizesMatch(fn, tensors.slice(1), options, sizes); +} + +// Copied from ATen/core/functional.h. +template +inline auto fmap(T& inputs, const F& fn) + -> std::vector { + std::vector r; + r.reserve(inputs.size()); + for (auto& input : inputs) { + r.push_back(fn(input)); + } + return r; +} + +// Copied from torch/csrc/utils/tensor_flatten.h. 
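+// A rough usage sketch (illustrative only): flattening dense tensors of
+// shapes {2, 3} and {4} yields one contiguous 1-D tensor with 10 elements.
+//
+//   at::Tensor flat =
+//       flattenDenseTensors({at::ones({2, 3}), at::ones({4})});
+//   // flat.sizes() == {10}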
+inline at::Tensor flattenDenseTensors(at::TensorList tensors) { + static const auto flatten = [](const at::Tensor& t) { + return t.contiguous().view({-1}); + }; + if (tensors.size() == 1) { + return flatten(tensors[0]); + } + return at::cat(::c10d::fmap(tensors, flatten)); +} + +inline at::Tensor newLikeFlat( + std::vector>& tensors, + size_t deviceIdx) { + if (tensors.empty() || tensors[0].empty()) { + TORCH_CHECK(false, "Received an empty list"); + } + if (deviceIdx >= tensors.size()) { + TORCH_CHECK(false, "Invalid device index"); + } + auto& t = tensors[deviceIdx][0]; + auto device = t.device(); + for (const auto i : c10::irange(1, tensors[deviceIdx].size())) { + if (tensors[deviceIdx][i].device() != device) { + TORCH_CHECK(false, "Expecting all tensors on the same device"); + } + } + at::DeviceGuard gpuGuard(device); + std::vector sizes{static_cast(tensors[deviceIdx].size())}; + std::vector strides{static_cast(t.numel())}; + sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end()); + strides.insert(strides.end(), t.strides().begin(), t.strides().end()); + return at::empty_strided( + sizes, strides, t.options().memory_format(c10::nullopt)); +} + +inline at::Tensor newLikeFlat(std::vector& tensors) { + if (tensors.empty()) { + TORCH_CHECK(false, "Received an empty list"); + } + auto& t = tensors[0]; + at::DeviceGuard gpuGuard(t.device()); + std::vector sizes{static_cast(tensors.size())}; + sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end()); + return at::empty(sizes, t.options()); +} + +inline std::vector> getSizes( + const std::vector& tensors) { + std::vector> sizes(tensors.size()); + for (const auto i : c10::irange(tensors.size())) { + sizes[i] = tensors[i].sizes().vec(); + } + return sizes; +} + +inline std::vector getDevices(const std::vector& tensors) { + std::vector devices(tensors.size(), -1); + if (tensors[0].device().is_cuda()) { + for (const auto i : c10::irange(tensors.size())) { + devices[i] = tensors[i].storage().device().index(); + } + } + return devices; +} + +template +inline T* getDataPointer(const at::Tensor& tensor) { + // This method is only used in ProcessGroupGloo for now. Call sites must make + // sure that the input tensor is contiguous. It is OK if the tensor does not + // start from the beginning of the storage. For example, it could come from + // chunk(..., dim=0)[1]. 
Hence, we need to use data_ptr() instead of + // tensor.storage().data() + // NB: not using tensor.data() because tensor is not aware of gloo::TYPE + return static_cast(tensor.data_ptr()); +} + +template +std::vector getDataPointers(const std::vector& tensors) { + std::vector ptrs(tensors.size()); + for (const auto i : c10::irange(tensors.size())) { + ptrs[i] = getDataPointer(tensors[i]); + } + return ptrs; +} + +// For alltoall split size sanity check +inline void checkSplitSizes( + const std::vector& split_sizes, + const at::Tensor& tensor, + int group_size) { + if (split_sizes.empty()) { + TORCH_CHECK( + tensor.size(0) % group_size == 0, + "Tensor's dim 0 does not divide equally across group size"); + } else { + TORCH_CHECK( + split_sizes.size() == static_cast(group_size), + "Number of tensor splits not equal to group size"); + const auto sum = c10::sum_integers(split_sizes); + TORCH_CHECK( + sum == tensor.size(0), "Split sizes doesn't match total dim 0 size"); + } +} + +// Compute alltoall lengths and offsets, handling multi-dimension tensors +template +size_t computeLengthsAndOffsets( + const std::vector& split_sizes, + const at::Tensor& tensor, + std::vector* lengths, + std::vector* offsets) { + size_t group_size = lengths->size(); + bool equal_splits = false; + size_t dim0_size = tensor.size(0); + size_t row_size = (dim0_size ? tensor.numel() / dim0_size : 1); + size_t split_size = 0; + size_t offset = 0; + + if (split_sizes.empty()) { + equal_splits = true; + split_size = tensor.size(0) / group_size; + } + for (const auto i : c10::irange(group_size)) { + size_t length = row_size * (equal_splits ? split_size : split_sizes[i]); + (*lengths)[i] = length; + (*offsets)[i] = offset; + // TODO: see if we should add overflow protection for offset + offset += length; + } + return offset; +} + +template +size_t computeLengthsAndOffsets( + const std::vector& tensors, + std::vector* lengths, + std::vector* offsets) { + size_t group_size = lengths->size(); + size_t offset = 0; + for (const auto i : c10::irange(group_size)) { + size_t length = tensors[i].numel(); + (*lengths)[i] = length; + (*offsets)[i] = offset; + offset += length; + } + return offset; +} + +using RankType = uint32_t; +using SizeType = uint64_t; + +// `errno` is only meaningful when it fails. E.g., a successful `fork()` sets +// `errno` to `EINVAL` in child process on some macos +// (https://stackoverflow.com/a/20295079), and thus `errno` should really only +// be inspected if an error occurred. +// +// `success_cond` is an expression used to check if an error has happend. So for +// `fork()`, we can use `SYSCHECK(pid = fork(), pid != -1)`. The function output +// is stored in variable `__output` and may be used in `success_cond`. 
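+//
+// A typical call site (illustrative sketch, not part of the upstream header)
+// names the success condition explicitly and relies on the macro's retry on
+// EINTR:
+//
+//   ssize_t bytesRead;
+//   SYSCHECK(bytesRead = ::read(fd, buf, len), bytesRead != -1);
+//
+// Since most of these calls signal failure by returning -1, the call sites
+// below mostly use the SYSCHECK_ERR_RETURN_NEG1 shorthand defined after the
+// macro.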
+#ifdef _WIN32 +#define SYSCHECK(expr, success_cond) \ + while (true) { \ + auto __output = (expr); \ + auto errno_local = WSAGetLastError(); \ + (void)__output; \ + if (!(success_cond)) { \ + if (errno == EINTR) { \ + continue; \ + } else if ( \ + errno_local == WSAETIMEDOUT || errno_local == WSAEWOULDBLOCK) { \ + C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \ + } else { \ + C10_THROW_ERROR(DistNetworkError, std::strerror(errno_local)); \ + } \ + } else { \ + break; \ + } \ + } +#else +#define SYSCHECK(expr, success_cond) \ + while (true) { \ + auto __output = (expr); \ + (void)__output; \ + if (!(success_cond)) { \ + if (errno == EINTR) { \ + continue; \ + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { \ + C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \ + } else { \ + C10_THROW_ERROR(DistNetworkError, std::strerror(errno)); \ + } \ + } else { \ + break; \ + } \ + } +#endif + +// Most functions indicate error by returning `-1`. This is a helper macro for +// this common case with `SYSCHECK`. +// Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1 +#define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1) + +namespace tcputil { + +// Send and receive +template +void sendBytes( + int socket, + const T* buffer, + size_t length, + bool moreData = false) { + size_t bytesToSend = sizeof(T) * length; + if (bytesToSend == 0) { + return; + } + + auto bytes = reinterpret_cast(buffer); + uint8_t* currentBytes = const_cast(bytes); + + int flags = 0; + +#ifdef MSG_MORE + if (moreData) { // there is more data to send + flags |= MSG_MORE; + } +#endif + +// Ignore SIGPIPE as the send() return value is always checked for error +#ifdef MSG_NOSIGNAL + flags |= MSG_NOSIGNAL; +#endif + + while (bytesToSend > 0) { + ssize_t bytesSent; + SYSCHECK_ERR_RETURN_NEG1( + bytesSent = + ::send(socket, (const char*)currentBytes, bytesToSend, flags)) + if (bytesSent == 0) { + C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET)); + } + + bytesToSend -= bytesSent; + currentBytes += bytesSent; + } +} + +template +void recvBytes(int socket, T* buffer, size_t length) { + size_t bytesToReceive = sizeof(T) * length; + if (bytesToReceive == 0) { + return; + } + + auto bytes = reinterpret_cast(buffer); + uint8_t* currentBytes = bytes; + + while (bytesToReceive > 0) { + ssize_t bytesReceived; + SYSCHECK_ERR_RETURN_NEG1( + bytesReceived = recv(socket, (char*)currentBytes, bytesToReceive, 0)) + if (bytesReceived == 0) { + C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET)); + } + + bytesToReceive -= bytesReceived; + currentBytes += bytesReceived; + } +} + +// send a vector's length and data +template +void sendVector(int socket, const std::vector& vec, bool moreData = false) { + SizeType size = vec.size(); + sendBytes(socket, &size, 1, true); + sendBytes(socket, vec.data(), size, moreData); +} + +// receive a vector as sent in sendVector +template +std::vector recvVector(int socket) { + SizeType valueSize; + recvBytes(socket, &valueSize, 1); + std::vector value(valueSize); + recvBytes(socket, value.data(), value.size()); + return value; +} + +// this is only for convenience when sending rvalues +template +void sendValue(int socket, const T& value, bool moreData = false) { + sendBytes(socket, &value, 1, moreData); +} + +template +T recvValue(int socket) { + T value; + recvBytes(socket, &value, 1); + return value; +} + +// send a string's length and data +inline void sendString( + int socket, + const std::string& str, + bool moreData = false) { + SizeType size = 
str.size(); + sendBytes(socket, &size, 1, true); + sendBytes(socket, str.data(), size, moreData); +} + +// receive a string as sent in sendString +inline std::string recvString(int socket) { + SizeType valueSize; + recvBytes(socket, &valueSize, 1); + std::vector value(valueSize); + recvBytes(socket, value.data(), value.size()); + return std::string(value.data(), value.size()); +} + +} // namespace tcputil +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9b2b1aa245f841eac7d61f2238bf7a8385846612 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include + +namespace c10d { +namespace tcputil { + +#define CONNECT_SOCKET_OFFSET 1 + +inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) { + return WSAPoll(fdArray, fds, timeout); +} + +inline void addPollfd( + std::vector& fds, + int socket, + short events) { + fds.push_back({(SOCKET)socket, events}); +} + +inline struct ::pollfd getPollfd(int socket, short events) { + struct ::pollfd res = {(SOCKET)socket, events}; + return res; +} + +} // namespace tcputil +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f6c9c9be5ea9b93843898bd26c004148a19df6de --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp @@ -0,0 +1,164 @@ +#pragma once + +#include +#include +#include + +constexpr auto kNoTimeout = std::chrono::milliseconds(0); + +namespace c10d { + +constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY"; + +enum class OpType : std::uint8_t { + BROADCAST = 0, + ALLREDUCE = 1, + ALLREDUCE_COALESCED = 2, + REDUCE = 3, + ALLGATHER = 4, + _ALLGATHER_BASE = 5, + ALLGATHER_COALESCED = 6, + GATHER = 7, + SCATTER = 8, + REDUCE_SCATTER = 9, + ALLTOALL_BASE = 10, + ALLTOALL = 11, + SEND = 12, + RECV = 13, + RECVANYSOURCE = 14, + BARRIER = 15, + _REDUCE_SCATTER_BASE = 16, + COALESCED = 17, + _ALLREDUCE_SPARSE = 18, + UNKNOWN = 100, +}; + +// Converts OpType to human readable string. +TORCH_API std::string opTypeToString(OpType opType); + +// Whether or not an OP is an p2p op (SEND, RECV, RECVANYSOURCE) +TORCH_API bool isP2POp(OpType opType, bool batchP2P = false); + +// Please do not use Work API, it is going away, to be +// replaced by ivalue::Future. +// Python binding for this class might change, please do not assume +// this will be bound using pybind. +class TORCH_API Work : public torch::CustomClassHolder { + public: + Work( + int rank = -1, + OpType opType = OpType::UNKNOWN, + const char* profilingTitle = nullptr, + const c10::optional>& inputTensors = + c10::nullopt); + + ~Work() override; + + // Checks if request has completed. Non-blocking operation. + virtual bool isCompleted(); + + // Returns if the work completed successfully. + // If false, the exception function can be called to get details. + virtual bool isSuccess() const; + + // Returns exception if isSuccess() returned false. + virtual std::exception_ptr exception() const; + + // Returns source rank if this objects represents a recv-from-any. 
+ virtual int sourceRank() const; + + // Returns result tensors, if applicable. + // If work is not supposed to have result, we return empty list. + virtual std::vector result(); + + // Ensures that operations on the output tensors that are invoked + // after this function returns are correctly sequenced after the + // asynchronous completion of this work. + // + // For CUDA tensors, it inserts stream synchronization such that + // the streams of the caller wait for completion of the + // asynchronous operations on the destination tensors. + // + // For CPU tensors, it is currently a nop. + // + // This function should only be used if the caller polls for + // completion through the `isCompleted` function, it has returned + // true, and the `isSuccess` function also has returned true. + // + virtual void synchronize(); + + // Waits until request completes. Blocking operation. + // Throws if the work completed with an exception. + // Returns false if the work is aborted. + // Otherwise, it always returns true, indicating the work is completed. + // + // Functionally equivalent to: + // + // while (!isCompleted()) { /* nop */ } + // auto success = isSuccess(); + // if (!success) { std::rethrow_exception(exception()); } + // return success; + // + virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout); + + virtual void abort(); + + // Returns a Future object that will be associated with the completion of + // work. Only NCCL backend is currently supported. + virtual c10::intrusive_ptr getFuture(); + + virtual float getDuration() const; + + virtual uint64_t getSequencenumber() const; + + OpType retrieveOpType() const; + + static c10::intrusive_ptr create_from_future( + const c10::intrusive_ptr&); + + protected: + // Completes the work object and optionally sets the exception in a + // thread-safe manner. Notifies all waiting condition variables as well. + void finish(std::exception_ptr exception = nullptr); + + // Similar to finish, but throws an exception if one is already set or + // provided by the user. + void finishAndThrow(std::exception_ptr exception); + + mutable std::mutex mutex_; + std::condition_variable cv_; + bool completed_ = false; + std::exception_ptr exception_; + + // Current rank of the node. + const int rank_; + + // Operation type that this work object refers to. + OpType opType_; + + // When profiling, the callback to record end of operation event. This + // callback needs to be called when collective operation is complete. 
+ std::function recordFunctionEndCallback_; +}; + +struct TORCH_API WorkInfo { + WorkInfo( + const OpType& opType, + const uint64_t seq, + const std::chrono::time_point& timeStarted, + const std::chrono::time_point& timeFinished, + const std::chrono::duration& activeDuration) + : opType(opType), + seq(seq), + timeStarted(timeStarted), + timeFinished(timeFinished), + activeDuration(activeDuration) {} + + OpType opType; + uint64_t seq; + std::chrono::time_point timeStarted; + std::chrono::time_point timeFinished; + std::chrono::duration activeDuration; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h new file mode 100644 index 0000000000000000000000000000000000000000..5151a33f7ee351184e53daa68155dcc6c7390358 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace distributed { +namespace c10d { + +PyMethodDef* python_functions(); + +} // namespace c10d +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ee8db21c172a4d0dd3febc39bb9bb5021caee5f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp @@ -0,0 +1,140 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10d { + +// Broadcast many tensors to all processes in the process group. +TORCH_API void broadcast_coalesced( + const c10::intrusive_ptr& process_group, + at::TensorList tensors, + size_t buffer_size, + int rank = 0); + +// This class passes bucket contents tensor to DDP communication hook. +class TORCH_API GradBucket { + public: + explicit GradBucket( + size_t index, + size_t bucket_count, + at::Tensor tensor, + std::vector offsets, + std::vector lengths, + std::vector sizes_vec, + std::vector parameters, + c10::optional sparse_grad_indices) + : index_(index), + bucket_count_(bucket_count), + buffer_(std::move(tensor)), + offsets_(std::move(offsets)), + lengths_(std::move(lengths)), + sizes_vec_(std::move(sizes_vec)), + parameters_(std::move(parameters)), + sparse_grad_indices_(std::move(sparse_grad_indices)) {} + + // Returns the index of the bucket, which is unique across all the buckets. + size_t getIndex() const { + return index_; + } + + const at::Tensor& getBuffer() const { + return buffer_; + } + + // Returns a mutable buffer compared with the above method. + at::Tensor& getBufferRef() { + return buffer_; + } + + // Overwrites the buffer at a specific index. + void setBuffer(at::Tensor& buffer) { + buffer_ = buffer; + } + + // Each tensor in the list that getGradients corresponds to a + // parameter. + std::vector getGradients() const; + + // Returns model parameters belonging to this bucket. They are returned in the + // same order as gradient tensors via getGradients(). For example, + // getParameters[i] will have its gradient stored in + // getGradients[i] + const std::vector getParameters() const { + return parameters_; + } + + // Returns whther this bucket is the last bucket to allreduce in an iteration. 
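+  // (Illustrative: with bucket_count_ == 4, the bucket whose index_ == 3 is
+  // the last one to be allreduced in the iteration, so a communication hook
+  // can use this to run per-iteration work once the final bucket arrives.)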
+ bool isLast() const { + return index_ == bucket_count_ - 1; + } + + c10::optional& getSparseGradIndices() { + return sparse_grad_indices_; + } + + private: + size_t index_; + size_t bucket_count_; + at::Tensor buffer_; + + // Per-variable info in buffer_. + std::vector offsets_; + std::vector lengths_; + std::vector sizes_vec_; + + // Model parameters for this bucket. + const std::vector parameters_; + + // Predefined sparse indices for this bucket (only used for sparse tensors). + // The gradients will be updated to have indices with these tensor values + c10::optional sparse_grad_indices_; +}; + +// Base class of both `PythonCommHook` and `CppCommHook`. +// Requires implementing 1) `runHook` method that communicates gradients +// asynchronously, and 2) `parseHookResult` method that converts the hook +// result into a tensor. +class TORCH_API CommHookInterface { + public: + virtual ~CommHookInterface() = default; + + // Passes the input grad bucket to the registered communication hook. + // Once the tensor in the bucket are ready, kicks off the hook asynchronously + // and returns a future that holds the communication results. + virtual c10::intrusive_ptr runHook( + GradBucket& bucket) = 0; + + // Returns the resulting tensor once the communication hook result is + // ready. The resulting tensor will then be copied to the grads of + // individual parameters. + virtual at::Tensor parseHookResult(const c10::IValue& result) = 0; +}; + +namespace detail { +// This helper function is called both by CppCommHookInterface below and inside +// reducer. +TORCH_API at::Tensor parseCppCommHookResult(const c10::IValue& result); +} // namespace detail + +// This CppCommHook interface only requires implementing runHook method that +// potentially uses a state. +template +class CppCommHookInterface : public CommHookInterface { + public: + explicit CppCommHookInterface(T state) : state_(std::move(state)) {} + + ~CppCommHookInterface() override = default; + + at::Tensor parseHookResult(const c10::IValue& result) override { + return detail::parseCppCommHookResult(result); + } + + protected: + T state_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..8524191515190083dc1f3063405533bd2782e315 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h @@ -0,0 +1,23 @@ +// Copyright (c) Meta Platforms, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +namespace c10d { + +enum class DebugLevel { Off = 0, Info = 1, Detail = 2 }; + +TORCH_API void setDebugLevel(DebugLevel level); + +// Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG` +// environment variable. 
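+// (Illustrative: the variable is typically set to one of OFF, INFO, or
+// DETAIL, matching the DebugLevel values above, e.g.
+// `TORCH_DISTRIBUTED_DEBUG=DETAIL`.)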
+TORCH_API void setDebugLevelFromEnvironment(); + +TORCH_API DebugLevel debug_level() noexcept; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp new file mode 100644 index 0000000000000000000000000000000000000000..683841f3ba885f96c94d688190fc530a88e01003 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp @@ -0,0 +1,52 @@ +#pragma once + +#include +#include + +namespace c10d { + +enum class BuiltinCommHookType { + ALLREDUCE = 1, + FP16_COMPRESS = 2, +}; + +class AllReduceCommHook + : public CppCommHookInterface> { + public: + explicit AllReduceCommHook(const c10::intrusive_ptr& state) + : CppCommHookInterface>(state) {} + + ~AllReduceCommHook() override = default; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; +}; + +class FP16CompressCommHook + : public CppCommHookInterface> { + public: + explicit FP16CompressCommHook(const c10::intrusive_ptr& state) + : CppCommHookInterface>(state) {} + + ~FP16CompressCommHook() override = default; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; +}; + +// Almost same as AllReduceCommHook, but without division inside the hook. +// This enables the optimization of fusing copy and division and saves one scan +// over all the input parameters, when no communication hook is provided by the +// user. Only used internally and not released as a public built-in +// communication hook. +class _AllReduceBySumCommHook + : public CppCommHookInterface> { + public: + explicit _AllReduceBySumCommHook( + const c10::intrusive_ptr& state) + : CppCommHookInterface>(state) {} + + ~_AllReduceBySumCommHook() override = default; + + c10::intrusive_ptr runHook(GradBucket& bucket) override; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h new file mode 100644 index 0000000000000000000000000000000000000000..fff2b45c4c952b99b3ba2f27696cb6d2b9c29326 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h @@ -0,0 +1,56 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
+ +#pragma once + +#include +#include + +#include + +namespace fmt { + +template <> +struct formatter { + constexpr decltype(auto) parse(format_parse_context& ctx) const { + return ctx.begin(); + } + + template + decltype(auto) format(const std::error_category& cat, FormatContext& ctx) + const { + if (std::strcmp(cat.name(), "generic") == 0) { + return fmt::format_to(ctx.out(), "errno"); + } else { + return fmt::format_to(ctx.out(), "{} error", cat.name()); + } + } +}; + +template <> +struct formatter { + constexpr decltype(auto) parse(format_parse_context& ctx) const { + return ctx.begin(); + } + + template + decltype(auto) format(const std::error_code& err, FormatContext& ctx) const { + return fmt::format_to( + ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message()); + } +}; + +} // namespace fmt + +namespace c10d { +namespace detail { + +inline std::error_code lastError() noexcept { + return std::error_code{errno, std::generic_category()}; +} + +} // namespace detail +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h new file mode 100644 index 0000000000000000000000000000000000000000..a00b6f70653aaa8d4456033800c5dc69942e3b03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h @@ -0,0 +1,33 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#include +#include + +// Utility macro similar to C10_THROW_ERROR, the major difference is that this +// macro handles exception types defined in the c10d namespace, whereas +// C10_THROW_ERROR requires an exception to be defined in the c10 namespace. +#define C10D_THROW_ERROR(err_type, msg) \ + throw ::c10d::err_type( \ + {__func__, __FILE__, static_cast(__LINE__)}, msg) + +namespace c10d { + +using c10::DistNetworkError; + +class TORCH_API SocketError : public DistNetworkError { + using DistNetworkError::DistNetworkError; +}; + +class TORCH_API TimeoutError : public DistNetworkError { + using DistNetworkError::DistNetworkError; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5ae305ce4eb9f1347ef710ddd7c79f2c55865566 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp @@ -0,0 +1,104 @@ +#include +#include + +#include + +namespace c10d { + +class TORCH_API Logger { + public: + explicit Logger(std::shared_ptr reducer); + // Set logging data that can be got during DistributedDataParallel + // construction time. + void set_construction_data_and_log( + const std::string& module_name, + const std::vector& device_ids, + int output_device, + bool broadcast_buffers, + bool has_sync_bn, + bool static_graph); + + void set_static_graph(); + + // An interface for users to get DDPLoggingData and log them + // in the applications. Explanation of logging fields are in + // "struct DDPLoggingData" of "torch/c10/util/Logging.h". 
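+  // (Illustrative, unverified here: from Python this data is typically
+  // surfaced via DistributedDataParallel's `_get_ddp_logging_data()` helper.)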
+ at::DDPLoggingData get_ddp_logging_data(); + + // Stream insertion operator for logging data to stream under + // TORCH_DISTRIBUTED_DEBUG. + friend std::ostream& operator<<(std::ostream& output, const Logger& logger); + + ~Logger() noexcept(false) { + // Log if DDP graph is static in Logger dtor instead of Reducer dtor since + // Logger is deleted before Reducer. + log_if_graph_static(reducer_->ddp_graph_static()); + } + + // Set environment variables. + void set_env_variables(); + // Set parameters stats. + void set_parameter_stats(); + // Get size of each bucket (Bytes). + std::vector get_bucket_sizes(); + // Get variable indices for each bucket. + std::vector> get_per_bucket_variable_indices(); + // Set comm. hook, if used + void set_comm_hook(const std::string& hook); + // Set running with uneven input detection (model.join() context manager) + void set_uneven_input_join(); + + // Reset performance stats at current iteration + void reset_performance_stats(); + + // Calculate avg stats using cpu timer and gpu timer + // that has been recorded in reducer. + void calculate_avg_time( + int64_t& avg_time, + int64_t& time_duration, + Timer& timer, + Timer::Event start_event, + Timer::Event end_event); + + // Set the absolute time of the event that has been recorded in reducer. + void set_event_time(int64_t& event_time, Timer& timer, Timer::Event event); + // Set stats that can be collected only during + // training loop. It is called at the beginning of forward call + // to record the run time stats of sampled iterations that previously ran. + // GPU performance stats are collected only for single process + // single device program and single device module right now. + // TODO to support single process multiple devices and multi device modules, + // events need to be created and recorded on multiple devices. + void set_runtime_stats_and_log(); + + // Called when DDP/reducer is failing with an error. The + // logging data structure will have two fields filled: "has_error" indicating + // that this iteration encountered an error and other fields are not valid, + // and "error", a string which contains the error message that DDP failed + // with. + template + void set_error_and_log(const std::string& ddp_error, const Args&... args) { + ddp_logging_data_->ints_map["has_error"] = 1; + auto err = c10::str(ddp_error, args...); + ddp_logging_data_->strs_map["error"] = err; + // Report the iteration we are erroring at so user knows how many examples + // successfully processed before this error was hit. + ddp_logging_data_->ints_map["iteration"] = reducer_->num_iterations_; + at::LogPyTorchDDPUsage(*ddp_logging_data_); + } + + // When running without static graph, called when reducer is destroyed to log + // if graph was actually static and is a candidate for static graph + // optimization. + void log_if_graph_static(bool is_static); + + private: + // ddp_logging_data_ is used to hold all the ddp related logging + // data fields. + std::unique_ptr ddp_logging_data_; + std::shared_ptr reducer_; + // track the number of iterations when runtime stats are collected so far. 
+ long num_iterations_stats_recorded_ = 0; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..b4df02e773807ee8786570cf0044f0d3615ff592 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h @@ -0,0 +1,51 @@ +// Copyright (c) Meta Platforms, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#include +#include +#include + +namespace c10d { +namespace detail { + +enum class LogLevel { Trace, Debug, Info, Warning, Error }; + +TORCH_API bool isLogLevelEnabled(LogLevel level) noexcept; + +template +std::string formatLogMessage(fmt::string_view fmt, T&&... args) { + return fmt::vformat(fmt, fmt::make_format_args(args...)); +} + +} // namespace detail +} // namespace c10d + +#define C10D_ERROR(...) \ + LOG_IF( \ + ERROR, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Error)) \ + << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_WARNING(...) \ + LOG_IF( \ + WARNING, \ + c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Warning)) \ + << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_INFO(...) \ + LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Info)) \ + << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_DEBUG(...) \ + LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Debug)) \ + << "[c10d - debug] " << c10d::detail::formatLogMessage(__VA_ARGS__) + +#define C10D_TRACE(...) \ + LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Trace)) \ + << "[c10d - trace] " << c10d::detail::formatLogMessage(__VA_ARGS__) diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..43782204be05496e52b2e7f0415847d17f12b1a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp @@ -0,0 +1,589 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef _WIN32 +#include +#endif + +namespace c10d { + +constexpr int kDefaultFirstBucketBytes = int(1024 * 1024); +constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024); +// Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations. +constexpr int kDDPRuntimeLoggingSampleRate = 100; + +// Forward declaration +class Logger; + +// Local accumulator type for a single bucket. +struct BucketAccumulator { + std::vector indices; + size_t size = 0; + size_t size_limit = 0; +}; + +class TORCH_API Reducer { + public: + // The constructor takes a list of variables (i.e. parameters) for this + // process's single model replica (as DDP assumes single-process + // single-device). The bucket assignment for this reducer, `bucket_indices`, + // is specified as a list of buckets, each of which is specified as a list of + // indices into the bucket's `variables` list. 
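Before the Reducer constructor below, a quick note on the C10D_* macros from logging.h above: each one gates a glog statement on the c10d log level and formats the message with fmt, so call sites pass a format string plus arguments instead of streaming. A small illustrative sketch, with made-up messages:

#include <string>

void log_rendezvous_attempt(const std::string& host, int port, int attempt) {
  // Emitted only when the corresponding c10d log level is enabled.
  C10D_INFO("Connecting to rendezvous at {}:{}", host, port);
  if (attempt > 1) {
    C10D_WARNING("Retrying rendezvous, attempt {}", attempt);
  }
}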
+ explicit Reducer( + std::vector params, + std::vector> bucket_indices, + std::vector per_bucket_size_limits, + c10::intrusive_ptr process_group, + std::vector expect_sparse_gradients, + int64_t bucket_bytes_cap, + bool find_unused_parameters, + bool gradient_as_bucket_view, + std::unordered_map param_names, + int64_t first_bucket_bytes_cap); + + ~Reducer() noexcept(false); + + // To (re-)initialize bucket assignment, pass a list of buckets, each of + // which is specified by a list of indices in the bucket's `variables` list. + // This function performs validation that the variables within a bucket + // all live on the same device and have the same dimensionality. + void initialize_buckets(std::vector> bucket_indices); + + void autograd_hook(size_t index); + + // This function is called when the forward function has produced an output, + // and the user wishes to reduce gradients in the backwards pass. + // If they don't, and wish to accumulate gradients before reducing them, + // a call to this function can simply be omitted. + void prepare_for_backward(const std::vector& outputs); + + // Called at the beginning of forward() inside DistributedDataParallel, + // right now it captures the starting time of forward in each iteration. + void prepare_for_forward(); + + // Returns the relative time in nanoseconds when gradients were ready, + // with respect to the time `prepare_for_backward` was called. The + // vector is for parameters for a single model replica. + std::vector get_backward_stats() const { + return backward_stats_; + } + + // Registers a hook to the reducer. The hook is `CommHookInterface` + // type to allow both Python and CPP hooks. This function can only + // be called once before calling backward. + // Cannot combine with the call of `register_builtin_comm_hook`. + void register_comm_hook(std::unique_ptr iface); + + // Registers a built-in C++ comm hook to the reducer. This function can only + // be called once before calling backward. + // Cannot combine with the call of `register_comm_hook`. + void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type); + + // Informs reducer that optimizer is running in backward, so gradients + // don't need to be copied from buckets as the optimizer would've already + // been applied. + void set_optimizer_in_backward() { + optim_in_backward_ = true; + }; + + // Runs allreduce or installed communication hook given GradBucket instance. + c10::intrusive_ptr run_comm_hook( + GradBucket& grad_bucket); + + // Runs default allreduce hook. + c10::intrusive_ptr run_allreduce_hook( + GradBucket& grad_bucket); + + // Returns gradient buckets in sequential order of buckets_. This is the order + // in which buckets are reduced across processes. If return_zero_tensors=true, + // will return zero tensors of the same shape instead of the true tensors. + std::vector get_grad_buckets( + bool return_zero_tensors = true) const; + + // Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_ + // according to when tensors received grads in the backward pass. + // TODO this function makes broadcast communication call and + // could be overlapped with next forward() call, thus + // it could be async. Will make it async when rebuilding buckets for + // find_unused_parameters = true case, as we could rebuild buckets more than + // once for find_unused_parameters = true case, where subgraphs are trained + // and parameter indices order may change more frequently. 
+ // For find_unused_parameters = false case, buckets are only rebuilt once, + // the performance cost is negligible. Returns true if the buckets were + // rebuilt. + bool rebuild_buckets(); + + void setSparseMetadata(std::map& metadata); + + // Install futures that should be awaited at end of backwards. Currently these + // are only used by user-defined custom buffer reduction hooks, but can be + // generalized to any user-originating futures that need to be awaited. + void install_futures(c10::List> futs); + + // Returns true if we should rebuild buckets, else false. We only rebuild + // buckets once after the first iteration and never rebuild them if + // find_unused_parameters_. + inline bool should_rebuild_buckets() const { + return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_; + } + + // Pushes all parameters to be rebuilt. + void push_rebuilt_params_for_all_indices(); + + // Creates and sets ForwardPassWorkHandle given a Work and the + // corresponding tensor being reduced. + void set_forward_pass_work_handle( + c10::intrusive_ptr forwardPassWorkHandle, + bool useStaticWorldSize); + + // Retrieve on-device tensors used to track locally unused parameters. It is + // a tensor where index i = 1 if the Variable with that index has been used. + at::Tensor get_local_used_map_on_device() const; + + // An function for users to set sample_rate of collecting + // runtime stats. The time stats will be recorded for the + // first 10 iterations, after 10 iterations time stats will be + // recorded once every "sample_rate" training iterations. + void set_ddp_runtime_logging_sample_rate(int sample_rate); + + // Specify the training graph is static. + void set_static_graph(); + + // Delay all reduce to be after all gradients' calculation is complete. + void delay_all_reduce(); + + void set_mixed_precision_param_dtype(c10::ScalarType dtype); + + // Weak reference to associated DDP logger. The reference is weak to avoid + // refcycle between reducer and logger. + void set_logger(std::weak_ptr logger); + + // When graph is not explicitly set by user as static and has unused + // parameters, this will return whether the graph has been static until the + // current iteration, which means unused params set has not changed. + bool ddp_graph_static(); + + // Removes autograd hooks registered by the Reducer on the model parameters. + void remove_autograd_hooks(); + + // Checks whether or not the reducer has finalized the current backward + // iteration. + void check_finalized(); + + // Updates the underlying process group used by DDP with the new process + // group. + void update_process_group( + c10::intrusive_ptr new_process_group); + + // Resets reducer state. + void reset_state(); + + protected: + // Forward declaration. 
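The sampling policy described for set_ddp_runtime_logging_sample_rate() above keeps timing overhead low: stats are recorded unconditionally for the first 10 iterations and then only once every sample_rate iterations. A one-function restatement of that rule (the function name is illustrative; the constant comes from the comment above):

// Whether runtime stats should be collected at this training iteration.
bool should_sample_runtime_stats(long iteration, int sample_rate) {
  constexpr long kAlwaysSampleFirstN = 10;  // per the comment above
  return iteration <= kAlwaysSampleFirstN || iteration % sample_rate == 0;
}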
+ struct Bucket; + + void push_rebuilt_params(const size_t& index); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + mutable std::mutex mutex_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const std::vector params_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + c10::intrusive_ptr<::c10d::ProcessGroup> process_group_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector expect_sparse_gradients_; + + std::vector> + grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unordered_map gradAccToVariableMap_; + std::vector>> + hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool expect_autograd_hooks_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool require_finalize_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t next_bucket_; + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool has_marked_unused_parameters_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const bool find_unused_parameters_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const bool gradient_as_bucket_view_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector unused_parameters_; + // Previous iteration's unused params, used for checking if unused parameters + // change between iterations. Only filled during the first backwards call. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector prev_iteration_unused_parameters_; + // Whether graph is static or not. When user does not explicitly set static + // graph, the only possible dynamism is set of unused parameters changing + // between iterations which is tracked by this flag. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool ddp_graph_static_{true}; + // Locally used parameter maps indicating if parameters are used locally + // during the current iteration or no_sync session if no_sync is on. + // Each map is a one-dim int32 tensor of number of parameters. These tensors + // are marked in autograd_hook to indicate the corresponding param has been + // used, and get allreduced in the end of backward step of current iteration + // or no_sync session for figuring out the globally unused parameters. + // + // local_used_map_: CPU tensor for bookkeeping locally used params + // local_used_map_dev_: dev tensor for reducing globally unused params + at::Tensor local_used_map_; + at::Tensor local_used_map_dev_; + // Indicate that reduction is done and D2H copy is done as well. + bool local_used_map_reduced_; + + // Weak pointer to associated DDP logger. + std::weak_ptr logger_; + // List of futures installed by Reducer::install_futures that should be + // awaited at the end of backwards pass. + c10::optional>> + installed_futures_{c10::nullopt}; + // Mixed precision parameter dtype for bucket type checking. 
+ c10::optional mixed_precision_param_dtype_{c10::nullopt}; + + // Work handle for allreduce on local_used_map_ + c10::intrusive_ptr local_used_work_; + + void mark_variable_ready_dense(size_t variable_index); + + void mark_variable_ready_sparse(size_t variable_index); + + void mark_variable_ready(size_t variable_index); + + void mark_bucket_ready(size_t bucket_index); + + void finalize_bucket_dense(Bucket& bucket); + + void finalize_backward(); + + // Returns list of model parameters corresponding to the given bucket. + // bucket_index is a key to cache after buckets are rebuilt, after which this + // mapping never changes. + std::vector get_variables_for_bucket( + size_t bucket_index, + const Bucket& bucket) const; + + // Asserts that the reduction for the previous iteration has finished before + // rebuilding buckets or kicking off the next one. + void ensure_prior_reduction_finished(); + + // Broadcast rebuilt buckets from rank 0 to other ranks before initializing + // the buckets + void sync_bucket_indices(std::vector>& bucket_indices); + + // We'd like to use DistAutogradContext::GradCallback here but dist autograd + // doesn't exist under Windows. So we just directly use the concrete type but + // to preserve and enforce our original intent we do a static assert when dist + // autograd is available. + using GradCallback = std::function; +#ifndef _WIN32 + static_assert( + std::is_same< + GradCallback, + torch::distributed::autograd::DistAutogradContext::GradCallback>:: + value, + ""); +#endif + void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb); + + // This function is called inside `initialize_buckets()`. It initializes both + // `bucket_views_in` and `bucket_views_out` with views for each variable's + // gradient into the bucket's flattened `gradients` tensor. Views serve as + // entry points to `copy_()` each grad's data in/out of the flattened + // `gradients` tensor. + void initialize_bucket_views(Bucket& bucket); + + // This function is called inside `finalize_backward`, it happens only if + // DDP communication hook was registered to recreate just bucket_views_out + // with the result of `future_work`. + void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor); + + // If gradient_as_bucket_view_ is false, after allreduce buckets, + // copy bucket results back to grads. + void copy_bucket_to_grad( + at::Tensor& variable, + Reducer::Bucket& bucket, + size_t intra_bucket_index, + bool global_unused); + // Check layout of grad and bucket_view before copying the grad to bucket. + void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view); + + // A bucket contains [1..N] gradients to be reduced, where the gradients + // have the same dtype and device. + // Coalescing gradients together before reducing can result in lower overhead + // and/or faster time to completion. Coalescing requires the constituent + // gradients to have the same dtype and device, and the resulting flattened + // tensor uses that common dtype and device. The flattened tensor is filled + // as the corresponding gradients are computed (triggered by autograd hooks), + // and the buckets are reduced in a predetermined order consistent across + // processes. 
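The comment above is the heart of bucketing: gradients that share a dtype and device are flattened into one contiguous tensor, and each variable gets a view into it so data can be copied in and out cheaply. Here is a self-contained sketch of that layout using public ATen ops; the names are illustrative, it assumes a non-empty list of gradients on one device, and the real bookkeeping lives in the Bucket struct that follows.

#include <ATen/ATen.h>
#include <vector>

struct FlatBucketSketch {
  at::Tensor gradients;           // flattened 1-D storage
  std::vector<at::Tensor> views;  // one reshaped view per variable
};

FlatBucketSketch make_flat_bucket(const std::vector<at::Tensor>& grads) {
  int64_t total = 0;
  for (const auto& g : grads) {
    total += g.numel();
  }
  FlatBucketSketch bucket;
  bucket.gradients = at::zeros({total}, grads.front().options());
  int64_t offset = 0;
  for (const auto& g : grads) {
    // A narrow into the flat tensor, reshaped to the gradient's size so
    // copy_() can move data in either direction without reallocating.
    at::Tensor view =
        bucket.gradients.narrow(0, offset, g.numel()).view(g.sizes());
    view.copy_(g);
    bucket.views.push_back(view);
    offset += g.numel();
  }
  return bucket;
}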
+ struct Bucket { + // Gradients of the bucket flattened into a 1-dimensional tensor + at::Tensor gradients; + + // Views into the `gradients` tensor for each individual gradient + // Each view is created with layout (size and stride) matching the + // gradient's expected layout (see the "Gradient Layout Contract" in + // torch/csrc/autograd/functions/accumulate_grad.h). + // `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])` + // provide convenient ways to copy gradient data in/out of `gradients`, + // respectively. + // We keep both `bucket_views_in` and `bucket_views_out` because + // registering a DDP communication hook may re-initialize + // `bucket_views_out` with the value of the hook's `future_work` but we + // still need separate views into the bucket's original flattened gradient + // to copy in gradient data. + std::vector bucket_views_in; + std::vector bucket_views_out; + + // Variables whose gradients are held in this bucket + // We use refcounted tensors here so that we can easily unflatten the + // bucket's flattened `gradients` tensor into the participating variables + // after reduction has completed. + std::vector variables; + + // Per-variable offset/length into the flattened `gradients` tensor and + // the corresponding `GradBucket` instance for communication hooks + std::vector offsets; + std::vector lengths; + + // Per-variable sizes slicing into the bucket's `gradients` tensor + std::vector sizes_vec; + + // Number of gradients left to be computed before the bucket is ready to + // be reduced + size_t pending; + + // Global indices of participating variables in the bucket + std::vector variable_indices; + + // Future work handle for DDP communication hook + // If no hook is registered, a temporary vanilla allreduce hook is used. + c10::intrusive_ptr future_work; + + // If this bucket should expect a single sparse gradient + // If `true`, then this implies that `bucket.variables.size() == 1`. + bool expect_sparse_gradient = false; + + // Sparse indices tensor + c10::optional sparse_tensor_indices = c10::nullopt; + + // TODO(@pietern) + // Memory copies from gradient tensors into the bucket are potentially + // done on different CUDA streams. We record an event for every copy + // so that we can synchronize with them prior to kicking off the reduction. + // std::vector events; + }; + + std::vector buckets_; + + // A variable locator locates a particular variable in the reducer's buckets + struct VariableLocator { + // Index of the bucket containing the variable in the `buckets_` vector + size_t bucket_index; + // Index of the variable in the bucket, which may be used consistently + // across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`, + // `lengths`, `sizes_vec`, and `variable_indices` in `Bucket` + size_t intra_bucket_index; + + VariableLocator() = default; + + VariableLocator(size_t bucket_index_, size_t intra_bucket_index_) + : bucket_index(bucket_index_), + intra_bucket_index(intra_bucket_index_) {} + }; + + // Map the index of a variable to its location in the bucket structure. + std::vector variable_locators_; + + // track the number of iterations to synchronize grads in training so far. + long num_iterations_; + // track distinct iteration of backward call. This is distinct from + // num_iterations_, for example in the case of multiple forward before + // backward. + long num_bwd_calls_; + // whether the first autograd hook for a distinct backward pass has been + // called. 
+ bool first_autograd_hook_called_; + // track the number of buckets that have been ready for + // communication calls like allReduce or communication hooks. + int num_buckets_ready_; + + // Timing information. + int64_t backward_compute_start_time_ = -1; + std::unique_ptr timer_; + + // We collect the relative timestamp of every gradient being ready + // when executing autograd. This can be used to derive a timeline of + // the point in time buckets were ready, or ideal bucket assignment/ordering. + std::vector backward_stats_; + + bool should_collect_runtime_stats(); + void record_forward_compute_start_time(); + void record_backward_compute_start_time(); + void record_backward_compute_end_time(); + void record_backward_comm_start_time(); + void record_backward_comm_end_time(); + + int get_ddp_runtime_logging_sample_rate(); + int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate; + + bool is_multi_device_module_ = false; + + // Following variables are to help build dynamic bucket order + bool has_rebuilt_bucket_; + std::vector rebuilt_params_; + std::vector rebuilt_param_indices_; + const int64_t bucket_bytes_cap_; + +#ifndef _WIN32 + struct RpcContext { + using ContextPtr = torch::distributed::autograd::ContextPtr; + // The shared_ptr is to hold the context instance. + ContextPtr context_ptr_holder; + std::atomic context_ptr{nullptr}; + + void set(ContextPtr&& new_context_ptr); + }; + RpcContext rpc_context_; +#endif + + // A struct containing work handle and tensor for allreduce scheduled in + // forward pass, if applicable. + struct ForwardPassAllreduceWork { + c10::intrusive_ptr workHandle; + at::Tensor resultTensor; + // whether we should divide by the initial world_size or the no. of + // remaining DDP ranks. + bool useStaticWorldSize; + }; + + // Handle for the currently scheduled allreduce in the forward pass, if + // applicable. + ForwardPassAllreduceWork forwardPassWorkHandle_; + + // Division factor for reduction of gradients. + // Equal to the process group size, with an exception of handling uneven + // input. + int div_factor_; + + bool static_graph_; + + // Key: size_t (index), Value: the number of times that a variable's + // autograd_hook() should be triggered before marking this variable's grad as + // ready for communication. Map will not change after 1st iteration. + std::unordered_map numGradHooksTriggeredMap_; + // Key: size_t (index), Value: the number of times that a variable's + // autograd_hook() are left to be triggered before marking this variable's + // grad as ready for communication. Map will change after 1st iteration to + // track a grad is ready for communication or not. + std::unordered_map numGradHooksTriggeredMapPerIteration_; + + private: + // reset counting for buckets before backward starts + void reset_bucket_counting(); + // search unused parameters beore backward starts + void search_unused_parameters( + const std::vector& outputs); + void set_divide_factor(); + // kick off all reduce for the ready bucket + void all_reduce_bucket(Bucket& bucket); + // kick off all reduce to local used map, it can help find global unused + // parameters + void all_reduce_local_used_map(); + // initialize locally used parameter maps + void initialize_local_used_map(); + // get current cuda stream + const c10::Stream get_current_stream(); + bool dynamic_graph_find_unused(); + bool static_graph_first_iteration(); + bool static_graph_after_first_iteration(); + + // comm_hook_ is used to access the DDP communication hook if registered. 
+ std::unique_ptr comm_hook_; + + // Sparse metadata contains the indices that will be used + // when calling into sparse allreduce. + // This is only used in the sparse allreduce collective calls + std::unique_ptr> sparse_metadata_; + + // Debug level setting. It is parsed once when Reducer is constructed, and + // remains the same across a single invocation of DDP training. + DebugLevel ddp_debug_level_; + // Mapping of variable index to fully qualified name of model to notify users + // about errors when certain parameters do not get gradient. + std::unordered_map param_names_; + // Variable indices stored sequentially in order of when the gradient is ready + // for the current backwards pass. + std::vector grad_ready_order_indices_; + // Bytes capacity of first bucket, can be configured by user + int64_t first_bucket_bytes_cap_; + // Per iteration set of parameter indices that have been marked ready. + std::unordered_set perIterationReadyParams_; + // Retrieves parameter names that have not been marked as ready as part of + // previous iteration. + std::vector getUnmarkedParamsForIteration(); + // Retrieves parameter indices that have not been marked as ready as part of + // previous iteration. + std::vector getUnmarkedParamIndicesForIteration(); + // Raises appropriate error if mark_variable_ready is called on the same + // variable twice, which is unexpected. + void checkAndRaiseMarkedTwiceError(size_t curVariableIndex); + // Retrieves parameter corresponding to the given VariableIndex. + at::Tensor& get_param_from_index(size_t index); + + // Cached bucket index to model parameter mapping. Populated after buckets + // are rebuilt after which this mapping is static. + mutable std::unordered_map> + cached_variables_for_bucket_; + + bool optim_in_backward_{false}; + friend class Logger; +}; + +// This is equivalent to take_tensors but returns indices into the +// tensor list argument for bucket assignment. Also, it is aware +// of device placement and will not allow buckets to span devices. +// The index of tensors[i] assigned to bucket is tensor_indices[i], +// when tensor_indices is empty, the index of tensors[i] assigned to +// bucket is i. +TORCH_API std::tuple>, std::vector> +compute_bucket_assignment_by_size( + const std::vector& tensors, + const std::vector& bucket_size, + const std::vector& expect_sparse_gradient = {}, + const std::vector& tensor_indices = {}, + const c10::optional>& logger = {}); + +// Verify models across all processes are the same as model on rank 0 with +// respect to no. of params and matching dtype/size/layout. +TORCH_API void verify_params_across_processes( + const c10::intrusive_ptr& process_group, + const std::vector& params, + const c10::optional>& logger); +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..acd8975c4d2db13cac2e988238a0a8a2a191df68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp @@ -0,0 +1,81 @@ +#pragma once +#include +#include + +namespace c10d { +constexpr int kUnsetTime = -1; + +inline int64_t current_time_in_nanos() { + return c10::getTime(); +} + +class TORCH_API Timer { + private: + // The timestamp of forward call start time in each iteration. 
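Looking back at the end of reducer.hpp above, compute_bucket_assignment_by_size() groups tensors greedily under per-bucket byte caps (compare BucketAccumulator near the top of the file). Its exact argument element types are not visible in this diff, so the sketch below restates the greedy idea over plain byte counts rather than calling the real function.

#include <cstddef>
#include <vector>

// Greedily pack tensor indices into buckets whose total size stays under the cap.
std::vector<std::vector<size_t>> assign_buckets_by_size(
    const std::vector<size_t>& tensor_bytes,
    size_t size_limit) {
  std::vector<std::vector<size_t>> buckets;
  std::vector<size_t> current;
  size_t current_bytes = 0;
  for (size_t i = 0; i < tensor_bytes.size(); ++i) {
    if (!current.empty() && current_bytes + tensor_bytes[i] > size_limit) {
      buckets.push_back(current);  // close the bucket once the cap would be exceeded
      current.clear();
      current_bytes = 0;
    }
    current.push_back(i);
    current_bytes += tensor_bytes[i];
  }
  if (!current.empty()) {
    buckets.push_back(current);
  }
  return buckets;
}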
+ int64_t forward_start_time = kUnsetTime; + // The timestamp of backward computation start and end time in each + // iteration. + int64_t backward_compute_start_time = kUnsetTime; + int64_t backward_compute_end_time = kUnsetTime; + // The timestamp of first communication call start time in each iteration. + int64_t backward_comm_start_time = kUnsetTime; + // The timestamp of last communication call end time in each iteration. + int64_t backward_comm_end_time = kUnsetTime; + + public: + enum class Event { + kForwardStart, + kBackwardComputeStart, + kBackwardComputeEnd, + kBackwardCommStart, + kBackwardCommEnd, + }; + + // Record the current event, i.e., mark it as having occurred now. Default + // CPU implementation. + virtual void record(Event event) { + getTimeRef(event) = current_time_in_nanos(); + } + + // Return the difference between when two events occurred, in nanoseconds. + // Or nullopt if one of them hasn't been recorded. + virtual c10::optional measureDifference(Event start, Event end) = 0; + + virtual ~Timer() = default; + + // Return host-side timestamp, or nullopt if it has not yet been recorded. + c10::optional getTimestamp(Event event) { + auto time = getTimeRef(event); + if (time == kUnsetTime) { + return c10::nullopt; + } else { + return time; + } + } + + // Return host-side time member variable corresponding to the given event. + int64_t& getTimeRef(Event event) { + switch (event) { + case Event::kForwardStart: + return forward_start_time; + case Event::kBackwardComputeStart: + return backward_compute_start_time; + case Event::kBackwardComputeEnd: + return backward_compute_end_time; + case Event::kBackwardCommStart: + return backward_comm_start_time; + case Event::kBackwardCommEnd: + return backward_comm_end_time; + default: + TORCH_INTERNAL_ASSERT(false); + } + } +}; + +TORCH_DECLARE_TYPED_REGISTRY( + TimerRegistry, + c10::DeviceType, + Timer, + std::unique_ptr, + c10::Device); +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp new file mode 100644 index 0000000000000000000000000000000000000000..50c800e8d7980d20fc942043e0a6894a9d31872c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10d { +const int kUnsetSeqNum = 0; + +namespace { +constexpr int kByteOffset = 8; +} + +// Converts from int to char vec to write in store +template +inline std::vector toVec(uint64_t num, int numBytes) { + std::vector values; + // Read off bytes from right to left, pushing them into + // char array. + for (const auto i : c10::irange(numBytes)) { + uint8_t x = (num >> (kByteOffset * i)) & 0xff; + values.push_back(static_cast(x)); + } + return values; +} + +// Converts from char vec (such as from store read) to int. +template +inline uint64_t fromVec(const std::vector& values) { + uint64_t num = 0; + // Set each byte at the correct location on num + for (const auto i : c10::irange(values.size())) { + uint8_t x = static_cast(values[i]); + num |= (static_cast(x) << (kByteOffset * i)); + } + return num; +} + +class TORCH_API SequenceNum { + public: + SequenceNum(); + explicit SequenceNum(const uint64_t num); + // Retrieve num_. Will throw if not set. + uint64_t get() const; + // Increment num_. Will throw if not set. 
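sequence_num.hpp above ships two helpers, toVec() and fromVec(), that move a uint64_t through the store as bytes: kByteOffset is the 8-bit shift applied per byte, so the value is written least-significant byte first and reassembled the same way. Their template parameters are not visible in this diff, so the round trip below restates the same shift-and-mask logic standalone instead of calling them.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const uint64_t num = 0x0102030405060708ULL;
  // Write least-significant byte first, mirroring toVec().
  std::vector<uint8_t> bytes;
  for (int i = 0; i < 8; ++i) {
    bytes.push_back(static_cast<uint8_t>((num >> (8 * i)) & 0xff));
  }
  // Reassemble, mirroring fromVec().
  uint64_t out = 0;
  for (size_t i = 0; i < bytes.size(); ++i) {
    out |= static_cast<uint64_t>(bytes[i]) << (8 * i);
  }
  assert(out == num);  // the round trip preserves the sequence number
  return 0;
}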
+ void increment(); + // Increment num_ and return the old value. Will throw if not set. + uint64_t getAndIncrement(); + // Sets num_ + void set(const uint64_t num); + // Returns true if this SequenceNum is properly initialized with a value, else + // false. + bool isSet() const; + + SequenceNum& operator=(const SequenceNum& other); + + SequenceNum(const SequenceNum& other); + + private: + c10::optional num_; + mutable std::mutex lock_; +}; + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h new file mode 100644 index 0000000000000000000000000000000000000000..52832722304cf651b6333f849f29fd9d96a0fc42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h @@ -0,0 +1,93 @@ +// Copyright (c) Meta Platforms, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace c10d { +namespace detail { + +class SocketOptions { + public: + SocketOptions& prefer_ipv6(bool value) noexcept { + prefer_ipv6_ = value; + + return *this; + } + + bool prefer_ipv6() const noexcept { + return prefer_ipv6_; + } + + SocketOptions& connect_timeout(std::chrono::seconds value) noexcept { + connect_timeout_ = value; + + return *this; + } + + std::chrono::seconds connect_timeout() const noexcept { + return connect_timeout_; + } + + private: + bool prefer_ipv6_ = true; + std::chrono::seconds connect_timeout_{30}; +}; + +class SocketImpl; + +class Socket { + public: + // This function initializes the underlying socket library and must be called + // before any other socket function. + static void initialize(); + + static Socket listen(std::uint16_t port, const SocketOptions& opts = {}); + + static Socket listenFromFd(int fd, std::uint16_t expected_port); + + static Socket connect( + const std::string& host, + std::uint16_t port, + const SocketOptions& opts = {}); + + Socket() noexcept = default; + + Socket(const Socket& other) = delete; + + Socket& operator=(const Socket& other) = delete; + + Socket(Socket&& other) noexcept; + + Socket& operator=(Socket&& other) noexcept; + + ~Socket(); + + Socket accept() const; + + int handle() const noexcept; + + std::uint16_t port() const; + + bool waitForInput(std::chrono::milliseconds timeout); + + private: + explicit Socket(std::unique_ptr&& impl) noexcept; + + std::unique_ptr impl_; +}; + +} // namespace detail + +} // namespace c10d diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8ba7226dc1fe7eab537049db26d5c090820fb0d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// All RPC peers should call into this function at the same time. Each peer +// provides its own id and name, and this function uses the given Store to +// gather global name-to-id mapping on all peers. 
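The socket wrapper declared in socket.h above pairs a small fluent options object with static factory functions. A hedged client-side usage sketch follows; the host and port are placeholders and error handling is omitted.

#include <chrono>
#include <string>

void connect_sketch() {
  // Must be called before any other socket function, per the comment above.
  c10d::detail::Socket::initialize();

  c10d::detail::SocketOptions opts;
  opts.prefer_ipv6(true).connect_timeout(std::chrono::seconds(60));

  auto client = c10d::detail::Socket::connect("localhost", 29500, opts);
  if (client.waitForInput(std::chrono::milliseconds(1000))) {
    int fd = client.handle();  // raw descriptor with data ready to read
    (void)fd;
  }
}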
+TORCH_API std::unordered_map collectNames( + ::c10d::PrefixStore store, + const worker_id_t selfId, + const std::string& selfName, + const int worldSize); + +// Ranks in dynamic RPC groups will initially call into this to establish the +// name-to-id mapping for the current peers in the group. The current rank will +// put its own worker info in the store and discover all the ranks that came +// before it. NOTE: This needs to be called with the Dynamic RPC group +// membership management token held. +TORCH_API std::unordered_map collectCurrentNames( + ::c10d::PrefixStore store, + const worker_id_t selfId, + const std::string& selfName); + +// Remove name frmo Store, used in dynamic RPC groups. +// NOTE: This needs to be called with the Dynamic RPC group +// membership management token held. +TORCH_API void removeCurrentName( + ::c10d::PrefixStore store, + const worker_id_t selfId, + const std::string& selfName); + +// This performs a synchronization of all call counts by using store. +// All RPC peers wait for others to join to exit at the same time. +TORCH_API int syncCallCount( + ::c10d::PrefixStore store, + const int worldSize, + int activeCalls = 0); + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h new file mode 100644 index 0000000000000000000000000000000000000000..6ef573cf14ff33a49a47b2fa0f61714b2ac05e85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h @@ -0,0 +1,193 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// An enum denoting common RPC errors to allow specific error handling for them. +enum RPCErrorType { + UNKNOWN_ERROR = 0, /* Indicates that error type could not be parsed */ + TIMEOUT = 1, /* Indicates that the RPC has timed out */ + INTENTIONAL_FAILURE = 2 /* Deliberate failure, such as those injected by + FaultyAgent for testing */ +}; + +// The enum values are bitwise ORed with MessageType +// They are bit flags starting from 0x100 and should have +// value such as 0x100, 0x200, 0x400, 0x800, 0xF00, etc. 
+enum MessageTypeFlags { + REQUEST_TYPE = 0x100, + RESPONSE_TYPE = 0x200, +}; + +// Message types must have values between 0x00 to 0xff +enum MessageType { + // messages for dist.rpc on builtin operators + SCRIPT_CALL = 0x00 | MessageTypeFlags::REQUEST_TYPE, + SCRIPT_RET = 0x01 | MessageTypeFlags::RESPONSE_TYPE, + + // messages for dist.rpc on Python UDF + PYTHON_CALL = 0x02 | MessageTypeFlags::REQUEST_TYPE, + PYTHON_RET = 0x03 | MessageTypeFlags::RESPONSE_TYPE, + + // messages for dist.remote on builtin operators and Python UDF + SCRIPT_REMOTE_CALL = 0x04 | + MessageTypeFlags::REQUEST_TYPE, // A remote call on a builtin operator + PYTHON_REMOTE_CALL = + 0x05 | MessageTypeFlags::REQUEST_TYPE, // A remote call on a Python UDF + REMOTE_RET = + 0x06 | MessageTypeFlags::RESPONSE_TYPE, // Response for remote calls for + // UDF, builtin, or script + + // RRef related internal messages + SCRIPT_RREF_FETCH_CALL = + 0x07 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef fetches value + // from owner + PYTHON_RREF_FETCH_CALL = + 0x08 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef fetches + // value from owner + SCRIPT_RREF_FETCH_RET = 0x09 | + MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends ivalue to user + PYTHON_RREF_FETCH_RET = 0x0a | + MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends py::object to user + RREF_USER_DELETE = 0x0b | + MessageTypeFlags::REQUEST_TYPE, // A UserRRef tells the owner to deref + RREF_FORK_REQUEST = + 0x0c | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells the owner + // about itself + RREF_CHILD_ACCEPT = + 0x0d | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells parent + // that owner knows it + RREF_ACK = + 0x0e | MessageTypeFlags::RESPONSE_TYPE, // ACK to internal RRef messages + + // Messages with autograd info + FORWARD_AUTOGRAD_REQ = 0x0f | MessageTypeFlags::REQUEST_TYPE, + FORWARD_AUTOGRAD_RESP = 0x10 | MessageTypeFlags::RESPONSE_TYPE, + + // Messages to propagate gradients on the backward pass. + BACKWARD_AUTOGRAD_REQ = 0x11 | MessageTypeFlags::REQUEST_TYPE, + BACKWARD_AUTOGRAD_RESP = 0x12 | MessageTypeFlags::RESPONSE_TYPE, + + // Messages to tell workers to clean up their autograd context. + CLEANUP_AUTOGRAD_CONTEXT_REQ = 0x13 | MessageTypeFlags::REQUEST_TYPE, + CLEANUP_AUTOGRAD_CONTEXT_RESP = 0x14 | MessageTypeFlags::RESPONSE_TYPE, + + // Messages that tell workers to run requests with profiling enabled. + RUN_WITH_PROFILING_REQ = 0x15 | MessageTypeFlags::REQUEST_TYPE, + RUN_WITH_PROFILING_RESP = 0x16 | MessageTypeFlags::RESPONSE_TYPE, + + // Messages to support RRef.backward(). + RREF_BACKWARD_REQ = 0x17 | MessageTypeFlags::REQUEST_TYPE, + RREF_BACKWARD_RESP = 0x18 | MessageTypeFlags::RESPONSE_TYPE, + + // Other internal message types + EXCEPTION = 0x37 | MessageTypeFlags::RESPONSE_TYPE, + UNKNOWN = 0x3c +}; + +// A message to be sent/received by an RpcAgent. +// +// A Message object contains 4 fields: +// payload (std::vector): a binary chunk of data. +// tensors (std::vector): all tensors. Tensor data are not +// included in the payload, and it is up to the RpcAgent implementation +// to determine how to serialize them. This design is helpful for +// communicating super large tensors where serializing all the data at +// once leads to excessively large memory footprint. An implementation +// can then serialize and send tensors chunk-by-chunk, in the streaming +// fashion. +// type (MessageType): type of the message. +// id (int64_t): message id, this is used to match request and response. 
+// Other implementation can ignore it if they have their own +// ways to do matching. +// +// Layers above ``RpcAgent`` only converts ScriptCall, ScriptResp, PythonCall, +// and PythonResp into a Message, and it is up to the RpcAgent +// implementation to determine how to serialize a message. +class TORCH_API Message final : public torch::CustomClassHolder { + private: + // Keep these private in order to force users to go through make_intrusive and + // thus prevent creating a Message that's not held by an intrusive_ptr. + Message(); + + Message( + std::vector&& payload, + std::vector&& tensors, + MessageType type); + + Message( + std::vector&& payload, + std::vector&& tensors, + MessageType type, + int64_t id); + + friend c10::intrusive_ptr; + + public: + Message(const Message& other) = delete; + Message(Message&& other) = delete; + Message& operator=(Message const& rhs) = delete; + Message& operator=(Message&& rhs) = delete; + + // Destructively retrieves the payload. + std::vector&& movePayload() &&; + std::vector&& moveTensors() &&; + + std::vector& payload(); + const std::vector& payload() const; + std::vector& tensors(); + const std::vector& tensors() const; + MessageType type() const; + + bool isRequest() const; + bool isResponse() const; + bool isShutdown() const; + + // id is an optional field to match request/response. If an RpcAgent + // implementation is able to do the matching without using this id, it can be + // dropped during message serialization. + int64_t id() const; + void setId(int64_t id); + + std::vector> getStorages() const; + + private: + std::vector payload_; + std::vector tensors_; + MessageType type_ = MessageType::UNKNOWN; + int64_t id_ = -1; +}; + +// Create a response Message of type Exception. +// The exception string representation will be used as the message's payload. +// A message ID corresponding to the request that resulted in this response can +// be provided for matching requests/responses. +TORCH_API c10::intrusive_ptr createExceptionResponse( + const std::exception& e, + int64_t id); + +// Create a response Message of type Exception. +// The passed in string representation will be used as the message's payload. +// A message ID corresponding to the request that resulted in this response can +// be provided for matching requests/responses. +TORCH_API c10::intrusive_ptr createExceptionResponse( + const std::string& exceptionStr, + int64_t id); + +inline std::tuple< + c10::intrusive_ptr, + std::vector>> +withStorages(c10::intrusive_ptr message) { + auto storages = message->getStorages(); + return std::make_tuple(std::move(message), std::move(storages)); +} + +using JitFuture = c10::ivalue::Future; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h new file mode 100644 index 0000000000000000000000000000000000000000..432141a97cf5c07dc4c7e2b63fbf393dd22ec420 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE }; + +// Python wrapper of an RRef shared_ptr that supports Python +// pickle and unpickle. +class PYBIND11_EXPORT PyRRef { + public: + // The first ctor can only be called while holding GIL. 
See its implementation + // for more explanations. + explicit PyRRef(const py::object& value, const py::object& type_hint); + explicit PyRRef(c10::intrusive_ptr rref); + PyRRef(const PyRRef&) = default; + ~PyRRef(); + + bool isOwner() const; + bool confirmedByOwner() const; + WorkerInfo owner() const; + std::string ownerName() const; + py::object toHere( + const float timeoutSeconds = + torch::distributed::rpc::kUnsetRpcTimeout) const; + py::object localValue() const; + std::string str() const; + py::tuple pickle() const; + static PyRRef unpickle(const py::tuple& t); + c10::IValue toIValue() const; + // Future that is associated with the creation of this RRef on the remote end. + // This is only used to get the future corresponding to the rref for profiling + // use cases. + c10::intrusive_ptr getFuture() const; + // Keeps track of the future responsible for profiling owner creation + // acknowledgement + c10::intrusive_ptr getProfilingFuture() const; + // Sets the future responsible for profiling owner creation acknowledgement. + // This future is set from python to be a future that returns when profiling + // callbacks have been run. + void setProfilingFuture(c10::intrusive_ptr profilingFuture); + + // create a proxy on this RRef, which can be used to launch RPC on the owner + // of this RRef to run functions on the object referenced by this RRef. + py::object createRRefProxy( + const RRefProxyType& mode, + float timeoutSeconds = rpc::kUnsetRpcTimeout) const; + + // get the type of the data object referenced by this RRef. Timeout argument + // is only used in the first invocation of this function as an argument to the + // RPC to the owner node of the RRef. + py::object getRRefType( + float timeout = rpc::kUnsetRpcTimeout, + bool blocking = true); + + // Run the backward pass with the RRef as the root. + void backward(int64_t autogradContextId, bool retainGraph); + + // Helper static function to run backward on a given rref. + static void backward( + int64_t autogradContextId, + bool retainGraph, + const c10::intrusive_ptr& rref); + + // Specialization of backward if the rref is an OwnerRRef. + static void backwardOwnerRRef( + int64_t autogradContextId, + bool retainGraph, + IValue value); + + private: + c10::intrusive_ptr rref_; + c10::optional> profilingFuture_; + c10::optional type_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h new file mode 100644 index 0000000000000000000000000000000000000000..e640f48838f37c4a185e6d2840671d550ccbe6ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// RPC call representing calling a Python function over RPC. 
+class TORCH_API PythonCall final : public RpcCommandBase { + public: + PythonCall(SerializedPyObj&& serializedPyObj, bool isAsyncExecution); + + c10::intrusive_ptr toMessageImpl() && override; + + static std::unique_ptr fromMessage(const Message& message); + + const SerializedPyObj& serializedPyObj() const; + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + private: + SerializedPyObj serializedPyObj_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..4de41b26fc69540dbc251b6f46ff682cbed3c0c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Converts an internal ivalue::Future of Message into a user-facing +// ivalue::Future of py::object type by creating a new ivalue::Future and call +// its markCompleted as a callback in the given ivalue::Future. +// If hasValue is true, the Message will be converted into a py::object and then +// wrap it with an IValue. If hasValue is false, this ivalue::Future is only +// used for signaling and launching callbacks. In this case, the message will be +// discarded and then set the ivalue::Future using an empty IValue or the given +// FutureError if there is an error. +c10::intrusive_ptr toPyJitFuture( + const c10::intrusive_ptr& messageJitFuture, + bool hasValue = true); + +c10::intrusive_ptr pyRpcBuiltin( + const WorkerInfo& dst, + const std::string& opName, + const py::args& args, + const py::kwargs& kwargs, + const float rpcTimeoutSeconds); + +c10::intrusive_ptr pyRpcPythonUdf( + const WorkerInfo& dst, + std::string& pickledPythonUDF, + std::vector& tensors, + const float rpcTimeoutSeconds, + const bool isAsyncExecution); + +c10::intrusive_ptr pyRpcTorchscript( + const std::string& dstWorkerName, + const std::string& qualifiedNameStr, + const py::tuple& argsTuple, + const py::dict& kwargsDict, + const float rpcTimeoutSeconds, + const bool isAsyncExecution); + +PyRRef pyRemoteBuiltin( + const WorkerInfo& dst, + const std::string& opName, + const float rpcTimeoutSeconds, + const py::args& args, + const py::kwargs& kwargs); + +PyRRef pyRemotePythonUdf( + const WorkerInfo& dst, + std::string& pickledPythonUDF, + std::vector& tensors, + const float rpcTimeoutSeconds, + const bool isAsyncExecution); + +PyRRef pyRemoteTorchscript( + const std::string& dstWorkerName, + const std::string& qualifiedNameStr, + const float rpcTimeoutSeconds, + const bool isAsyncExecution, + const py::args& args, + const py::kwargs& kwargs); + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h new file mode 100644 index 0000000000000000000000000000000000000000..c230037bf8ff2b8267538e8e07bafe7053feb045 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#include +#include +#include + 
+namespace torch { +namespace distributed { +namespace rpc { + +class TORCH_API PythonRemoteCall : public RpcCommandBase { + public: + PythonRemoteCall( + SerializedPyObj&& serializedPyObj, + at::IValue retRRefId, + at::IValue retForkId, + const bool isAsyncExecution); + + inline const SerializedPyObj& serializedPyObj() const { + return serializedPyObj_; + } + + inline const at::IValue& retRRefId() const { + return retRRefId_; + } + + inline const at::IValue& retForkId() const { + return retForkId_; + } + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + private: + SerializedPyObj serializedPyObj_; + const at::IValue retRRefId_; + const at::IValue retForkId_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..7564bfd36aee9bc3f4241d6d5e7cc3d09c0d2a52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// RPC call representing the response of a Python UDF over RPC. +class TORCH_API PythonResp final : public RpcCommandBase { + public: + explicit PythonResp(SerializedPyObj&& serializedPyObj); + + c10::intrusive_ptr toMessageImpl() && override; + + static std::unique_ptr fromMessage(const Message& message); + + const SerializedPyObj& serializedPyObj() const; + + private: + SerializedPyObj serializedPyObj_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..fccdbd2d16d43c4fc3228d20840cfe9ab5943ef6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h @@ -0,0 +1,133 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Singleton class provides interface to execute python UDF remote call +// and deserialize the returned results by running python function +// in internal_rpc_utilities. +// The singleton object is constructed at first when RPC agent is +// constructed, where the python function in +// torch/distributed/internal_rpc_utils.py are imported only once. 
+class PYBIND11_EXPORT PythonRpcHandler { + public: + struct RRefProxyFunctions { + py::object rrefProxyCtor_; + py::object rpcSync_; + py::object rpcAsync_; + py::object remote_; + }; + + struct RRefTypeFunctions { + py::object onOwner_; + py::object onUser_; + }; + + static PythonRpcHandler& getInstance(); + + // Run a pickled Python UDF and return the result py::object + py::object runPythonUdf(const py::object& pythonUdf); + + // Serialized a py::object into a string + SerializedPyObj serialize(const py::object& obj); + + // Deserialize a string into a py::object + py::object deserialize(const SerializedPyObj& serializedObj); + + // Check if obj is RemoteException, then throw it + void handleException(const py::object& obj); + // Alternative if the caller is already holding the GIL. + void handleExceptionGILHeld(const py::object& obj); + // Check if obj is an RemoteException instance. + bool isRemoteException(const py::object& obj); + + // Explicitly clean up py::objects to avoid segment faults when + // py::objects with CPython are cleaned up later at program exit + // See similar issues reported https://github.com/pybind/pybind11/issues/1598 + // and https://github.com/pybind/pybind11/issues/1493 + // Our local tests also caught this segment faults if py::objects are cleaned + // up at program exit. The explanation is: CPython cleans up most critical + // utilities before cleaning up PythonRpcHandler singleton, so when + // PythonRpcHandler singleton cleans up py::objects and call dec_ref(), it + // will crash. + // The solution is to clean up py::objects earlier when Rpc agent join(). + // Be note that py::objects can not be cleaned up when Rpc agent is destroyed + // as well, as Rpc agent is global variable and it will have same issue as + // PythonRpcHandler. + void cleanup(); + + std::shared_ptr jitCompilationUnit(); + + // Parse the string to recover the jit_type, this is used for RRef python + // pickling/unpickling type recovery. The type string inference rule is as + // follows: + // 1. first try to parse if this is primitive types. + // i.e. TensorType, IntType, PyObjectType, etc. + // 2. if not primitive type, we query the python_cu to see if it is a + // class type or interface type registered in python + // We use a ScriptTypeParser instance with custom PythonTypeResolver + // to resolve types according to the above rules. + TypePtr parseTypeFromStr(const std::string& typeStr); + + // Return a set of Python functions for RRef helpers. + const RRefProxyFunctions& getRRefProxyFunctions() const; + + // Return a set of Python functions to retrieve the type of the object + // referenced by a given RRef. + const RRefTypeFunctions& getRRefTypeFunctions() const; + + PythonRpcHandler(const PythonRpcHandler&) = delete; + PythonRpcHandler& operator=(const PythonRpcHandler&) = delete; + PythonRpcHandler(PythonRpcHandler&&) = delete; + PythonRpcHandler& operator=(PythonRpcHandler&&) = delete; + + private: + void init(); + PythonRpcHandler(); + ~PythonRpcHandler() = default; + + // Ref to `torch.distributed.rpc.internal._run_function`. + py::object pyRunFunction_; + + // Ref to `torch.distributed.rpc.internal.serialize`. + py::object pySerialize_; + + // Ref to `torch.distributed.rpc.internal.deserialize`. 
+ py::object pyDeserialize_; + + // Ref to 'torch.distributed.rpc.internal._handle_exception' + py::object pyHandleException_; + + // Python functions for RRef proxy + RRefProxyFunctions rrefProxyFunctions_; + + // Ref to 'torch.distributed.rpc.api._rref_typeof_on_' + RRefTypeFunctions rrefTypeFunctions_; + + // Shared ptr to python compilation unit in jit, it is constructed in python + // side (see _python_cu = torch._C.CompilationUnit() in jit/__init__.py) + // and imported in C++ (see get_python_cu() in + // csrc/jit/python/pybind_utils.h). We import the compilation unit here only + // once for less cost and thread safety. + std::shared_ptr jitCompilationUnit_; + + // jit type parser to parse type_str back to TypePtr for RRef type + // recovery when pickling and unpickling RRef + std::shared_ptr typeParser_; + + // Indicates whether or not we have properly initialized the handler. + bool initialized_; + + // Lock to protect initialization. + std::mutex init_lock_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h new file mode 100644 index 0000000000000000000000000000000000000000..b73d0fba7255ad9ae984ede5572e46a15707dfbd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Functor which is invoked to process an RPC message. This is an abstract class +// with some common functionality across all request handlers. Users need to +// implement this interface to perform the actual business logic. +class TORCH_API RequestCallback { + public: + // Invoke the callback. + c10::intrusive_ptr operator()( + Message& request, + std::vector streams) const; + + virtual ~RequestCallback() = default; + + protected: + // RpcAgent implementation should invoke ``RequestCallback`` to process + // received requests. There is no restriction on the implementation's + // threading model. This function takes an rvalue reference of the Message + // object. It is expected to return the future to a response message or + // message containing an exception. Different rpc agent implementations are + // expected to ensure delivery of the response/exception based on their + // implementation specific mechanisms. 
+ virtual c10::intrusive_ptr processMessage( + Message& request, + std::vector streams) const = 0; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..24e4c3026f8e66d7fa6831e161d6b4a2164be7e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +class TORCH_API RequestCallbackImpl : public RequestCallbackNoPython { + public: + std::unique_ptr deserializePythonRpcCommand( + std::unique_ptr rpc, + const MessageType& messageType) const override; + + c10::intrusive_ptr processPythonCall( + RpcCommandBase& rpc, + std::vector streams) const override; + + c10::intrusive_ptr processScriptCall( + RpcCommandBase& rpc, + std::vector streams) const override; + + c10::intrusive_ptr processScriptRemoteCall( + RpcCommandBase& rpc, + std::vector streams) const override; + + c10::intrusive_ptr processPythonRemoteCall( + RpcCommandBase& rpc, + std::vector streams) const override; + + c10::intrusive_ptr processPythonRRefFetchCall( + RpcCommandBase& rpc) const override; + + void handleRRefDelete(c10::intrusive_ptr& rref) const override; + + c10::intrusive_ptr processRpcWithErrors( + RpcCommandBase& rpc, + const MessageType& messageType, + std::vector streams) const override; + + bool cudaAvailable() const override; + + c10::intrusive_ptr processRRefBackward( + RpcCommandBase& rpc) const override; + + // Helpers to run user-defined functions, operators and other computations. + + c10::intrusive_ptr runJitFunction( + const c10::QualifiedName& name, + std::vector& stack, + std::vector streams, + bool isAsyncExecution) const; + + c10::intrusive_ptr runPythonFunction( + const py::object& function, + std::vector streams, + bool isAsyncExecution) const; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h new file mode 100644 index 0000000000000000000000000000000000000000..ded3a57a8610ebee4efb6f6e524dbd5e59e38bb4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h @@ -0,0 +1,119 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// RequestCallback implementation with no Python dependencies. 
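// Illustrative sketch (the wrapper name `handleRequest` is hypothetical, and
// an empty stream list is assumed to be acceptable for the transport): this
// is roughly how an RpcAgent implementation hands a received request to its
// RequestCallback and obtains the future that will eventually hold the
// response message or an exception.
//
//   c10::intrusive_ptr<JitFuture> handleRequest(
//       Message& request, const RequestCallback& cb) {
//     // The callback may complete asynchronously; the agent sends the
//     // response back to the caller once the returned future is ready.
//     return cb(request, /*streams=*/{});
//   }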
+class TORCH_API RequestCallbackNoPython : public RequestCallback { + public: + c10::intrusive_ptr processMessage( + Message& request, + std::vector streams) const override; + + protected: + virtual std::unique_ptr deserializePythonRpcCommand( + std::unique_ptr rpc, + const MessageType& messageType) const; + + virtual c10::intrusive_ptr processScriptCall( + RpcCommandBase& rpc, + std::vector streams) const; + + virtual c10::intrusive_ptr processPythonCall( + RpcCommandBase& rpc, + std::vector streams) const; + + c10::intrusive_ptr assignOwnerRRef( + const RRefId& rrefId, + const RRefId& forkId, + c10::intrusive_ptr valueFuture) const; + + virtual c10::intrusive_ptr processScriptRemoteCall( + RpcCommandBase& rpc, + std::vector streams) const; + + virtual c10::intrusive_ptr processPythonRemoteCall( + RpcCommandBase& rpc, + std::vector streams) const; + + c10::intrusive_ptr retrieveOwnerRRef(const RRefId& rrefId) const; + + c10::intrusive_ptr processScriptRRefFetchCall( + RpcCommandBase& rpc) const; + + virtual c10::intrusive_ptr processPythonRRefFetchCall( + RpcCommandBase& rpc) const; + + c10::intrusive_ptr processRRefUserDelete( + RpcCommandBase& rpc) const; + + c10::intrusive_ptr processRRefChildAccept( + RpcCommandBase& rpc) const; + + c10::intrusive_ptr processRRefForkRequest( + RpcCommandBase& rpc) const; + + c10::intrusive_ptr processForwardAutogradReq( + RpcCommandBase& rpc, + std::vector streams) const; + + c10::intrusive_ptr processBackwardAutogradReq( + RpcCommandBase& rpc, + std::vector streams) const; + + c10::intrusive_ptr processCleanupAutogradContextReq( + RpcCommandBase& rpc) const; + + c10::intrusive_ptr processRunWithProfilingReq( + RpcCommandBase& rpc) const; + + virtual void handleRRefDelete(c10::intrusive_ptr& rref) const; + + c10::intrusive_ptr processRpc( + RpcCommandBase& rpc, + const MessageType& messageType, + std::vector streams) const; + + virtual c10::intrusive_ptr processRpcWithErrors( + RpcCommandBase& rpc, + const MessageType& messageType, + std::vector streams) const; + + c10::intrusive_ptr handleError( + const std::exception& e, + const MessageType messageType, + int64_t messageId) const; + + virtual bool cudaAvailable() const; + + virtual c10::intrusive_ptr processRRefBackward( + RpcCommandBase& rpc) const; + + // Helpers to run user-defined functions, operators and other computations. + + c10::intrusive_ptr runJitOperator( + const jit::Operator& op, + std::vector& stack, + std::vector streams) const; + + // Helpers to convert various kinds of objects into already-completed futures. 
+ + c10::intrusive_ptr asFuture(IValue value, TypePtr type) const; + + c10::intrusive_ptr asFuture( + c10::intrusive_ptr message) const; + + c10::intrusive_ptr asFuture(std::exception_ptr err) const; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h new file mode 100644 index 0000000000000000000000000000000000000000..fdfc29f33deb79af2bd821c5709142281faf5261 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace torch { +namespace distributed { +namespace rpc { + +PyMethodDef* python_functions(); + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h new file mode 100644 index 0000000000000000000000000000000000000000..0b04c0828708736b85b1fbe9968961a425f38c15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h @@ -0,0 +1,341 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using DeviceMap = std::unordered_map; + +// Default RPC timeout +constexpr float kDefaultRpcTimeoutSeconds = 60; +// Unset RPC timeout. This is the value agent::send() will have if user does not +// pass in a specific timeout, and indicates that we must use the default +// timeout for RPCs. +constexpr float kUnsetRpcTimeout = -1; +constexpr auto kDefaultInitMethod = "env://"; +constexpr float kSecToMsConversion = 1000; +constexpr auto kRpcTimeoutErrorStr = + "RPC ran for more than set timeout ({} ms) and will now be marked with an error"; + +using steady_clock_time_point = + std::chrono::time_point; +// Input is qualified name string, output is JIT StrongTypePtr +// Same as jit::TypeResolver, did not import jit::TypeResolver to here +// because it could introduce cyclic dependencies. +using TypeResolver = + std::function; + +struct TORCH_API RpcBackendOptions { + RpcBackendOptions() + : RpcBackendOptions(kDefaultRpcTimeoutSeconds, kDefaultInitMethod) {} + + RpcBackendOptions(float rpcTimeoutSeconds, std::string initMethod) + : rpcTimeoutSeconds(rpcTimeoutSeconds), + initMethod(std::move(initMethod)) { + TORCH_CHECK(rpcTimeoutSeconds >= 0, "RPC Timeout must be non-negative"); + } + + float rpcTimeoutSeconds; + std::string initMethod; +}; + +// A globally unique ID to identify an RpcAgent +struct TORCH_API WorkerInfo : torch::CustomClassHolder { + WorkerInfo(std::string name, int64_t id); + + WorkerInfo(std::string name, worker_id_t id); + + bool operator==(const WorkerInfo& rhs) { + return (id_ == rhs.id_) && (name_ == rhs.name_); + } + + static constexpr size_t MAX_NAME_LEN = 128; + + const std::string name_; + const worker_id_t id_; +}; + +struct TORCH_API RegisterWorkerInfoOnce { + RegisterWorkerInfoOnce(); +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const WorkerInfo& workerInfo); + +// Struct for options to configure the RPC Retry protocol. +struct TORCH_API RpcRetryOptions { + // Using a default constructor like all other Options structs in the RPC + // codebase. 
TORCH_CHECKs for input validation are done in the + // sendWithRetries function. + RpcRetryOptions() = default; + // Maximum number of times we will retry the RPC + int maxRetries{5}; + // Initial duration between consecutive RPC send attempts + std::chrono::milliseconds rpcRetryDuration{std::chrono::milliseconds(1000)}; + // Constant for exponential backoff used while calculating future wait + // durations + float retryBackoff{1.5}; +}; + +// Struct that stores all the metadata needed to retry a given RPC. +struct TORCH_API RpcRetryInfo { + RpcRetryInfo( + const WorkerInfo& to, + c10::intrusive_ptr message, + c10::intrusive_ptr originalFuture, + int retryCount, + RpcRetryOptions options) + : to_(to), + message_(std::move(message)), + originalFuture_(std::move(originalFuture)), + retryCount_(retryCount), + options_(options) {} + + const WorkerInfo& to_; + c10::intrusive_ptr message_; + // Future that is returned to the caller of sendWithRetries(). + c10::intrusive_ptr originalFuture_; + // Number of send attempts completed so far. + int retryCount_; + RpcRetryOptions options_; +}; + +// ``RpcAgent`` is the base class for sending and receiving RPC messages. It +// provides a unified ``send`` API for both request and response messages, and +// will invoke the given ``RequestCallback`` to process received requests. It +// should immediately become ready to serve request and accept response after +// construction. +class TORCH_API RpcAgent { + public: + // `WorkerInfo` is the globally unique identifier for this RpcAgent instance. + // It contains a ``name_`` field and an ``id_`` field. ``name_`` is the + // globally unique name for this ``RpcAgent``. It is up to the ``RpcAgent`` + // implementation to determine how to resolve names. ``id_`` is the globally + // unique ID for this ``RpcAgent``. This should be determined by the + // ``RpcAgent`` implementation. + // The ``RequestCallback`` will be invoked to handle received requests. This + // ``RpcAgent`` base class makes no assumption on the thread-safeness of the + // ``RequestCallback``. ``RpcAgent`` implementations need to make sure that + // its threading model conform to ``RequestCallback``'s requirement. + // NB: RpcAgent implementations should not start serving requests until + // ``start()`` is called, as there could be other contexts that have not been + // initialized yet at this time. + RpcAgent( + WorkerInfo id, + std::unique_ptr cb, + std::chrono::milliseconds rpcTimeout); + + virtual ~RpcAgent(); + + // Send a message to the ``RpcAgent`` of id ``to`` and returns a + // ``JitFuture`` ptr. The implementation must be asynchronous, i.e., it + // cannot block until it receives the response. + // + // If ``message.isRequest()`` is true, the ``JitFuture`` will be + // completed when the response arrives. For other message types, the Future + // should be ignored by the caller. + virtual c10::intrusive_ptr send( + const WorkerInfo& to, + c10::intrusive_ptr message, + const float rpcTimeoutSeconds = kUnsetRpcTimeout, + const DeviceMap& deviceMap = {}) = 0; + + // Retries sending the message up to maxRetries times until an ACK is + // received. The duration between consecutive sends is increased over + // time using an exponential backoff algorithm. + // + // Sends ``message`` to the ``RpcAgent`` of id ``to`` and returns a + // ``JitFuture`` ptr, just like send(). 
Caller can specify the maximum + // number of retries for this RPC (default is 5), initial duration between + // sends (default is 1000ms), and backoff constant (default is 1.5) by + // passing in the RpcRetryOptions struct. This API might end up + // executing a method twice on the remote end (it does not guarantee + // exactly-once semantics). Therefore, the user must ensure their requests + // are idempotent. + c10::intrusive_ptr sendWithRetries( + const WorkerInfo& to, + c10::intrusive_ptr message, + RpcRetryOptions retryOptions = RpcRetryOptions()); + + // Return a reference to the ``WorkerInfo`` of this RpcAgent. + // NB: not using ``c10::optional`` here because we might + // need to create a separate RPC API lib and avoid forcing all ``RpcAgent`` + // implementations to depend on libtorch. + const WorkerInfo& getWorkerInfo() const; + + // Return a reference to the ``WorkerInfo`` of the given ``workerName``. + virtual const WorkerInfo& getWorkerInfo( + const std::string& workerName) const = 0; + + virtual const WorkerInfo& getWorkerInfo(worker_id_t id) const = 0; + + virtual std::vector getWorkerInfos() const = 0; + + // Retrieve the timeout for all RPCs. + inline std::chrono::milliseconds getRpcTimeout() const { + return rpcTimeout_.load(); + } + + // Set the timeout for all RPCs + inline void setRpcTimeout(const std::chrono::milliseconds& rpcTimeout) { + rpcTimeout_.store(rpcTimeout); + } + + // Call sync and join all internal threads. This method should be called + // before every RPC process exits. + virtual void join(bool shutdown = false, float timeout = 0) = 0; + + // Synchronize the this process with other ``RpcAgent`` processes. Block until + // all ``RpcAgent``s reach this method and send all pending messages. + virtual void sync() = 0; + + // Sets up backend-agnostic state for accepting requests. Currently, this + // entails setting rpcAgentRunning_ to true, creating the retry thread, and + // calling the backend's startImpl. + void start(); + + // Derived classes must override this function to start accepting requests. + // This is used to initialize any backend-specific state. Users must call + // start, not startImpl, to initialize the RPC Agent. + virtual void startImpl() = 0; + + // Stop accepting requests and shutdown the RPC framework as soon as possible + // by terminating all RPC threads. + void shutdown(); + + // Derived classes must override this function to start accepting requests. + // THis is used to clean up any backend-specific state. Users must call + // shutdown, not shutdownImpl, to shutdown the RPC Agent. + virtual void shutdownImpl() = 0; + + // Check if current RPC agent is set. + static bool isCurrentRpcAgentSet(); + + // Retrieve the valid current RPC agent. + static std::shared_ptr getCurrentRpcAgent(); + + // Set the current RPC agent. + static void setCurrentRpcAgent(std::shared_ptr rpcAgent); + + // Retrieve metrics as KV map + virtual std::unordered_map getMetrics() = 0; + + // Retrieve debug info in addition to metrics as KV map + virtual std::unordered_map getDebugInfo(); + + // Flag to control whether GIL wait times + // should be profiled or not. + void enableGILProfiling(bool flag); + + // Retrieve wheher we should profile GIL wait times or not. + bool isGILProfilingEnabled(); + + // Set type resolver that will be passed to JIT pickler to resolver type Ptr + // based on type str. 
+ void setTypeResolver(std::shared_ptr typeResolver); + + // Get the type resolver + std::shared_ptr getTypeResolver(); + + // Retrieves the device map for the provided destination worker. + virtual DeviceMap getDeviceMap(const WorkerInfo& dst) const; + + // Retrieve the (non-CPU) devices that are supported by the agent. + virtual const std::vector& getDevices() const; + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const WorkerInfo workerInfo_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const std::unique_ptr cb_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::atomic rpcTimeout_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::atomic profilingEnabled_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr typeResolver_; + // Atomic boolean indicating whether this agent is running. It controls + // whether several background threads should be running. It is set in + // RpcAgent::start() and unset in the derived class shutdown(). + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::atomic rpcAgentRunning_; + + private: + static std::shared_ptr currentRpcAgent_; + // Add GIL wait time data point to metrics + virtual void addGilWaitTime(const std::chrono::microseconds gilWaitTime) = 0; + friend class PythonRpcHandler; + + // Map that stores metadata for RPC's that may need to be re-tried as well as + // the timepoint at which we should re-try them. + std::map< + steady_clock_time_point, + std::unordered_set>> + rpcRetryMap_; + + // Thread that checks for retryable RPC's in the rpcRetryMap_ and sleeps until + // the next unACKed RPC's timeout has expired. + std::thread rpcRetryThread_; + + // Function that rpcRetryThread_ calls in a loop as long as RpcAgent is + // running. + void retryExpiredRpcs(); + + // This is the callback attached to futures corresponding to send retries. + // This handles 3 cases: 1). send was completed, 2). send failed with an + // error and we've done maxRetries failed send attempts, and 3). send + // failed with an error and we have more retries to go. In case 1, we mark + // the original future as complete. In case 2, we mark the future with an + // error and do not retry again. In case 3, we move the RpcRetryInfo struct + // to another time point in the map to schedule the RPC for a future send. + void rpcRetryCallback( + JitFuture& message, + steady_clock_time_point newTime, + std::shared_ptr earliestRpc); + + // Function that uses the exponential backoff algorithm to compute the next + // time point to retry a given RPC. + inline steady_clock_time_point computeNewRpcRetryTime( + RpcRetryOptions& options, + int retryCount) { + // The exponential backoff algorithm being used here is: + // newTime = timeNow + (retryDuration * (backoffConstant ^ retryCount)). + std::chrono::milliseconds timedelta = + std::chrono::duration_cast( + options.rpcRetryDuration * pow(options.retryBackoff, retryCount)); + return std::chrono::time_point_cast( + std::chrono::steady_clock::now() + timedelta); + } + + // Condition Variable to signal when the rpcRetryMap_ has been populated. + std::condition_variable rpcRetryMapCV_; + + // Mutex to protect RpcRetryMap_. 
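// Worked example of the retry schedule (using the RpcRetryOptions defaults
// declared earlier in this file: maxRetries = 5, rpcRetryDuration = 1000 ms,
// retryBackoff = 1.5): computeNewRpcRetryTime() above spaces consecutive
// retries of the same RPC by rpcRetryDuration * retryBackoff^retryCount,
// i.e. roughly 1000 ms, 1500 ms, 2250 ms, 3375 ms, and 5062 ms for
// retryCount = 0..4, with the duration_cast truncating to whole milliseconds.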
+ std::mutex rpcRetryMutex_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch + +namespace std { +template <> +struct hash { + std::size_t operator()( + const torch::distributed::rpc::WorkerInfo& worker_info) const noexcept { + return worker_info.id_; + } +}; +} // namespace std diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h new file mode 100644 index 0000000000000000000000000000000000000000..31e54d3a659adcdc2cac93e175b29635d942870a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Base class for all RPC request and responses. +class RpcCommandBase { + public: + // Need to override this to serialize the RPC. This should destructively + // create a message for the RPC (Hence the &&). + c10::intrusive_ptr toMessage() && { + JitRRefPickleGuard jitPickleGuard; + return std::move(*this).toMessageImpl(); + } + virtual c10::intrusive_ptr toMessageImpl() && = 0; + virtual ~RpcCommandBase() = 0; +}; + +inline RpcCommandBase::~RpcCommandBase() = default; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h new file mode 100644 index 0000000000000000000000000000000000000000..87ffd4f868e3d76bcb5777650e2fa8a22391e7b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h @@ -0,0 +1,339 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace distributed { +namespace rpc { + +namespace callback { +// It's the callback for RemoteCall. +void TORCH_API +confirmPendingUser(const JitFuture& jitFuture, const ForkId& expectedForkId); + +// It's the callback for finishing creating owner rref, it returned deletedRRef, +// so that the deletedRRef can be handled under GIL in python_functions.cpp if +// deletedRRef contains python object. +c10::intrusive_ptr TORCH_API +finishCreatingOwnerRRef(const JitFuture& jitFuture, const RRefId& rrefId); +} // namespace callback + +// Manages RRef lifetime and keeps track of RRef forks. +class TORCH_API RRefContext { + public: + static RRefContext& getInstance(); + // NB: This method must be called before destructing RRefContext singleton. + // Similar to delForkOfOwner, this method returns a vector of OwnerRRefs that + // hold py::object. The call-site is also responsible for resetting those + // shared_ptr objects with a GIL. See comments at delForkOfOwner() for more + // details. 
+ static std::vector> destroyInstance( + bool ignoreRRefLeak = true); + + static void handleException(const JitFuture& jitFuture); + + // handle exception without throw ::c10::Error again + static void handleExceptionSilent(const JitFuture& jitFuture); + + RRefContext(const RRefContext&) = delete; + RRefContext(RRefContext&& other) = delete; + void operator=(const RRefContext&) = delete; + RRefContext& operator=(RRefContext&& other) = delete; + + ~RRefContext(); + + // get the worker id of the current worker + inline worker_id_t getWorkerId() const { + return agent_->getWorkerInfo().id_; + } + + // get the worker name of the current worker + inline const std::string& getWorkerName() const { + return agent_->getWorkerInfo().name_; + } + + // generate a globally unique ID + inline GloballyUniqueId genGloballyUniqueId() { + return GloballyUniqueId(getWorkerId(), nextLocalId_++); + } + + inline const std::shared_ptr& agent() const { + return agent_; + } + + // create a ``UserRRef`` owned by the worker ``ownerId`` + c10::intrusive_ptr createUserRRef( + worker_id_t ownerId, + const TypePtr& type); + + // Convert an RRefForkData into an RRef. This RRef could be user or owner. + // This RRef could have already existed before, or could be created in this + // method, we pass type here to validate or help the rref creation. + c10::intrusive_ptr getOrCreateRRef( + const RRefForkData& rfd, + const TypePtr& type); + + // Get the ``OwnerRRef`` of id ``rrefId``. If it does not exist, create a new + // one. This function is called in two places: + // 1. when processing ``rpc.remote()``, i.e., ``SCRIPT_REMOTE_CALL`` + // ``PYTHON_REMOTE_CALL``. + // 2. when unpickling ``OwnerRRef``. + // What's common in these two cases are, 1) the RRefId is already generated + // 2) the TypePtr is presented. So it can always create the ``OwnerRRef`` if + // it is not yet available. + c10::intrusive_ptr getOrCreateOwnerRRef( + const RRefId& rrefId, + const TypePtr& type); + + // Create an empty owner rref of type. + // This method is called to first time generate an ``OwnerRRef``, e.g., + // 1) ``rpc.RRef(obj)`` + // 2) create the ``OwnerRRef`` on `rpc.remote()` caller side. + // What's common in these two cases are, 1) the RRefId hasn't been generated + // 2) the TypePtr is presented. + c10::intrusive_ptr createOwnerRRef(const TypePtr& type); + + // Returns a Future of the OwnerRRef, which will be marked completed when + // ``OwnerRRef`` is created. This method is used when the TypePtr is not + // available, e.g., when processing to_here(). The forceCreated flag can be + // used to ensure that the rref is created on the owner, otherwise throw in + // cases where the user of this API expects this to return a completed future. + // Note that the return value is a intrusive_ptr to a c10::ivalue::Future that + // holds the RRef. + c10::intrusive_ptr getOwnerRRef( + const RRefId& rrefId, + bool forceCreated = false); + + // Adding the RRefId of an OwnerRRef into the forks_ map. This is useful when + // making a remote call to self, which as for now, still goes through serde + // and invokes request callback. In this case, the OwnerRRef has already been + // created on the send side, and we need to pass it to the receive side, + // instead of creating a new OwnerRRef. This is done by adding the OwnerRRef + // into owners_. However, that alone is not enough, as it could be deleted + // when all UserRRef die, which would then remove the OwnerRRef from owners_ + // and this could happen before the self remote call finishes. 
To prevent + // that, this API adds the RRefId as a ForkId, which will then delete the + // ForkId when the self remote is done. + void addSelfAsFork(c10::intrusive_ptr& rref); + + // Register a fork of the ``OwnerRRef``, and inserts a intrusive_ptr of the + // ``OwnerRRef`` in a map to keep it alive. + void addForkOfOwner(const RRefId& rrefId, const ForkId& forkId); + // Performs the same function as addForkOfOwner but ignores duplicate + // requests. This idempotent function is used with RREF_FORK_REQUEST calls, + // whereas all other message types use the non-idempotent variant. + void addForkOfOwnerIfNotPresent(const RRefId& rrefId, const ForkId& forkId); + // Delete a fork of the ``OwnerRRef``. NB: this could trigger deletion on the + // IValue or py::object. For the later, this method will acquire GIL. + // NB: If this fork deletion triggered deleting OwnerRRef, this method will + // return a shared_ptr to the OwnerRRef, which is likely to be the last + // shared_ptr instance for it. Therefore, deleting this shared_ptr + // will also trigger deleting the object it points to. If OwnerRRef holds a + // py::object, deleting it require GIL. The call site should guarded it with + // a GIL and reset the shared_ptr. The GIL-guarded deletion is intentionally + // left out of this function to avoid creating dependency on pybind. + c10::intrusive_ptr delForkOfOwner( + const RRefId& rrefId, + const ForkId& forkId); + + // Invoked when pickling an RRef to setup child/fork properly + RRefForkData prepareChildFork(const c10::intrusive_ptr& rref); + // Invoked when unpickling an RRef to send RREF_FORK_REQUEST to owner and + // send RREF_CHILD_ACCEPT to the parent. + // NB: forkId is necessary here as the rref could be an OwnerRRef + void notifyOwnerAndParentOfFork( + const ForkId& forkId, + worker_id_t parent, + const c10::intrusive_ptr& rref); + + // When a UserRRef is forked to another worker (user or owner), it is added + // into pendingChildren_ to be held alive until it receives RREF_CHILD_ACCEPT + // from the child. + // NB: This is necessary for both user and owner child. As we do not have FIFO + // communication between workers, we need this strategy to make sure that all + // previously submitted rpc/remote calls are acked before sending out the + // RREF_USER_DELETE message. Otherwise, the OwnerRRef could be deleted too + // soon. + void addPendingChild( + const ForkId& forkId, + const c10::intrusive_ptr& rref); + void delPendingChild(const ForkId& forkId); + + // When a UserRRef is created, it is added into pendingUsers_ to be held alive + // until it receives RREF_USER_ACCEPT from the owner. + void addPendingUser( + const ForkId& forkId, + const c10::intrusive_ptr& rref); + void delPendingUser(const ForkId& forkId); + void addConfirmedUser( + const ForkId& forkId, + const c10::intrusive_ptr& rref); + + // Retrieve a pending user given the fork ID. Throws if the user has already + // been confirmed (i.e. is no longer in the pendingUsers_ map). + c10::intrusive_ptr getPendingUser(const ForkId& forkId); + + // Start recording new pending UserRRefs. All pending UserRRefs introduced + // after this point will be put into the thread_local userTable_, which will + // then be consumed and cleared in waitForThreadLocalPendingRRefs(). + void recordThreadLocalPendingRRefs(); + // End recording new pending UserRRefs, and clear the thread_local userTable_. + // Returns a Future which will be marked as completed when all pending + // UserRRefs in the current userTable_ are confirmed by their owners. 
The bool + // value in the Future is unused. + // This method is useful to make sure RRefs in user function arguments are + // confirmed before launching user code. + // NB: Callers of this method does not need to keep the returned Future alive, + // because this Future is already captured in callbacks of the + // PendingUserState. If there is no pending UserRRefs, this method returns a + // completed future. + c10::intrusive_ptr waitForThreadLocalPendingRRefs(); + // Only call this function when there are errors during a recording session, + // and it is likely that waitForThreadLocalPendingRRefs() cannot be invoked + // properly. + // TODO: make this a context guard + void clearRecordedPendingRRefsOnError(); + + void delUser( + const worker_id_t owner, + const RRefId& rrefId, + const ForkId& forkId); + void delAllUsersAndUnforkedOwners(std::chrono::milliseconds timeoutMillis); + + std::unordered_map getDebugInfo(); + + private: + struct PendingUserState { + PendingUserState(c10::intrusive_ptr rref) + : rref_(std::move(rref)), + confirmationFuture_(c10::make_intrusive(BoolType::get())) { + } + + inline void confirm() { + c10::static_intrusive_pointer_cast(rref_)->confirm(); + confirmationFuture_->markCompleted(); + } + + c10::intrusive_ptr rref_; + // Use Future.wait() and Future.markCompleted() to block and unblock user + // functions. The bool value wrapped by the future_ is not used. + c10::intrusive_ptr confirmationFuture_; + }; + + RRefContext(std::shared_ptr); + + c10::intrusive_ptr createUserRRef( + worker_id_t ownerId, + const RRefId& rrefId, + const ForkId& forkId, + const TypePtr& type); + + void finishForkRequest(const ForkId& forkId, worker_id_t parent); + + // If there is any leak on any RRef, this method will throw an error. + void checkRRefLeaks(bool ignoreRRefLeak); + + static std::atomic nextLocalId_; + + const std::shared_ptr agent_; + mutable std::mutex mutex_; + // Keep OwnerRRefs alive until there is no living UserRRefs. + std::unordered_map, RRefId::Hash> owners_; + // A map to track OwnerRRefs that are requested but not yet created. This can + // happen if the to_here() message is processed on the owner before the + // corresponding creator rpc.remote() message. If this happens, instead of + // to_here() RPC thread to block waiting for the OwnerRRef creation, the + // RRefContext returns a Future, so that the RPC request processing logic can + // attach subsequent code as a callback to that Future. + // NB: the OwnerRRefs in this map must be cleared when the corresponding + // OwnerRRef is created. Note that the values in this map are intrusive_ptrs + // to c10::ivalue::Future that will be marked completed with the owner RRef. + std::unordered_map, RRefId::Hash> + pendingOwners_; + // Tracks known living UserRRefs of an OwnerRRef + std::unordered_map< + RRefId, + std::unordered_set, + RRefId::Hash> + forks_; + + // This cond var is used by deleteAllUsers(), a event notification is sent if + // number of pending UserRRef or UserRRef children is reduced, or + // number of owned OwnerRRef is reduced. + std::condition_variable deleteAllUsersCV_; + // The follow 3 maps keep UserRRefs alive by holding a intrusive_ptr to the + // RRef instances. A UserRRef must be added into this map if any of the + // following two conditions is true: + // + // (1) A UserRRef has not been accepted by owner yet. + // + // It can be used or shared, but cannot be deleted, and hence kept alive + // in this map. 
A message of type RREF_USER_ACCEPT will move the + // corresponding RRef from pendingUsers_ map to confirmedUsers_ map. + std::unordered_map, ForkId::Hash> + pendingUsers_; + // UserRRefs are added into this map when it is confirmed by the owner. + // When destroying RRefContext this map helps to find local UserRRefs + // and send delete messages if they are still not deleted by Python + // garbage collection. + std::unordered_map, ForkId::Hash> + confirmedUsers_; + + // (2) A UserRRef has forked a child UserRRef which has not been accepted by + // the owner yet. + // + // In this case, this UserRRef cannot send out RREF_USER_DELETE message, + // as it could potentially trigger the OwnerRRef been deleted before the + // owner learns about the forked child. + std::unordered_map, ForkId::Hash> + pendingChildren_; + + // The RRef context performs its operations through async RPC requests, in + // order to not block the user code. Therefore the RRef context's state may be + // lagging a bit behind what it is intended to be, while it waits for these + // requests to complete. To allow syncing when needed, we store the count of + // these pending requests, so that users can wait for it to reach zero. + std::atomic numPendingFutures_{0}; + + std::mutex destroyedMutex_; + bool destroyed_{false}; + + // Thread local states to keep UserRRefs deserialized from user function + // arguments. + static thread_local std::vector> userTable_; + // A flag indicating whether subsequently created UserRRefs should be added to + // the thread_local userTable_. The flag is set to true before serializing + // RPC arguments and then set to false before running the corresponding + // user code. See addPendingUser and delPendingUser for more details. + // NB: The reason for having this flag is because addPendingUser are called in + // two cases, and we only want to track the 2nd case. + // (1) RRef as the return value: when calling rpc.remote, the UserRRef on the + // caller side is added to the context using addPendingUser. + // (2) RRef as an argument: When running an RPC using RRefs as arguments, the + // RRef is forwarded to the callee as new UserRRefs (if the callee is not + // the owner). In this case, we block running the user function until all + // UserRRefs are confirmed by the owner. + // This contract gurantees that no UserRRefs can be used remotely without + // confirmation. Note that, however, the UserRRef created by rpc.remote can + // still be passed to local functions as arguments and used there. This is by + // design, because this feature is especially useful when, say a master node + // creates multiple UserRRefs in a loop and then shares them with other nodes. + // Blocking every iteration in the loop until RRefs are confirmed will slow + // this down. This nuance on UserRRef can be interpreted as we only make + // exceptions for UserRRef creators. And using the UserRRef on its creator + // without confirmation is OK, because the creator would either call to_here + // or forward the UserRRef, and both would then require confirmations from the + // owner. 
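// Illustrative sketch of the recording contract described above (assumed to
// run on the callee while RPC arguments are converted, which is where new
// UserRRefs can appear):
//
//   auto& ctx = RRefContext::getInstance();
//   ctx.recordThreadLocalPendingRRefs();
//   // ... convert the RPC arguments; UserRRefs created during this step are
//   // tracked in the thread_local userTable_ ...
//   c10::intrusive_ptr<JitFuture> confirmed =
//       ctx.waitForThreadLocalPendingRRefs();
//   // Launch the user function only after `confirmed` completes, so every
//   // UserRRef argument has been acknowledged by its owner.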
+ static thread_local bool recording_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..ccb00b45e1d5e33993aaf83bdba4d5170c47a920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h @@ -0,0 +1,420 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace distributed { +namespace rpc { + +class RRef; +class RRefContext; +class UserRRef; + +constexpr int OWNER_IDX = 0; // index of ownerId in the tuple +constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple +constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple +constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple +constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple +constexpr int PARENT_IDX = 5; // index of parent in the tuple +constexpr int TYPE_IDX = 6; // index of parent in the tuple + +// NB: if more fields are added, make sure this field is also bumped +constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple + +// Represents fork of an RRef to be sent over the wire. +struct TORCH_API RRefForkData { + const worker_id_t ownerId_; + const RRefId rrefId_; + const ForkId forkId_; + const worker_id_t parent_; + const std::string typeStr_; + + RRefForkData( + worker_id_t ownerId, + const RRefId& rrefId, + const ForkId& forkId, + worker_id_t parent, + std::string typeStr); +}; + +// Note [RRef Protocol] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// [Background] +// +// RRef stands for Remote REFerence. Each RRef is owned by a single worker +// (i.e., owner) and can be used by multiple users. The owner stores the real +// data referenced by its RRefs. RRef needs to support fast and scalable RPC. +// Hence, in the design, we avoid using a single global master to keep RRef +// states, instead owners will keep track of the global reference counts +// for its RRefs. Every RRef can be uniquely identified by a global RRefId, +// which is assigned at the time it is first created either on a user or on the +// owner. +// +// On the owner worker, there is only one OwnerRRef instance, which contains the +// real data, while on user workers, there can be as many UserRRefs as +// necessary, and UserRRef does not hold the data. All usage on the OwnerRRef +// should retrieve the unique OwnerRRef instance using the globally unique +// RRefId. //A UserRRef will be created when it is used as an argument or return +// value in dist.rpc or dist.remote call, but RRef forking and reference +// counting (RC) are completely transparent to applications. Every UserRRef will +// also have its globally unique ForkId. +// +// [Assumptions] +// +// 1. Transient Network Failures +// +// TODO: current RRef implementation does not tolerate failures +// +// The RRef design handles transient network failures by retrying +// messages. Node crashes or permanent network partition is beyond the scope. +// When those incidents occur, the application may take down all workers, revert +// to the previous checkpoint, and resume training. +// +// 2. Non-idempotent UDFs +// +// We assume UDFs are not idempotent and therefore cannot be retried. 
However, +// internal RRef control messages are idempotent and retried upon message +// failure. +// +// TODO: RRef internal messages are not yet idempotent +// +// 3. Out of Order Message Delivery +// +// We do not assume message delivery order between any pair of nodes, because +// both sender and receiver are using multiple threads. There is no guarantee on +// which message will be processed first. +// +// [RRef Lifetime] +// +// The goal of the protocol is to delete an OwnerRRef at an appropriate time. +// The right time to delete an OwnerRRef is when there are no living UserRRefs +// and Python GC also agrees to delete the OwnerRRef instance on the owner. The +// tricky part is to determine if there are any living UserRRefs. +// +// A user can get a UserRRef in three situations: +// +// (1). Receiving a UserRRef from the owner. +// (2). Receiving a UserRRef from another user. +// (3). Creating a new UserRRef owned by another worker. +// +// (1) is the simplest case where the owner initiates the fork, and hence it can +// easily increment local RC. The only requirement is that any UserRRef must +// notify the owner before destruction. Hence, we need the first guarantee: +// +// G1. The owner will be notified when any UserRRef is deleted. +// +// As messages might come delayed or out-of-order, we need more one guarantee to +// make sure the delete message is not sent out too soon. Let us first introduce +// a new concept. If A sends an RPC to B that involves an RRef, we call the RRef +// on A the parent RRef and the RRef on B the child RRef. +// +// G2. Parent RRef cannot be deleted until the child RRef is confirmed by the +// owner. +// +// Under (1), where the caller is UserRRef and callee is OwnerRRef, it simply +// means that the user will not send out the delete message until all previous +// messages are ACKed. Note that ACKed does not mean the owner finishes +// executing the function, instead, it only means the owner has retrieved its +// local OwnerRRef and about to pass it to the function, which is sufficient to +// keep the OwnerRRef alive even if the delete message from the user arrives at +// the owner before the function finishes execution. +// +// With (2) and (3), it is possible that the owner only partially knows the RRef +// fork graph or not even knowing it at all. For example, the RRef could be +// constructed on a user, and before the owner receives the RPC call, the +// creator user might have already shared the RRef with other users, and those +// users could further share the RRef. One invariant is that the fork graph of +// any RRef is always a tree rooted at the owner, because forking an RRef always +// creates a new RRef instance, and hence every RRef has a single parent. One +// nasty detail is that when an RRef is created on a user, technically the owner +// is not its parent but we still consider it that way and it does not break the +// argument below. +// +// The owner's view on any node (fork) in the tree has three stages: +// +// 1) unknown -> 2) known -> 3) deleted. +// +// The owner's view on the entire tree keeps changing. The owner deletes its +// OwnerRRef instance when it thinks there are no living UserRRefs, i.e., when +// OwnerRRef is deleted, all UserRRefs could be either indeed deleted or +// unknown. The dangerous case is when some forks are unknown and others are +// deleted. +// +// G2 trivially guarantees that no parent UserRRef Y can be deleted before the +// owner knows all of Y's children UserRRefs. 
+// +// However, it is possible that the child UserRRef Z may be deleted before the +// owner knows its parent Y. More specifically, this can happen when all of Z's +// messages are processed by the owner before all messages from Y, including the +// delete message. Nevertheless, this does not cause any problem. Because, at +// least one of Y's ancestor will be alive, and it will prevent the owner from +// deleting the OwnerRRef. Consider the following example: (NB: this scenario +// will no longer relevant when we block UDF until all RRefs are confirmed by +// the owner) +// +// OwnerRRef -> A -> Y -> Z +// +// OwnerRRef forks to A, then A forks to Y, and Y forks to Z. Z can be deleted +// without OwnerRRef knowing Y. However, the OwnerRRef will at least know A, as +// the owner directly forks the RRef to A. A won't die before the owner knows Y. +// +// Things get a little trickier if the RRef is created on a user: +// +// OwnerRRef +// ^ +// | +// A -> Y -> Z +// +// If Z calls to_here on the UserRRef, the owner at least knows A when Z is +// deleted, because otherwise to_here wouldn't finish. If Z does not call +// to_here, it is possible that the owner receives all messages from Z before +// any message from A and Y. In this case, as the real data of the OwnerRRef has +// not been created yet, there is nothing to be deleted either. It is the same +// as Z does not exist at all Hence, it's still OK. +// +// See #26759 for more details and discussions. +// +// TODO: make RRef an IValue, and edit createStackForSchema accordingly +// TODO: make RRef system messages idempotent and retry on failures. +// +// ``RRef`` is the base type for both ``UserRRef`` and ``OwnerRRef``. +// Each ``RRef`` has a globally unique ``RRefId``. +class TORCH_API RRef : public RRefInterface { + public: + // RRef is made NOT copyable NOT movable to prevent messing up reference + // counting. + explicit RRef(const RRef& other) = delete; + explicit RRef(RRef&& other) = delete; + RRef& operator=(RRef&& other) = delete; + + ~RRef() override = default; + + // returns the worker id of the owner + inline worker_id_t owner() const override { + return ownerId_; + } + + // returns the worker name of the owner + inline std::string ownerName() const override { + return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_).name_; + } + + // returns the worker info of the owner + inline WorkerInfo ownerWorkerInfo() const { + return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_); + } + + // Returns the globally unique RRefId of this RRef + inline const RRefId& rrefId() const { + return rrefId_; + } + + inline bool isPyObj() const { + return type_ == PyObjectType::get(); + } + inline const TypePtr type() const override { + return type_; + } + + // Save the future corresponding to the creation of this RRef on a remote + // node. Note that this is only set when processing requests invoked with + // rpc.remote. This is only used to get the future corresponding to the rref + // for profiling use cases. + inline void registerOwnerCreationFuture(c10::intrusive_ptr fut) { + ownerCreationFuture_ = std::move(fut); + } + + // Get the future corresponding to the creation of this rref. + inline c10::intrusive_ptr getOwnerCreationFuture() const { + return ownerCreationFuture_; + } + + // Check if creation of this RRef on owner node has timed out. + inline bool getTimedOut() const { + return timedOut_.load(); + } + + // Dispatches an error to the correct handler based on its RPCErrorType. 
+ void handleError(RPCErrorType errorType, const JitFuture& JitFuture); + + // Send delete UserRRef request to Owner, + // if the request hasn't been sent yet. + // There are 2 cases to call it, + // 1, Python GC decides end of UserRRef lifetime, calling destructor. + // 2, RPC module graceful shutdown calls it on all UserRRefs tracked + // in the RRefContext. + virtual void tryDel() {} + + protected: + // Indicates that the creation of this RRef on owner node has timed out. + inline void setTimedOut() { + timedOut_ = true; + } + friend class RRefContext; + + RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type); + + virtual RRefForkData fork() const; + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const worker_id_t ownerId_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const RRefId rrefId_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::atomic timedOut_{false}; + + // type field to denote the type of the element that the RRef is holding + // it could be any TypePtr that JIT support, including PyObjectType + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const TypePtr type_; + // Future corresponding to request to create RRef on remote node. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + c10::intrusive_ptr ownerCreationFuture_; +}; + +// ``UserRRef`` represents a user of an RRef. Besides the ``RRefId``, each user +// also has a globally unique ``ForkId`` to identify this user. ``UserRRef`` +// never owns the real value, the only way to get the value of the ``RRef`` is +// to call ``to_here()`` and get a copy.. +class TORCH_API UserRRef final : public RRef { + public: + UserRRef(const UserRRef& other) = delete; + UserRRef(UserRRef&& other) = delete; + UserRRef& operator=(const UserRRef& other) = delete; + UserRRef& operator=(UserRRef&& other) = delete; + + UserRRef( + worker_id_t ownerId, + const RRefId& rrefId, + const ForkId& forkId, + TypePtr type); + + inline bool isOwner() const override { + return false; + } + + inline bool confirmedByOwner() const override { + return confirmedByOwner_; + } + + // Returns the globally unique ForkId of this RRef + const ForkId& forkId() const; + + // Get of copy of the value from the ``OwnerRRef``. If the value is not ready + // yet, this call will block. + IValue toHere( + const float timeoutSeconds = + torch::distributed::rpc::kUnsetRpcTimeout) const; + + void tryDel() override; + + // Will be called when refcount reaches 0. + // Upon destruction, this ``UserRRef`` will tell the owner to deref. + void release_resources() override; + + // Will be called when both refcount and weakcount reach 0. See + // https://github.com/pytorch/pytorch/blob/9116f02bebf3a5260feef5732d36c54ecb3b4033/c10/util/intrusive_ptr.h#L204 + // This is called on destructing the wrapping intrusive_ptr_target instance + // and it's data members. + ~UserRRef() override; + + private: + friend class RRefContext; + + RRefForkData fork() const override; + inline void confirm() { + confirmedByOwner_ = true; + } + + const ForkId forkId_; + + // Indicates if this user has sent delete message to it's owner. + // Note, thread safety is needed because delete message could be sent by + // either the destructor called by Python garbage collection or RRefContext + // proactive cleanup on RPC graceful shutdown. 
+ std::mutex deletedOnOwnerMutex_; + bool deletedOnOwner_{false}; + // Indicating whether this UserRRef has been confirmed by its owner. + std::atomic confirmedByOwner_; +}; + +// Keep the template only on the derived class because ``RRefContext`` needs to +// erase the type on ``RRef`` and keep them in one map. +class TORCH_API OwnerRRef final : public RRef { + public: + OwnerRRef(const OwnerRRef& other) = delete; + OwnerRRef(OwnerRRef&& other) = delete; + OwnerRRef& operator=(const OwnerRRef& other) = delete; + OwnerRRef& operator=(OwnerRRef&& other) = delete; + + OwnerRRef( + worker_id_t ownerId, + const RRefId& rrefId, + TypePtr type, + std::vector devices); + + OwnerRRef( + worker_id_t ownerId, + const RRefId& rrefId, + TypePtr type, + c10::optional value, + std::vector devices); + + inline bool isOwner() const override { + return true; + } + + // OwnerRRef is always confirmed, while UserRRef is only confirmed when the + // owner knows about it. + inline bool confirmedByOwner() const override { + return true; + } + + // Get a constant reference of the real value. This method will block if the + // value is not ready. This method does not need GIL as it does not create + // any new py::object. It will throw if there is an error. + const IValue& getValue() const; + + // Set the value of this ``OwnerRRef``. This method does not need GIL as it + // does not create any new py::object. + void setValue(IValue&& value); + // Sets the value of this ``OwnerRRef`` to contain an exception. + void setError(std::exception_ptr eptr); + + // Has a value or error been set? + bool hasValue() const; + // Gets a future that is satisfied when the value or error is set. + c10::intrusive_ptr getFuture(); + + private: + friend class RRefContext; + + c10::intrusive_ptr future_; +}; + +TORCH_API std::ostream& operator<<(std::ostream& os, const RRef& rref); + +// Helper function that casts from c10::RRefInterface to OwnerRRef +inline TORCH_API c10::intrusive_ptr fromRRefInterface( + const c10::intrusive_ptr& rrefInterface) { + return c10::static_intrusive_pointer_cast(rrefInterface); +} + +// Helper function that casts from OwnerRRef to c10::RRefInterface +inline TORCH_API c10::intrusive_ptr fromOwnerRRef( + const c10::intrusive_ptr& ownerRRef) { + return c10::static_intrusive_pointer_cast(ownerRRef); +} + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h new file mode 100644 index 0000000000000000000000000000000000000000..bd2f35744930728c3474f2d38e64b36296d69574 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h @@ -0,0 +1,164 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Temporary solution of RRef operations. +// TODO: Remove all these messages and use rpc + registered functions instead. 
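// Illustrative sketch (the wrapper names `encodeDelete` and `decodeDelete`
// are hypothetical): like the other RpcCommandBase subclasses in this file,
// each of these messages is turned into a wire Message on the sender and
// reconstructed on the receiver.
//
//   c10::intrusive_ptr<Message> encodeDelete(
//       const RRefId& rrefId, const ForkId& forkId) {
//     // toMessage() is rvalue-qualified, so it is called on a temporary.
//     return RRefUserDelete(rrefId, forkId).toMessage();
//   }
//   std::unique_ptr<RRefUserDelete> decodeDelete(const Message& m) {
//     return RRefUserDelete::fromMessage(m);
//   }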
+class TORCH_API RRefMessageBase : public RpcCommandBase { + public: + RRefMessageBase(const RRefId& rrefId, MessageType type) + : rrefId_(rrefId), type_(type) {} + + const RRefId& rrefId(); + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const RRefId rrefId_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const MessageType type_; +}; + +class TORCH_API ForkMessageBase : public RRefMessageBase { + public: + ForkMessageBase(const RRefId& rrefId, const ForkId& forkId, MessageType type) + : RRefMessageBase(rrefId, type), forkId_(forkId) {} + + const ForkId& forkId(); + + c10::intrusive_ptr toMessageImpl() && override; + static std::pair fromMessage( + const Message& message, + MessageType type); + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const ForkId forkId_; +}; + +// UserRRef uses this message to fetch the remote RRef value from the owner. +class TORCH_API ScriptRRefFetchCall final : public RRefMessageBase { + public: + ScriptRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId) + : RRefMessageBase(rrefId, MessageType::SCRIPT_RREF_FETCH_CALL), + fromWorkerId_(fromWorkerId) {} + + inline worker_id_t fromWorkerId() const { + return fromWorkerId_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const Message& message); + + private: + const worker_id_t fromWorkerId_; +}; + +class TORCH_API PythonRRefFetchCall final : public RRefMessageBase { + public: + PythonRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId) + : RRefMessageBase(rrefId, MessageType::PYTHON_RREF_FETCH_CALL), + fromWorkerId_(fromWorkerId) {} + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage( + const Message& message); + + private: + const worker_id_t fromWorkerId_; +}; + +// OwnerRRef uses this message to send the RRef value to a remote UserRRef +class TORCH_API RRefFetchRet : public RpcCommandBase { + public: + RRefFetchRet(std::vector values, MessageType type) + : values_(std::move(values)), type_(type) {} + + const std::vector& values(); + c10::intrusive_ptr toMessageImpl() && override; + + private: + std::vector values_; + const MessageType type_; +}; + +class TORCH_API ScriptRRefFetchRet final : public RRefFetchRet { + public: + explicit ScriptRRefFetchRet(std::vector values) + : RRefFetchRet(std::move(values), MessageType::SCRIPT_RREF_FETCH_RET) {} + + static std::unique_ptr fromMessage( + const Message& message); +}; + +class TORCH_API PythonRRefFetchRet final : public RRefFetchRet { + public: + explicit PythonRRefFetchRet(std::vector values) + : RRefFetchRet(std::move(values), MessageType::PYTHON_RREF_FETCH_RET) {} + + static std::unique_ptr fromMessage( + const Message& message); +}; + +// UserRRef (regardless it's the creator or not) uses this message to notify +// OwnerRRef on delete. 
+class TORCH_API RRefUserDelete final : public ForkMessageBase { + public: + RRefUserDelete(const RRefId& rrefId, const ForkId& forkId) + : ForkMessageBase(rrefId, forkId, MessageType::RREF_USER_DELETE) {} + + static std::unique_ptr fromMessage(const Message& message); +}; + +class TORCH_API RemoteRet final : public ForkMessageBase { + public: + RemoteRet(const RRefId& rrefId, const ForkId& forkId) + : ForkMessageBase(rrefId, forkId, MessageType::REMOTE_RET) {} + + static std::unique_ptr fromMessage(const Message& message); +}; + +// A child RRef uses this message to notify its parent that the child has been +// confirmed by the owner. +class TORCH_API RRefChildAccept final : public RpcCommandBase { + public: + explicit RRefChildAccept(const ForkId& forkId) : forkId_(forkId) {} + + const ForkId& forkId() const; + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + private: + const ForkId forkId_; +}; + +// A child RRef uses this message to send a fork request to the owner. +class TORCH_API RRefForkRequest final : public ForkMessageBase { + public: + RRefForkRequest(const RRefId& rrefId, const ForkId& forkId) + : ForkMessageBase(rrefId, forkId, MessageType::RREF_FORK_REQUEST) {} + + static std::unique_ptr fromMessage(const Message& message); +}; + +class TORCH_API RRefAck final : public RpcCommandBase { + public: + RRefAck() = default; + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h new file mode 100644 index 0000000000000000000000000000000000000000..2fc0efb8cdc717b27c3adc31103fff3e5e86a783 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using torch::jit::Operator; + +// A ScriptCall instance represents an invocation of a builtin operator for a +// TorchScript function. If it is a builtin operator, it +// contains a shared ptr to the `Operator` and a list of arguments. +// If it is a TorchScript function, it contains a non empty qualifiedName string +// to the TorchScript function schema name and a list of arguments. +class TORCH_API ScriptCall : public RpcCommandBase { + public: + // Constructor for builitin operator call. + ScriptCall(std::shared_ptr op, std::vector&& stack); + // Constructor for TorchScript function call. 
+ ScriptCall( + const c10::QualifiedName& qualifiedName, + std::vector&& stack, + const bool isAsyncExecution = false); + + bool hasOp() const; + std::shared_ptr op() const; + bool hasQualifiedName() const; + const c10::QualifiedName& qualifiedName() const; + // return the argument stack of this builtin operator + const std::vector& stack() const; + std::vector& stackRef(); + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + ~ScriptCall() override = default; + + protected: + virtual void toIValues(std::vector& ivalues) const; + static std::unique_ptr fromIValues( + std::vector& ivalues); + + private: + // Given an operator symbol and a string schema, return the matched operator. + static std::shared_ptr matchOperator(const std::string& str_schema); + + static const std::string BUILTIN_OP_NAMESPACE_; + static const std::string ATEN_PREFIX_; + + // This field has value if this ScriptCall represents invocation of a builtin + // operator. + c10::optional> op_; + // This field has non empty string if this ScriptCall represents invocation of + // an annotated torchscript function defined by users. + c10::optional qualifiedName_; + std::vector stack_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h new file mode 100644 index 0000000000000000000000000000000000000000..460bc7352bd1f1d7571c4a705a4eae49f6532eec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using torch::jit::Operator; + +// A ScriptRemoteCall instance represents an invocation of `dist.remote` on a +// builtin operator. Currently, it does not support using RRef as arguments yet. +// Besides the operator and a vector of arguments, ScriptRemoteCall also +// contains the RRefId and the ForkId of the return value RRef. +class TORCH_API ScriptRemoteCall final : public ScriptCall { + public: + // Constructor for builitin operator call. + ScriptRemoteCall( + std::shared_ptr op, + std::vector&& stack, + const RRefId& retRRefId, + const ForkId& retForkId); + + // Constructor for TorchScript function call. 
+ ScriptRemoteCall( + const c10::QualifiedName& qualifiedName, + std::vector&& stack, + const RRefId& retRRefId, + const ForkId& retForkId, + const bool isAsyncExecution); + + inline const RRefId& retRRefId() const { + return retRRefId_; + } + + inline const ForkId& retForkId() const { + return retForkId_; + } + + static std::unique_ptr fromIValues( + std::vector& ivalues); + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + private: + const RRefId retRRefId_; + const ForkId retForkId_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..958b59bab5bbd1d8818964e5af9c42b3f4a16154 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Return value of a builtin operator or a TorchScript function. +class TORCH_API ScriptResp final : public RpcCommandBase { + public: + explicit ScriptResp(at::IValue&& values); + + const at::IValue& value(); + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + private: + const at::IValue value_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h new file mode 100644 index 0000000000000000000000000000000000000000..ef733bcfb189964ac1aca6f5464f6fddf6b3eca4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h @@ -0,0 +1,495 @@ +#pragma once + +#ifdef USE_TENSORPIPE + +#include +#include + +#include +#include +#include +#include + +// Forward-declare the TensorPipe classes we need, to avoid including its +// headers in PyTorch's ones and thus have it become a public dependency. + +namespace tensorpipe { + +class Context; +class Error; +class Listener; +class Message; +class Pipe; + +namespace transport { +class Context; +} // namespace transport + +namespace channel { +class Context; +} // namespace channel + +} // namespace tensorpipe + +namespace torch { +namespace distributed { +namespace rpc { + +// These priorities instruct TensorPipe on which transport/channel to pick +// during handshake. Higher priorities will take precedence over lower ones. +// The transport with lowest priority will be the one used to bootstrap pipes. + +constexpr int64_t kShmTransportPriority = 200; +constexpr int64_t kIbvTransportPriority = 100; +// The UV transport just uses TCP and should work everywhere, thus keep it last. +constexpr int64_t kUvTransportPriority = 0; + +constexpr int64_t kCmaChannelPriority = 1200; +constexpr int64_t kMultiplexedUvChannelPriority = 1100; +// The basic channel reuses a transport as a channel, and is thus our fallback. +constexpr int64_t kBasicChannelPriority = 1000; + +// CPU channel have higher priority than CUDA channels, since the latter might +// handle CPU-to-CPU transfers, but will always be less efficient than their +// CPU-only counterparts. 
+constexpr int64_t kCudaIpcChannelPriority = 300; +constexpr int64_t kCudaGdrChannelPriority = 200; +constexpr int64_t kCudaXthChannelPriority = 400; +constexpr int64_t kCudaBasicChannelPriority = 0; + +using steady_clock_time_point = + std::chrono::time_point; + +struct TORCH_API TransportRegistration { + std::shared_ptr transport; + int64_t priority; + std::string address; +}; + +C10_DECLARE_REGISTRY(TensorPipeTransportRegistry, TransportRegistration); + +struct TORCH_API ChannelRegistration { + std::shared_ptr channel; + int64_t priority; +}; + +C10_DECLARE_REGISTRY(TensorPipeChannelRegistry, ChannelRegistration); + +constexpr auto kDefaultNumWorkerThreads = 16; + +struct TORCH_API TensorPipeRpcBackendOptions : public RpcBackendOptions { + TensorPipeRpcBackendOptions( + int numWorkerThreads, + optional> transports, + optional> channels, + float rpc_timeout, + std::string init_method, + std::unordered_map device_maps = {}, + std::vector devices = {}) + : RpcBackendOptions(rpc_timeout, init_method), + numWorkerThreads(numWorkerThreads), + transports(std::move(transports)), + channels(std::move(channels)), + deviceMaps(std::move(device_maps)), + devices(std::move(devices)) { + TORCH_CHECK( + numWorkerThreads > 0, + "num_worker_threads must be positive, got ", + numWorkerThreads); + + if (this->transports.has_value()) { + for (const std::string& transportName : this->transports.value()) { + TORCH_CHECK( + TensorPipeTransportRegistry()->Has(transportName), + "Unknown transport: ", + transportName); + } + } + + if (this->channels.has_value()) { + for (const std::string& channelName : this->channels.value()) { + TORCH_CHECK( + TensorPipeChannelRegistry()->Has(channelName), + "Unknown channel: ", + channelName); + } + } + } + + void setDeviceMap(const std::string& workerName, const DeviceMap& deviceMap) { + auto iter = deviceMaps.find(workerName); + if (iter == deviceMaps.end()) { + deviceMaps[workerName] = deviceMap; + } else { + for (auto& entry : deviceMap) { + // c10::Device has no default constructor, hence map[device] dosn't work + // In C++-17 we can use insert_or_assign. + auto entryIter = iter->second.find(entry.first); + if (entryIter == iter->second.end()) { + iter->second.emplace(entry.first, entry.second); + } else { + entryIter->second = entry.second; + } + } + } + } + + int numWorkerThreads; + const optional> transports; + const optional> channels; + std::unordered_map deviceMaps; + std::vector devices; +}; + +// Struct to track the network source metrics +struct TORCH_API NetworkSourceInfo { + worker_id_t srcRank; + std::vector srcMachineAddr; +}; + +// Struct to track aggregated network metrics +struct TORCH_API AggregatedNetworkData { + uint64_t numCalls{0}; + uint64_t totalSentBytes{0}; + uint64_t totalRecvBytes{0}; + uint64_t totalErrors{0}; +}; + +// TensorPipeAgent leverages TensorPipe (https://github.com/pytorch/tensorpipe) +// to transparently move tensors and payloads through the fastest available +// transport or channel. It acts like a hybrid RPC transport, providing shared +// memory (linux) and TCP (linux & mac) support. CUDA support is in progress. 
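TensorPipeRpcBackendOptions::setDeviceMap() above works around the fact that map[key] requires the mapped type to be default constructible, which c10::Device is not. Below is a small standalone sketch of the same merge logic under that constraint; the Dev type and mergeDeviceMap name are hypothetical stand-ins, and the sketch also shows the C++17 std::map::insert_or_assign alternative mentioned in the comment:

#include <iostream>
#include <map>
#include <string>

// Stand-in for a value type without a default constructor (like c10::Device).
struct Dev {
  explicit Dev(int index) : index(index) {}
  int index;
};

// Merge `update` into `target`, overwriting entries for existing keys.
void mergeDeviceMap(std::map<std::string, Dev>& target,
                    const std::map<std::string, Dev>& update) {
  for (const auto& entry : update) {
    // target[entry.first] = entry.second;  // would not compile: Dev has no
    //                                      // default constructor
    target.insert_or_assign(entry.first, entry.second);  // C++17
  }
}

int main() {
  std::map<std::string, Dev> target;
  target.emplace("cuda:0", Dev(0));
  mergeDeviceMap(target, {{"cuda:0", Dev(1)}, {"cuda:1", Dev(2)}});
  for (const auto& kv : target) {
    std::cout << kv.first << " -> " << kv.second.index << "\n";
  }
}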
+class TORCH_API TensorPipeAgent : public RpcAgent { + public: + TensorPipeAgent( + const c10::intrusive_ptr<::c10d::Store>& store, + std::string selfName, + worker_id_t selfId, + optional worldSize, + TensorPipeRpcBackendOptions opts, + std::unordered_map reverseDeviceMaps, + std::vector devices, + std::unique_ptr cb); + + TensorPipeAgent(const TensorPipeAgent&) = delete; + TensorPipeAgent& operator=(const TensorPipeAgent&) = delete; + + c10::intrusive_ptr send( + const WorkerInfo& to, + c10::intrusive_ptr message, + const float rpcTimeoutSeconds = kUnsetRpcTimeout, + const DeviceMap& deviceMap = {}) override; + + // join() and sync() would be deprecated - + // https://github.com/pytorch/pytorch/issues/27647 + void join(bool shutdown = false, float timeout = 0) override; + void sync() override{}; + void startImpl() override; + void shutdownImpl() override; + + ~TensorPipeAgent() override; + + const WorkerInfo& getWorkerInfo(const std::string& workerName) const override; + const WorkerInfo& getWorkerInfo(worker_id_t workerId) const override; + std::vector getWorkerInfos() const override; + void updateGroupMembership( + const WorkerInfo& workerInfo, + const std::vector& devices, + const std::unordered_map& reverseDeviceMaps, + bool isJoin); + + std::unordered_map getMetrics() override; + + void addGilWaitTime(const std::chrono::microseconds gilWaitTime) override; + + TensorPipeRpcBackendOptions getBackendOptions() const; + + const c10::intrusive_ptr<::c10d::Store> getStore() const; + + DeviceMap getDeviceMap(const WorkerInfo& dest) const override; + + const std::vector& getDevices() const override; + + using NetworkDataDict = + std::unordered_map; + + // Returns metrics tracked by the NetworkDataDict + NetworkDataDict getNetworkData(); + // Returns NetworkSourceInfo struct + NetworkSourceInfo getNetworkSourceInfo(); + + static const std::string& guessAddress(); + + // For testing purposes. + size_t timeoutMapSize(); + size_t numPendingResponses(); + size_t messageIdToTimeoutMapSize(); + + const bool isStaticGroup_; + + protected: + // TensorPipe write function that could be used to write response + // messages by server, and write request messages by client. This + // is a protected method since it is overwritten by FaultyTensorPipeAgent + virtual void pipeWrite( + const std::shared_ptr&, + c10::intrusive_ptr message, + std::vector&& devices, + std::vector streams, + std::function) noexcept; + + private: + // Removes the given messageId with the given expirationTime from the + // timeoutMap_. + void removeFromTimeoutMap(uint64_t messageId); + + // Populates workerIdToInfo_ and workerNameToInfo_ using addressStore_ + void prepareNames(bool isStaticGroup); + + // Check the static group attribute with the value set in store + void checkAndSetStaticGroup(const c10::intrusive_ptr<::c10d::Store>& store); + + const std::string& findWorkerURL(const WorkerInfo& worker) const; + + // Only use for Dynamic RPC groups, method to have worker leave group + void leaveGroup(); + + // TensorPipe read function that could be used to read response messages + // by client, and read request messages by server. 
+ void pipeRead( + const std::shared_ptr&, + std::function, + std::vector)>) noexcept; + + // Callback of listener accept() + void onListenerAccepted( + const tensorpipe::Error& error, + std::shared_ptr& pipe); + + // Respond to a call from a peer + void respond(std::shared_ptr& pipe); + + void sendCompletedResponseMessage( + std::shared_ptr& pipe, + JitFuture& futureResponseMessage, + uint64_t messageId, + std::vector stream); + + // Collects metrics from successful RPC calls + void trackNetworkData( + uint64_t requestSize, + uint64_t responseSize, + const std::string& destWorkerName); + + // Collects metrics from failed RPC calls + void trackNetworkError( + uint64_t requestSize, + const std::string& destWorkerName); + + inline std::vector getDevicesForRemote( + const std::string& remoteName, + const Message& message) const; + + // When a request+response completes, we need to mark the future message as + // complete. However, if its timeout has already expired, it already has an + // error set. There is no atomic "test-and-set" way to mark a future complete + // only if it isn't yet. It does exist for errors (setErrorIfNeeded) but, even + // then, it ends up printing a log message, which may worry the user. To solve + // both issues we use a separate atomic flag to know the status of the future. + struct AtomicJitFuture { + explicit AtomicJitFuture(const std::vector& devices) { + jitFuture = c10::make_intrusive( + at::AnyClassType::get(), devices); + } + + std::atomic_flag isComplete = ATOMIC_FLAG_INIT; + c10::intrusive_ptr jitFuture; + }; + + // Maintains state per client pipe to track pending response messages and + // error states. pendingResponseMessage_ should be protected by a mutex since + // it can be raced with user send() call. + // TODO: To achieve better performance we can have a pipe pool per + // client that can be configured using RpcBackendOptions. + struct ClientPipe { + explicit ClientPipe(std::shared_ptr pipe) + : pipe_(std::move(pipe)) {} + std::shared_ptr pipe_; + mutable std::mutex mutex_; + bool inError_{false}; + // Map from Message Request ID's to corresponding futures. + std::unordered_map> + pendingResponseMessage_; + }; + + const c10::intrusive_ptr<::c10d::Store> store_; + + const TensorPipeRpcBackendOptions opts_; + // For dynamic RPC, the reverse device maps are updated whenever a new rank + // joins or leaves the group + std::unordered_map reverseDeviceMaps_; + // Local devices used by this agent. If application didn't specify this + // field, it will be initialized using corresponding local devices in + // opts_.deviceMaps and reverseDeviceMaps_; + std::vector devices_; + + ThreadPool threadPool_; + std::shared_ptr context_; + std::shared_ptr listener_; + + mutable std::mutex connectedPipesMutex_; + std::unordered_map connectedPipes_; + + // Maps keyed on name and id for easy WorkerInfo lookup. + std::unordered_map workerIdToInfo_; + std::unordered_map workerNameToInfo_; + std::unordered_map workerNameToURL_; + + ::c10d::PrefixStore rankToNameStore_; + ::c10d::PrefixStore nameToAddressStore_; + // Store keys that will used to count joined processes and active calls during + // the shutdown process + ::c10d::PrefixStore shutdownStore_; + int worldSize_ = 0; + std::atomic nextMessageID_{0}; + + // Metadata used for tracking of whether certain RPCs have timed out or not. 
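The AtomicJitFuture comment above explains why a separate flag is needed: a completed response and a fired timeout may race to finish the same future, and only the first one may act. Here is a hedged, self-contained sketch of that test-and-set idiom using std::atomic_flag and std::promise; it illustrates the idea only (the agent itself completes a c10::ivalue::Future, and the OneShotResult name is hypothetical):

#include <atomic>
#include <future>
#include <iostream>
#include <stdexcept>
#include <string>
#include <thread>

// Completes an underlying promise at most once, no matter how many
// threads race to mark it done or errored.
struct OneShotResult {
  std::atomic_flag completed = ATOMIC_FLAG_INIT;
  std::promise<std::string> promise;

  bool markCompleted(std::string value) {
    // test_and_set() returns the previous state: true means someone beat us.
    if (completed.test_and_set()) {
      return false;
    }
    promise.set_value(std::move(value));
    return true;
  }

  bool markError(std::string message) {
    if (completed.test_and_set()) {
      return false;
    }
    promise.set_exception(
        std::make_exception_ptr(std::runtime_error(std::move(message))));
    return true;
  }
};

int main() {
  OneShotResult result;
  auto future = result.promise.get_future();

  // A "response arrived" thread and a "timeout fired" thread race; exactly
  // one of them wins, so the promise is fulfilled exactly once.
  std::thread response([&] { result.markCompleted("response payload"); });
  std::thread timeout([&] { result.markError("RPC timed out"); });
  response.join();
  timeout.join();

  try {
    std::cout << "future: " << future.get() << "\n";
  } catch (const std::exception& e) {
    std::cout << "future error: " << e.what() << "\n";
  }
}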
+ struct TimeoutMessageMetadata { + TimeoutMessageMetadata( + uint64_t messageId_, + std::shared_ptr responseFuture_, + std::chrono::milliseconds timeout_) + : messageId(messageId_), + responseFuture(std::move(responseFuture_)), + timeout(timeout_) {} + uint64_t messageId; + std::shared_ptr responseFuture; + std::chrono::milliseconds timeout; + }; + + // Map to store the expiration times for each message. + std::map> + timeoutMap_; + + // Map to store the messageId to expiry time. + std::unordered_map messageIdToTimeout_; + + // Thread that will poll the timeoutMap_ for timed out messages and mark them + // with an error accordingly + std::thread timeoutThread_; + + // Function run by the timeoutThread_ to check for timed out RPCs + void pollTimeoutRpcs(); + + // Mutex to guard the timeoutMap_ + std::mutex timeoutMapMutex_; + + // Condition Variable to signal population of the timeoutMap_ + std::condition_variable timeoutThreadCV_; + + // Returns the expiration time for an RPC by adding the current time to the + // passed in timeout. + inline steady_clock_time_point computeRpcMessageExpiryTime( + std::chrono::milliseconds timeout) const { + return std::chrono::time_point_cast( + std::chrono::steady_clock::now() + timeout); + } + + // Handle error on an outgoing pipe + void handleClientError( + ClientPipe& clientPipe, + const tensorpipe::Error& error); + + // This is a generic struct for capturing Time-Series Metrics. It keeps a + // running sum and count of data points (observations), and can return an + // average of the data points seen so far. This is currently only used for + // tracking the GIL Wait Time in RPC Agents, but can be used for other metrics + // as well. + struct TimeSeriesMetricsTracker { + // Running sum of the data points seen so far + uint64_t currentSum_; + // Running count of the data points seen so far + uint64_t currentCount_; + + explicit TimeSeriesMetricsTracker( + uint64_t currentSum = 0, + uint64_t currentCount = 0); + + // Adds a data point (which is basically one observation for the metric + // being tracked) to the running sum and count. + void addData(uint64_t dataPoint); + // Returns the average of all the data points seen so far. + float computeAverage() const; + }; + + // Map of Time-Series metrics tracked by the RPC Agent + std::unordered_map timeSeriesMetrics_; + // Mutex to guard timeSeriesMetrics_ + std::mutex metricsMutex_; + + // Custom lock guard used to check if the RPC group is dynamic and lock the + // mutex if so + struct GroupMembershipLockGuard { + GroupMembershipLockGuard(std::mutex& mutex, bool isStaticGroup) + : ref_(mutex), isStaticGroup_(isStaticGroup) { + if (isStaticGroup_) { + ref_.lock(); + } + } + + ~GroupMembershipLockGuard() { + if (isStaticGroup_) { + ref_.unlock(); + } + } + + GroupMembershipLockGuard(const GroupMembershipLockGuard&) = delete; + + private: + std::mutex& ref_; + bool isStaticGroup_; + }; + // Mutex to guard access to group membership data + // e.g. updates to (workerIdToInfo_, workerNameToInfo_, workerNameToURL_) + mutable std::mutex groupMembershipMutex_; + + // Map to Track Network Data + NetworkDataDict networkData_; + // Mutex to guard networkData_ + std::mutex networkDataMutex_; + + // A mutex and a cv to guard access to the call counts and watch for changes. 
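computeRpcMessageExpiryTime() above simply adds the per-call timeout to the current steady_clock time, and keeping timeoutMap_ keyed by that expiry time means the polling thread only has to walk the front of the ordered map to find every RPC that is due. A minimal sketch of that bookkeeping under the same assumptions (the computeExpiry helper and the standalone main are hypothetical, not the agent's code):

#include <chrono>
#include <cstdint>
#include <iostream>
#include <map>
#include <thread>
#include <vector>

using Clock = std::chrono::steady_clock;

// Same idea as computeRpcMessageExpiryTime(): deadline = now + timeout.
Clock::time_point computeExpiry(std::chrono::milliseconds timeout) {
  return Clock::now() + timeout;
}

int main() {
  // Ordered by deadline: the earliest expiry is always timeoutMap.begin().
  std::map<Clock::time_point, std::vector<uint64_t>> timeoutMap;

  timeoutMap[computeExpiry(std::chrono::milliseconds(10))].push_back(1);
  timeoutMap[computeExpiry(std::chrono::milliseconds(10))].push_back(2);
  timeoutMap[computeExpiry(std::chrono::milliseconds(500))].push_back(3);

  std::this_thread::sleep_for(std::chrono::milliseconds(50));

  // Pop every bucket whose deadline has passed; message 3 stays pending.
  auto now = Clock::now();
  for (auto it = timeoutMap.begin();
       it != timeoutMap.end() && it->first <= now;
       it = timeoutMap.erase(it)) {
    for (uint64_t messageId : it->second) {
      std::cout << "message " << messageId << " timed out\n";
    }
  }
  std::cout << timeoutMap.size() << " deadline bucket(s) still pending\n";
}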
+ std::mutex callCountMutex_; + std::condition_variable callCountCV_; + // Running total of un-processed, un-errored RPC calls sent + int32_t clientActiveCalls_{0}; + // Running total of un-processed RPC requests received + int32_t serverActiveCalls_{0}; + // Running total of RPC requests that will be completed asynchronously + int32_t serverActiveAsyncCalls_{0}; + + // Whether a global graceful shutdown has begun, in which case we'll silence + // error messages due to remote workers closing their pipes. + std::atomic shuttingDown_{false}; + + // Helpers to modify the counts while correctly dealing with the mutex and cv. + void increaseCallCount(int32_t& count); + void decreaseCallCount(int32_t& count); + + // Helpers to set the state of the requests. + void markFutureAsComplete( + std::shared_ptr atomicFuture, + c10::intrusive_ptr message, + std::vector streams); + void markFutureWithError( + std::shared_ptr atomicFuture, + std::string errorMsg); +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch + +#endif // USE_TENSORPIPE diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..1011a9c34c3d805f249b2a842e227bdfc2ef2cb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h @@ -0,0 +1,123 @@ +#pragma once + +#ifdef USE_TENSORPIPE + +#include + +namespace tensorpipe { +class Message; +class Allocation; +class Descriptor; +} // namespace tensorpipe + +namespace torch { +namespace distributed { +namespace rpc { + +TORCH_API const c10::Stream& getStreamForDevice( + const std::vector& streams, + const c10::Device& device); + +// Inspired by c10/core/impl/DeviceGuardImplInterface.h. + +class TensorpipeDeviceTypeConverter { + public: + // Ideally we'd want this to also return a tensorpipe::Message::Tensor object + // but we cannot forward-declare that class (because it's nested), and we + // cannot include the TensorPipe headers because it's a private dependency. + // Thus we bend over backwards and entrust this method with appending that + // object to the `tensors` field of the tensorpipe::Message object we pass. + virtual c10::optional> prepareTensorForSending( + const c10::Storage& storage, + const std::vector& streams, + tensorpipe::Message& message) const = 0; + + // Same as above: this method cannot return a tensorpipe::Allocation::Tensor, + // thus it appends it to the `tensors` field of the tensorpipe::Allocation. 
+ virtual at::DataPtr allocateTensorForReceiving( + c10::DeviceIndex deviceIndex, + size_t length, + const std::vector& streams, + tensorpipe::Allocation& allocation) const = 0; + + virtual ~TensorpipeDeviceTypeConverter() = default; +}; + +extern TORCH_API std::array< + std::atomic, + static_cast(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)> + device_type_converter_registry; + +class TORCH_API TensorpipeDeviceTypeConverterRegistrar { + public: + TensorpipeDeviceTypeConverterRegistrar( + DeviceType, + const TensorpipeDeviceTypeConverter*); +}; + +#define C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER( \ + DevType, TensorpipeDeviceTypeConverter) \ + static ::torch::distributed::rpc::TensorpipeDeviceTypeConverterRegistrar \ + C10_ANONYMOUS_VARIABLE(g_##DeviceType)( \ + ::c10::DeviceType::DevType, new TensorpipeDeviceTypeConverter()); + +inline const TensorpipeDeviceTypeConverter* getDeviceTypeConverter( + DeviceType type) { + return device_type_converter_registry[static_cast(type)].load(); +} + +// A struct that holds pointers that keep alive all the memory that will be +// accessed by TensorPipe during a write operation. +struct TensorpipeWriteBuffers { + // Allocate on heap so pointers stay valid as we move the holder. + std::unique_ptr type; + std::unique_ptr id; + std::vector payload; + std::vector pickle; + // This contains the original tensors and the clones of the sparse tensors. + std::vector tensors; + // This contains the copies of the data of the tensors that didn't own their + // memory, e.g., the ones created from torch::from_blob() with no deleter. + std::vector> copiedTensors; +}; + +// A struct that holds pointers that keep alive all the memory that will be +// accessed by TensorPipe during a read operation. +struct TensorpipeReadBuffers { + // Allocate on heap so pointers stay valid as we move the holder. + std::unique_ptr type; + std::unique_ptr id; + std::vector payload; + std::vector pickle; + std::vector tensors; +}; + +// Convert an RPC message into a TensorPipe message, plus a holder to all the +// data that must be kept alive while the write is performed asynchronously. +TORCH_API std::tuple +tensorpipeSerialize( + c10::intrusive_ptr rpcMessage, + std::vector devices, + const std::vector& streams); + +// Allocate the buffers that will hold the incoming data. They will be managed +// by the returned holder, which must be kept alive until the asynchronous read +// has finished. Pointers to these buffers will be stored in the returned +// tensorpipe::Allocation struct. +TORCH_API std::pair +tensorpipeAllocate( + const tensorpipe::Descriptor& tpDescriptor, + const std::vector& streams); + +// Convert a TensorPipe message back into an RPC message. This requires the data +// to be available and can thus only be performed once the asynchronous read has +// completed. The holder can be destroyed once this function returns. 
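The converter registry above is a classic static-registration pattern: a fixed-size array of atomic pointers indexed by device type, filled in by registrar objects constructed at namespace scope (what the C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER macro expands to) and read back with a plain atomic load. A self-contained sketch of the pattern with hypothetical names (DeviceKind, Converter, ConverterRegistrar), not torch's actual types:

#include <array>
#include <atomic>
#include <cstddef>
#include <iostream>

enum class DeviceKind : std::size_t { CPU = 0, CUDA = 1, kCount = 2 };

struct Converter {
  virtual const char* name() const = 0;
  virtual ~Converter() = default;
};

// One atomic slot per device kind; nullptr means "no converter registered".
std::array<std::atomic<const Converter*>,
           static_cast<std::size_t>(DeviceKind::kCount)>
    converterRegistry{};

struct ConverterRegistrar {
  ConverterRegistrar(DeviceKind kind, const Converter* converter) {
    converterRegistry[static_cast<std::size_t>(kind)].store(converter);
  }
};

const Converter* getConverter(DeviceKind kind) {
  return converterRegistry[static_cast<std::size_t>(kind)].load();
}

// Registration happens as a side effect of constructing a namespace-scope
// object, mirroring the registrar the macro above declares. The converter is
// intentionally never freed, like most process-lifetime registries.
struct CpuConverter final : Converter {
  const char* name() const override { return "cpu"; }
};
static ConverterRegistrar cpuRegistrar{DeviceKind::CPU, new CpuConverter()};

int main() {
  const Converter* cpu = getConverter(DeviceKind::CPU);
  const Converter* cuda = getConverter(DeviceKind::CUDA);
  std::cout << "CPU converter: " << (cpu ? cpu->name() : "none") << "\n";
  std::cout << "CUDA converter: " << (cuda ? cuda->name() : "none") << "\n";
}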
+TORCH_API c10::intrusive_ptr tensorpipeDeserialize( + tensorpipe::Descriptor&& tpDescriptor, + TensorpipeReadBuffers&& holder); + +} // namespace rpc +} // namespace distributed +} // namespace torch + +#endif // USE_TENSORPIPE diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..d7eea6b2c8f954086819d45e2c94d7c661aa9448 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// This function sends an rpc call to run torchscript function, currently the +// torchscript function could only be a user defined python function with +// "@torch.jit.script" annotation. The torchscript function could not be +// a class constructor, class method, instance method or a script module. +// dst: destination worker name +// qualifiedName: torchscript function qualified name string like +// "moduleName::torchscriptFunctionName", e.g, +// "dist_autograd_test::my_py_add" +// stack: a bag of IValue args passed to torchscriptFunctionName +// It returns c10::intrusive_ptr +c10::intrusive_ptr TORCH_API rpcTorchscript( + const std::string& dstWorkerName, + const c10::QualifiedName& qualifiedName, + const c10::FunctionSchema& functionSchema, + std::vector& stack, + const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout, + const bool isAsyncExecution = false); + +c10::intrusive_ptr TORCH_API remoteTorchscript( + const std::string& dstWorkerName, + const c10::QualifiedName& qualifiedName, + const c10::FunctionSchema& functionSchema, + std::vector& stack, + const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout, + const bool isAsyncExecution = false); + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h new file mode 100644 index 0000000000000000000000000000000000000000..9c4029bead95cefbdec8c522943f86d52d99fc11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using worker_id_t = int16_t; +using local_id_t = int64_t; + +bool getAllowJitRRefPickle(); +TORCH_API void enableJitRRefPickle(); +TORCH_API void disableJitRRefPickle(); + +struct TORCH_API JitRRefPickleGuard { + JitRRefPickleGuard(); + ~JitRRefPickleGuard(); +}; + +struct TORCH_API GloballyUniqueId final { + GloballyUniqueId(worker_id_t createdOn, local_id_t localId); + GloballyUniqueId(const GloballyUniqueId& other) = default; + GloballyUniqueId& operator=(const GloballyUniqueId& other) = delete; + + bool operator==(const GloballyUniqueId& other) const; + bool operator!=(const GloballyUniqueId& other) const; + + at::IValue toIValue() const; + static GloballyUniqueId fromIValue(const at::IValue&); + + struct Hash { + size_t operator()(const GloballyUniqueId& key) const { + return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_; + } + }; + + static constexpr int kLocalIdBits = 48; + + const worker_id_t createdOn_; + 
const local_id_t localId_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const GloballyUniqueId& globalId); + +using RRefId = GloballyUniqueId; +using ForkId = GloballyUniqueId; +using ProfilingId = GloballyUniqueId; + +struct TORCH_API SerializedPyObj final { + SerializedPyObj(std::string&& payload, std::vector&& tensors) + : payload_(std::move(payload)), tensors_(std::move(tensors)) {} + + std::vector toIValues() &&; + static SerializedPyObj fromIValues(std::vector value); + + std::string payload_; + std::vector tensors_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h new file mode 100644 index 0000000000000000000000000000000000000000..c6dda5ba470447ad6ce461566737a4f704107050 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// This class converts the content in a PythonCall into py::object. This is a +// helper class to make sure that all arguments deserialization is done before +// entering RequestCallbackImpl::processRpc(...), so that the deserialization +// related logic can be carried out in one spot instead of scattered in multiple +// places for different message types. +// NB: The reason for not consolidating class into PythonCall is because +// PythonCall is a libtorch type which should not depend on Python types. +class TORCH_API UnpickledPythonCall : public RpcCommandBase { + public: + UnpickledPythonCall( + const SerializedPyObj& serializedPyObj, + bool isAsyncExecution); + ~UnpickledPythonCall() override; + + // toMessage() method is not implemented, as objects of this class should + // never be directly converted into a Message object. + c10::intrusive_ptr toMessageImpl() && override; + const py::object& pythonUdf() const; + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + private: + py::object pythonUdf_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h new file mode 100644 index 0000000000000000000000000000000000000000..c72fbc639ac4f6e6ee034b0a5d1d6885ae74cef3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// This class converts the content in a PythonRemoteCall into py::object. This +// is a helper class to make sure that all arguments deserialization is done +// before entering RequestCallbackImpl::processRpc(...), so that the +// deserialization related logic can be carried out in one spot instead of +// scattered in multiple places for different message types. +// NB: The reason for not consolidating class into PythonRemoteCall is because +// PythonRemoteCall is a libtorch type which should not depend on Python types. 
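GloballyUniqueId::Hash above packs the creating worker's id into the bits above kLocalIdBits and the local id into the low 48 bits, which stays collision-free only while the local id fits in 48 bits. A small standalone sketch of that packing and the corresponding unpacking (the pack/unpack helper names are hypothetical):

#include <cassert>
#include <cstdint>
#include <iostream>

using worker_id_t = int16_t;
using local_id_t = int64_t;

constexpr int kLocalIdBits = 48;
constexpr uint64_t kLocalIdMask = (uint64_t(1) << kLocalIdBits) - 1;

// Same arithmetic as GloballyUniqueId::Hash: worker id in the high bits,
// local id in the low 48 bits.
uint64_t pack(worker_id_t createdOn, local_id_t localId) {
  assert(uint64_t(localId) <= kLocalIdMask);  // otherwise ids would collide
  return (uint64_t(createdOn) << kLocalIdBits) | uint64_t(localId);
}

worker_id_t unpackWorker(uint64_t packed) {
  return worker_id_t(packed >> kLocalIdBits);
}

local_id_t unpackLocal(uint64_t packed) {
  return local_id_t(packed & kLocalIdMask);
}

int main() {
  uint64_t key = pack(/*createdOn=*/3, /*localId=*/42);
  std::cout << "packed: 0x" << std::hex << key << std::dec << "\n";
  std::cout << "worker: " << unpackWorker(key)
            << ", local: " << unpackLocal(key) << "\n";
}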
+class TORCH_API UnpickledPythonRemoteCall final : public UnpickledPythonCall { + public: + explicit UnpickledPythonRemoteCall( + const SerializedPyObj& serializedPyObj, + const at::IValue& retRRefId, + const at::IValue& retForkId, + const bool isAsyncExecution); + + const RRefId& rrefId() const; + const ForkId& forkId() const; + + private: + RRefId rrefId_; + ForkId forkId_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3627d0db14f9c104600d9c1f6af51dd2833a2971 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Parse error message and return RPCErrorType based on the message. +TORCH_API RPCErrorType getRPCErrorType(const JitFuture& jitFuture); +// Create an error string given the error description and error type +TORCH_API std::string makeRPCError( + const std::string& rpcErrorStr, + RPCErrorType errorType); + +// Given an RPC message received as a request over the wire, deserialize it into +// the appropriate 'RpcCommandBase' type. +TORCH_API std::unique_ptr deserializeRequest( + const Message& request); + +// Given an RPC message received as a response over the wire, deserialize it +// into the appropriate 'RpcCommandBase' type, if the response is +// FORWARD_AUTOGRAD_RESP type, unwrap it, attach recvBackward() functions +// to received tensors and set the wrappedMsgType to its wrapped message type. +TORCH_API std::unique_ptr deserializeResponse( + const Message& response, + MessageType& wrappedMsgType); + +// Given an RPC message received as a response over the wire, deserialize it +// into the valid IValue if the message is for a script rpc result, +// otherwise deserialize it into dummy none ivalue that will never be used. +// In this deserialization, we also attach recv rpc backward functions if +// needed. +IValue deserializeResptoIValueInternal( + RpcCommandBase& rpc, + MessageType messageType); +TORCH_API IValue deserializeRespToIValue(const Message& message); + +// Note: format is subject to change and intended for RPCs. +// For saving persistently to disk, use torch::save(). +TORCH_API std::string wireSerialize( + const std::vector& payload, + const std::vector& tensors); + +TORCH_API std::pair, std::vector> wireDeserialize( + const void* data, + size_t data_size); + +// We use vector as the type of blobs because it's what rpc::Message uses +// for its payload, even though it has the disadvantage that it cannot be +// allocated with uninitialized memory: it is always zeroed out. + +// Some Tensors are effectively views of larger Tensors, where only a small +// subset of the Storage data is referenced. This normally is good and avoids +// copies when kept locally, but if we naively push the whole Storage over the +// wire, we'll end up with excess network traffic. This change clones tensors if +// we'd save at least half the data, and over a minimum hurdle. +TORCH_API c10::List cloneSparseTensors( + const std::vector& tensors); + +// Combines an original payload and wrapped payload into the original payload. +// Used to generate the overall payload for the wrapped RPC. 
+TORCH_API void writeWrappedPayload( + std::vector& originalPayload, + std::vector& additionalPayload); + +// Reads the additional, wrapped payload from a wrapped RPC off of the input +// payload. After this, payload will contain the payload of the original, +// un-wrapped RPC. +TORCH_API std::vector readWrappedPayload( + std::vector& payload, + const rpc::Message& message); + +// Takes a list of events from autograd profiler and populates them into +// profiledEvents to be carried over RPC. +TORCH_API void populateRemoteProfiledEvents( + std::vector& profiledEvents, + const torch::autograd::profiler::ProfilerConfig& profilerConfig, + const std::vector>& + eventLists); + +} // namespace rpc +} // namespace distributed +} // namespace torch
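writeWrappedPayload()/readWrappedPayload() above let a wrapper RPC (for example the autograd wrapper) piggyback extra bytes on an existing payload and peel them off again on the receiving side. The sketch below only illustrates that append-then-strip idea under an assumed layout (the wrapped bytes followed by their length as a fixed-width suffix); torch's actual wire format, encoding, and Message-based signatures may differ, and the helper names are hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Assumed layout: [original payload][wrapped payload][8-byte wrapped size].
void appendWrappedPayload(std::vector<char>& payload,
                          const std::vector<char>& wrapped) {
  payload.insert(payload.end(), wrapped.begin(), wrapped.end());
  uint64_t size = wrapped.size();
  char sizeBytes[sizeof(size)];
  std::memcpy(sizeBytes, &size, sizeof(size));
  payload.insert(payload.end(), sizeBytes, sizeBytes + sizeof(size));
}

// Removes and returns the wrapped section; `payload` shrinks back to the
// original, un-wrapped payload.
std::vector<char> stripWrappedPayload(std::vector<char>& payload) {
  if (payload.size() < sizeof(uint64_t)) {
    throw std::runtime_error("payload too small to hold a wrapped section");
  }
  uint64_t size = 0;
  std::memcpy(&size, payload.data() + payload.size() - sizeof(size),
              sizeof(size));
  if (payload.size() < sizeof(size) + size) {
    throw std::runtime_error("corrupt wrapped payload size");
  }
  auto wrappedBegin =
      payload.end() - static_cast<std::ptrdiff_t>(sizeof(size) + size);
  std::vector<char> wrapped(
      wrappedBegin, payload.end() - static_cast<std::ptrdiff_t>(sizeof(size)));
  payload.erase(wrappedBegin, payload.end());
  return wrapped;
}

int main() {
  std::vector<char> payload = {'r', 'p', 'c'};
  std::vector<char> extra = {'a', 'u', 't', 'o', 'g', 'r', 'a', 'd'};

  appendWrappedPayload(payload, extra);
  std::vector<char> recovered = stripWrappedPayload(payload);

  std::cout << "original: " << std::string(payload.begin(), payload.end())
            << ", wrapped: "
            << std::string(recovered.begin(), recovered.end()) << "\n";
}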