applied-ai-018 committed
Commit 6d5387c · verified · 1 Parent(s): 179ee59

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the rest.
Files changed (50)
  1. ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h +174 -0
  3. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h +49 -0
  4. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h +37 -0
  5. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h +25 -0
  6. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h +29 -0
  7. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h +23 -0
  8. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h +42 -0
  9. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h +24 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h +98 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h +62 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h +59 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h +39 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h +21 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp +408 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FakeProcessGroup.hpp +186 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp +63 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp +22 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp +61 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp +529 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp +176 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp +743 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +448 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +1097 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp +113 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp +140 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp +219 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp +73 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp +101 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp +164 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h +723 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp +58 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp +187 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp +27 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp +731 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp +27 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp +164 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h +13 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp +140 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h +23 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp +52 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h +56 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h +33 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp +104 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h +51 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp +589 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp +81 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp +65 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h +93 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h +46 -0
ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3599b233683cf5c07d6f254c52c98de7eaa4f5e3539175700226804e72b3359e
+ size 9372
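The three lines above are a Git LFS pointer: the optimizer-state tensor itself lives in LFS storage, and only its SHA-256 digest and byte size are committed. As a reading aid, here is a small C++ sketch of parsing such a pointer file; the LfsPointer struct and the local file path are illustrative assumptions, not part of Git or PyTorch.

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Illustrative parser for the three key/value lines of a Git LFS pointer.
struct LfsPointer {
  std::string version;
  std::string oid;   // e.g. "sha256:<hex digest>"
  long long size = 0;
};

LfsPointer parseLfsPointer(std::istream& in) {
  LfsPointer p;
  std::string line;
  while (std::getline(in, line)) {
    std::istringstream fields(line);
    std::string key, value;
    fields >> key >> value;
    if (key == "version") p.version = value;
    else if (key == "oid") p.oid = value;
    else if (key == "size") p.size = std::stoll(value);
  }
  return p;
}

int main() {
  std::ifstream f("exp_avg.pt");  // hypothetical local copy of the pointer file
  LfsPointer p = parseLfsPointer(f);
  std::cout << p.oid << ", " << p.size << " bytes\n";
  return 0;
}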
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h ADDED
@@ -0,0 +1,174 @@
+ #pragma once
+
+ #include <cstdint>
+ #include <functional>
+
+ #include <ATen/core/Dict.h>
+ #include <torch/csrc/autograd/engine.h>
+ #include <torch/csrc/distributed/autograd/functions/recvrpc_backward.h>
+ #include <torch/csrc/distributed/autograd/functions/sendrpc_backward.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ class RecvRpcBackward;
+
+ // DistAutogradContext which stores information for a single distributed
+ // autograd pass on a worker.
+ class TORCH_API DistAutogradContext {
+ public:
+ using GradCallback = std::function<bool(torch::Tensor&)>;
+
+ explicit DistAutogradContext(int64_t contextId);
+
+ // Retrieves the autograd context id for this context.
+ int64_t contextId() const;
+
+ // Records a 'send' autograd function for this context with the provided
+ // message id.
+ void addSendFunction(
+ const std::shared_ptr<SendRpcBackward>& func,
+ int64_t autograd_message_id);
+
+ // Records a 'recv' autograd function for this context with the provided
+ // message id.
+ void addRecvFunction(
+ std::shared_ptr<RecvRpcBackward>& func,
+ int64_t autograd_message_id);
+
+ // Given an autograd_message_id, retrieve the appropriate send function.
+ std::shared_ptr<SendRpcBackward> retrieveSendFunction(
+ int64_t autograd_message_id);
+
+ // Return all send functions for this context.
+ std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>> sendFunctions()
+ const;
+
+ // Return all recv functions for this context.
+ std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>> recvFunctions()
+ const;
+
+ // Adds a future message recording an outstanding RPC.
+ void addOutstandingRpc(const c10::intrusive_ptr<rpc::JitFuture>& jitFuture);
+
+ // Returns all gradients.
+ const c10::Dict<torch::Tensor, torch::Tensor> getGradients() const;
+
+ // This function gives a mutable grad reference to the callback.
+ // If the callback returns true, it means the grad in the context
+ // needs to be updated.
+ void runGradCallbackForVariable(
+ const torch::autograd::Variable& variable,
+ GradCallback&& cb);
+
+ DistAutogradContext(const DistAutogradContext&) = delete;
+ DistAutogradContext& operator=(const DistAutogradContext&) = delete;
+ DistAutogradContext(DistAutogradContext&&) = delete;
+ DistAutogradContext& operator=(DistAutogradContext&&) = delete;
+
+ // records the workerID of a node that we sent an RPC to.
+ // workerIDs are added here when we attach a send function to this autograd
+ // context
+ void addKnownWorkerId(const rpc::worker_id_t workerId);
+
+ // Retrieves a set containing the known workerIds for this context
+ // These are the different workers that this context has sent RPCs to.
+ std::unordered_set<rpc::worker_id_t> getKnownWorkerIds() const;
+
+ private:
+ friend class BackwardPassCleanupGuard;
+ friend class DistEngine;
+ friend class RecvRpcBackward;
+ friend class DistAccumulateGradCaptureHook;
+
+ // Record that we would like to accumulate the provided gradient on the given
+ // variable.
+ void accumulateGrad(
+ const torch::autograd::Variable& variable,
+ const torch::Tensor& grad,
+ size_t num_expected_refs);
+
+ // Retrieve the GraphTask.
+ std::shared_ptr<torch::autograd::GraphTask> retrieveGraphTask();
+
+ // Set the appropriate graph task for the backward pass. Can be called only
+ // once.
+ void setGraphTask(std::shared_ptr<torch::autograd::GraphTask> graphTask);
+
+ // Resets the graph task to ensure we can run another distributed backward
+ // pass for the same autograd context.
+ void resetGraphTask();
+
+ // Waits for all outstanding RPCs for this context to finish and clears all
+ // outstanding rpcs held in this context. This should be called only once.
+ c10::intrusive_ptr<c10::ivalue::Future> clearAndWaitForOutstandingRpcsAsync();
+
+ void clearOutstandingRpcs();
+
+ // Record an event to mark the completion of gradient computation. These
+ // events will later help to properly synchronize gradients consumptions
+ // in getGradients(). We need these events because backward and
+ // optimizer.step are separate RPC calls, and will occur on different CUDA
+ // streams. Without synchronization, it is possible that gradients are
+ // consumed before they are ready.
+ void recordGradEvent(c10::Device device);
+
+ const int64_t contextId_;
+
+ // Set containing known worker IDs, used in cleaning up autograd context.
+ // Whenever a sendRpcBackward is attached to the autograd graph for this
+ // context, the destination is added here.
+ std::unordered_set<rpc::worker_id_t> knownWorkerIds_;
+
+ // Map from autograd_message_id to appropriate 'send' autograd function.
+ std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>>
+ sendAutogradFunctions_;
+
+ // Map from autograd_message_id to appropriate 'recv' autograd function.
+ std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>>
+ recvAutogradFunctions_;
+
+ // Gradients accumulated in this context so far. The key is the variable on
+ // which the gradient needs to be accumulated and the value is the gradient
+ // that needs to be accumulated on that variable..
+ c10::Dict<torch::Tensor, torch::Tensor> accumulatedGrads_;
+
+ // See comments for recordGradEvent(c10::Device device);
+ std::unordered_map<c10::Device, c10::Event> gradReadyEvents_;
+ const c10::impl::VirtualGuardImpl impl_;
+
+ // The autograd GraphTask for the backward pass on this node for this context.
+ std::shared_ptr<torch::autograd::GraphTask> graphTask_;
+
+ // List of futures for RPCs initiated by this node to propagate gradients to
+ // other nodes. The distributed autograd engine on this node can return
+ // successfully only if all these futures are done and are successful.
+ std::vector<c10::intrusive_ptr<rpc::JitFuture>> outStandingRpcs_;
+
+ // Lock to protect concurrent modification of the context.
+ mutable std::mutex lock_;
+ };
+
+ using ContextPtr = std::shared_ptr<DistAutogradContext>;
+
+ // This class stores a shared_ptr to a DistAutogradContext instance in a
+ // thread local variable. The instance is given by the call site. The class
+ // doesn't know the current context. It's just a util class.
+ class TORCH_API ThreadLocalDistAutogradContext {
+ public:
+ // Store 'new_context' to the thread local variable maintained by this class.
+ explicit ThreadLocalDistAutogradContext(ContextPtr&& new_context);
+ ~ThreadLocalDistAutogradContext();
+
+ // Retrieve the stored DistAutogradContext instance.
+ static ContextPtr getContextPtr();
+
+ private:
+ ContextPtr prev_context_ptr_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
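As a rough illustration of how the declarations above fit together, the sketch below registers a send function under an autograd message id and retrieves it again. These are internals normally driven by the distributed autograd engine, so the hand-constructed objects and the literal ids here are assumptions made purely for illustration.

#include <torch/csrc/distributed/autograd/context/context.h>

using namespace torch::distributed::autograd;

void contextSketch() {
  // One context per distributed backward pass, keyed by a globally unique id.
  auto context = std::make_shared<DistAutogradContext>(/*contextId=*/42);

  // The engine records a SendRpcBackward node for every outgoing RPC...
  auto send = std::make_shared<SendRpcBackward>();
  context->addSendFunction(send, /*autograd_message_id=*/7);
  context->addKnownWorkerId(/*workerId=*/1);

  // ...and later looks it up by message id when gradients arrive.
  std::shared_ptr<SendRpcBackward> same = context->retrieveSendFunction(7);
  auto workers = context->getKnownWorkerIds();  // contains worker id 1
  (void)same;
  (void)workers;
}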
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/function.h>
+ #include <torch/csrc/distributed/autograd/context/context.h>
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Forward declarations.
+ class DistAutogradContext;
+
+ // As part of our distributed autograd implementation, whenever we receive an
+ // RPC from a node, we add a 'RecvRpcBackward' autograd function to the
+ // autograd graph. This is more or less a placeholder function that is used to
+ // pass gradients to the remote host during the backward pass. The inputs to the
+ // RPC function are the inputs to this autograd function.
+ class TORCH_API RecvRpcBackward : public torch::autograd::Node {
+ public:
+ explicit RecvRpcBackward(
+ const AutogradMetadata& autogradMetadata,
+ std::shared_ptr<DistAutogradContext> autogradContext,
+ rpc::worker_id_t fromWorkerId,
+ rpc::DeviceMap deviceMap);
+
+ torch::autograd::variable_list apply(
+ torch::autograd::variable_list&& grads) override;
+
+ private:
+ const AutogradMetadata autogradMetadata_;
+
+ // Hold a weak reference to the autograd context to avoid circular
+ // dependencies with the context (since it holds a reference to
+ // RecvRpcBackward).
+ std::weak_ptr<DistAutogradContext> autogradContext_;
+
+ // The worker id from which the RPC was received. During the backward pass,
+ // we need to propagate the gradients to this workerId.
+ rpc::worker_id_t fromWorkerId_;
+
+ // Device mapping for tensors sent over RPC.
+ const rpc::DeviceMap deviceMap_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/function.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // As part of our distributed autograd implementation, whenever we send an RPC
+ // from one node to another, we add a 'SendRpcBackward' autograd function to the
+ // autograd graph. This is more or less a placeholder function that is used to
+ // kickoff the autograd engine on the current worker on the backward pass. The
+ // edges for this autograd function are the inputs to the RPC method.
+ //
+ // During the backward pass, this function is queued for execution in the
+ // autograd engine which eventually runs the rest of the autograd graph.
+ struct TORCH_API SendRpcBackward : public torch::autograd::Node {
+ public:
+ torch::autograd::variable_list apply(
+ torch::autograd::variable_list&& inputs) override;
+
+ // SendRpcBackward is actually the root of an autograd graph on the local
+ // node. As a result, it doesn't receive any 'inputs', but rather the RPC
+ // framework passes gradients over to this function to kickoff local autograd
+ // computation.
+ void setGrads(const torch::autograd::variable_list& grads);
+
+ // Retrieve the grads for the function.
+ const torch::autograd::variable_list& getGrads() const;
+
+ private:
+ torch::autograd::variable_list grads_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <cstdint>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // This structure represents autograd metadata that we need to pass across
+ // different nodes when we call an RPC which needs autograd computation.
+ struct TORCH_API AutogradMetadata {
+ AutogradMetadata(int64_t autogradContextId, int64_t autogradMessageId);
+
+ // autogradContextId_ is a globally unique integer that identifies a
+ // particular distributed autograd pass.
+ int64_t autogradContextId;
+ // autogradMessageId_ is a globally unique integer that identifies a pair
+ // of send/recv autograd functions.
+ int64_t autogradMessageId;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Used to request other workers to clean up their autograd context.
+ class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase {
+ public:
+ explicit CleanupAutogradContextReq(int64_t context_id);
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<CleanupAutogradContextReq> fromMessage(
+ const rpc::Message& message);
+
+ // Retrieve the context id we are cleaning up with this message.
+ int64_t getContextId();
+
+ private:
+ int64_t context_id_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Empty response for CleanupAutogradContextReq. Send to acknowledge receipt of
+ // a CleanupAutogradContextReq.
+ class TORCH_API CleanupAutogradContextResp : public rpc::RpcCommandBase {
+ public:
+ CleanupAutogradContextResp() = default;
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<CleanupAutogradContextResp> fromMessage(
+ const rpc::Message& message);
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h ADDED
@@ -0,0 +1,42 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+ #include <vector>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Used to propagate gradients from one node to another during a distributed
+ // backwards pass. This RPC call is invoked when we hit a `recv` autograd
+ // function during backward pass execution.
+ class TORCH_API PropagateGradientsReq : public rpc::RpcCommandBase {
+ public:
+ PropagateGradientsReq(
+ const AutogradMetadata& autogradMetadata,
+ std::vector<torch::autograd::Variable> grads,
+ bool retainGraph = false);
+
+ const AutogradMetadata& getAutogradMetadata();
+
+ const std::vector<torch::autograd::Variable>& getGrads();
+
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<PropagateGradientsReq> fromMessage(
+ const rpc::Message& message);
+
+ // Whether or not to retain the autograd graph.
+ bool retainGraph();
+
+ private:
+ AutogradMetadata autogradMetadata_;
+ std::vector<torch::autograd::Variable> grads_;
+ bool retainGraph_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
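To make the relationship with AutogradMetadata concrete, the sketch below builds a PropagateGradientsReq from the declarations above and reads its fields back. It only exercises the constructor and accessors shown in this header; serializing the request with toMessageImpl() and shipping it is the RPC layer's job, and the literal ids are placeholders.

#include <torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h>

using namespace torch::distributed::autograd;

void propagateSketch(std::vector<torch::autograd::Variable> grads) {
  // (context id, message id) identify which send/recv pair these grads feed.
  AutogradMetadata metadata(/*autogradContextId=*/1, /*autogradMessageId=*/7);

  PropagateGradientsReq req(metadata, std::move(grads), /*retainGraph=*/false);

  const AutogradMetadata& meta = req.getAutogradMetadata();
  const auto& receivedGrads = req.getGrads();
  bool retain = req.retainGraph();
  (void)meta;
  (void)receivedGrads;
  (void)retain;
}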
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Response for the PropagateGradients call. Currently, this class is mostly
+ // just a placeholder and sends an empty message over the wire. The purpose of
+ // this RPC command is to indicate whether or not the PropagateGradientsReq call
+ // was successfully or not.
+ class TORCH_API PropagateGradientsResp : public rpc::RpcCommandBase {
+ public:
+ PropagateGradientsResp() = default;
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<PropagateGradientsResp> fromMessage(
+ const rpc::Message& message);
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h ADDED
@@ -0,0 +1,98 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Represents an RPC that includes autograd information. This class basically
+ // wraps another `RpcCommandBase` object which represents the actual RPC and has
+ // additional autograd information associated with that RPC.
+ class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase {
+ public:
+ // Used when we are sending an RPC over the wire.
+ RpcWithAutograd(
+ rpc::worker_id_t fromWorkerId,
+ rpc::MessageType messageType,
+ const AutogradMetadata& autogradMetadata,
+ c10::intrusive_ptr<rpc::Message> wrappedMessage,
+ rpc::DeviceMap deviceMap = {});
+
+ // Used when receiving an RPC over the wire.
+ RpcWithAutograd(
+ rpc::worker_id_t fromWorkerId,
+ rpc::MessageType messageType,
+ const AutogradMetadata& autogradMetadata,
+ std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
+ rpc::MessageType wrappedMessageType,
+ std::vector<torch::Tensor> tensors,
+ rpc::DeviceMap deviceMap = {});
+
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+
+ static std::unique_ptr<RpcWithAutograd> fromMessage(
+ const rpc::Message& message);
+
+ // Retrieves tensors as part of this RPC, which need to be considered for
+ // autograd computations.
+ std::vector<torch::Tensor>& tensors();
+
+ const AutogradMetadata& autogradMetadata() const;
+
+ RpcCommandBase& wrappedRpc();
+
+ void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
+
+ std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
+
+ // Message type of the wrapped RPC.
+ rpc::MessageType wrappedMessageType() const;
+
+ // Retrieve the worker id from which the RPC originated.
+ rpc::worker_id_t fromWorkerId() const;
+
+ // Retrieve the device map.
+ const rpc::DeviceMap& deviceMap();
+
+ private:
+ // WorkerId from which this RPC originated. This is necessary for knowing
+ // which worker we need to contact during the backward pass.
+ rpc::worker_id_t fromWorkerId_;
+
+ // Message type for this call.
+ rpc::MessageType messageType_;
+
+ AutogradMetadata autogradMetadata_;
+
+ // Since wrappedMessage_ is destructively constructed from wrappedRpc_,
+ // they are valid exclusively. They are used for different purpose.
+ // wrappedRpc_ is used while constructing receive rpcWithAutograd;
+ // wrappedMessage_ is used while constructing send rpcWithAutograd;
+
+ // When receive rpcWithAutograd is constructed fromMessage, it is valid;
+ // When send rpcWithAutograd is constructed before toMessage, it is nullptr;
+ std::unique_ptr<RpcCommandBase> wrappedRpc_;
+
+ // Serialized message representing wrappedRpc_. Used mostly as a cache to
+ // avoid serializing the request twice.
+ // When receive rpcWithAutograd is constructed fromMessage, it is nullptr;
+ // When send rpcWithAutograd is constructed before toMessage, it is valid;
+ c10::intrusive_ptr<rpc::Message> wrappedMessage_;
+
+ // message type of the wrappedMessage, this is stored separately since
+ // wrappedMessage_ is not always guaranteed to be populated.
+ rpc::MessageType wrappedMessageType_;
+
+ // Tensors part of the wrappedRpc that need to be considered for autograd.
+ std::vector<torch::Tensor> tensors_;
+
+ // Device mapping for tensors that are sent across an RPC to another node.
+ rpc::DeviceMap deviceMap_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h ADDED
@@ -0,0 +1,62 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/profiler.h>
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+ #include <torch/csrc/distributed/rpc/types.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ class TORCH_API RpcWithProfilingReq : public rpc::RpcCommandBase {
+ public:
+ // For sending RPCs, invoked when client is creating this RPC command.
+ RpcWithProfilingReq(
+ rpc::MessageType messageType,
+ c10::intrusive_ptr<rpc::Message> wrappedMessage,
+ torch::autograd::profiler::ProfilerConfig&& profilerConfig,
+ rpc::ProfilingId profilingKeyId);
+
+ // For receiving an RPC
+ // Used in fromMessage.
+ RpcWithProfilingReq(
+ rpc::MessageType messageType,
+ std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
+ rpc::MessageType wrappedMessageType,
+ std::vector<torch::Tensor> tensors,
+ torch::autograd::profiler::ProfilerConfig&& profilerConfig,
+ rpc::ProfilingId profilingKeyId);
+
+ // Convert this RPC Command to a Message that can be sent over the wire.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<RpcWithProfilingReq> fromMessage(
+ const rpc::Message& message);
+
+ // Retrieve the profiling data that is associated with this command.
+ torch::autograd::profiler::ProfilerConfig getProfilingConfig() const;
+ // Retrieve the globally unique profiling ID corresponding to this command.
+ const rpc::ProfilingId& getProfilingId() const;
+ // Retrieve the original RPC which this ProfilingRPC wraps.
+ RpcCommandBase& wrappedRpc();
+ // Destructively move the wrapped RPC.
+ std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
+ // Message type of the wrapped RPC
+ rpc::MessageType wrappedMessageType() const;
+ void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
+
+ private:
+ // message type
+ const rpc::MessageType messageType_;
+ // wrapped message
+ c10::intrusive_ptr<rpc::Message> wrappedMessage_;
+ std::unique_ptr<RpcCommandBase> wrappedRpc_;
+ rpc::MessageType wrappedMessageType_;
+ std::vector<torch::Tensor> tensors_;
+ const torch::autograd::profiler::ProfilerConfig profilerConfig_;
+ const rpc::ProfilingId profilingKeyId_;
+ };
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h ADDED
@@ -0,0 +1,59 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/profiler.h>
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+ #include <torch/csrc/distributed/rpc/types.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+ class TORCH_API RpcWithProfilingResp : public rpc::RpcCommandBase {
+ public:
+ // For sending RPCs over the wire
+ RpcWithProfilingResp(
+ rpc::MessageType messageType,
+ c10::intrusive_ptr<rpc::Message> wrappedMessage,
+ std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents,
+ rpc::ProfilingId profilingId);
+
+ // For receiving RPCs. Used in from message when converting a message received
+ // over the wire.
+ RpcWithProfilingResp(
+ rpc::MessageType messageType,
+ std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
+ rpc::MessageType wrappedMessageType,
+ std::vector<torch::Tensor> tensors,
+ std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents,
+ rpc::ProfilingId profilingId);
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<RpcWithProfilingResp> fromMessage(
+ const rpc::Message& message);
+ // Retrieve remote Events
+ std::vector<torch::autograd::profiler::LegacyEvent> getProfiledEvents() const;
+ // Retrieve the globally unique profiling ID corresponding to this command.
+ const rpc::ProfilingId& getProfilingId() const;
+ // Retrieve the original RPC which this ProfilingRPC wraps.
+ RpcCommandBase& wrappedRpc();
+ // Destructively move the wrapped RPC.
+ std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
+ // Message type of the wrapped RPC
+ rpc::MessageType wrappedMessageType() const;
+ // Set the wrapped RPC for this RPC.
+ void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
+
+ private:
+ // message type
+ const rpc::MessageType messageType_;
+ // wrapped message
+ c10::intrusive_ptr<rpc::Message> wrappedMessage_;
+ std::unique_ptr<RpcCommandBase> wrappedRpc_;
+ rpc::MessageType wrappedMessageType_;
+ std::vector<torch::Tensor> tensors_;
+ const std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents_;
+ const rpc::ProfilingId profilingId_;
+ };
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+ #include <torch/csrc/distributed/rpc/types.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Internal system RPC to invoke distributed backward pass on remote nodes when
+ // 'rref.backward()' is invoked.
+ class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase {
+ public:
+ RRefBackwardReq(
+ const rpc::RRefId& rrefId,
+ int64_t autogradContextId,
+ bool retainGraph = false);
+
+ const rpc::RRefId& getRRefId() const;
+
+ int64_t getAutogradContextId() const;
+
+ bool retainGraph() const;
+
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<RRefBackwardReq> fromMessage(
+ const rpc::Message& message);
+
+ private:
+ const rpc::RRefId rrefId_;
+ const int64_t autogradContextId_;
+ const bool retainGraph_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Response for the RRefBackwardReq.
+ class TORCH_API RRefBackwardResp : public rpc::RpcCommandBase {
+ public:
+ RRefBackwardResp() = default;
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<RRefBackwardResp> fromMessage(
+ const rpc::Message& message);
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp ADDED
@@ -0,0 +1,408 @@
+ #pragma once
+
+ #include <condition_variable>
+ #include <memory>
+ #include <mutex>
+ #include <stdexcept>
+ #include <unordered_map>
+ #include <utility>
+ #include <vector>
+
+ #include <ATen/ATen.h>
+ #include <c10/macros/Macros.h>
+
+ #include <torch/csrc/distributed/c10d/Types.hpp>
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
+ #include <torch/csrc/distributed/c10d/Work.hpp>
+ #include <torch/csrc/distributed/c10d/debug.h>
+
+ constexpr auto kBackendDefaultTimeout =
+ std::chrono::milliseconds(30 * 60 * 1000);
+
+ namespace c10d {
+
+ class TORCH_API Backend : public torch::CustomClassHolder {
+ public:
+ // Backend Options is a base struct that defines the basic options
+ // when constructing a Backend. Each Backend subclass should
+ // extend this struct and define its options if it wants to provide more
+ // config options (beyond basic ones defined here) to end user.
+ struct TORCH_API Options : torch::CustomClassHolder {
+ explicit Options(
+ std::string backend,
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout)
+ : timeout(timeout), backend(std::move(backend)) {}
+ ~Options() override = default;
+
+ std::chrono::milliseconds timeout;
+
+ // backend name
+ const std::string backend;
+ };
+
+ explicit Backend(int rank, int size);
+ ~Backend() override = 0;
+
+ int getRank() const {
+ return rank_;
+ }
+
+ int getSize() const {
+ return size_;
+ }
+
+ // Returns an unique opaque ID of this backend that can be used to correlate
+ // with its collectives.
+ int64_t getID() const {
+ return reinterpret_cast<std::intptr_t>(this);
+ }
+
+ virtual bool supportsSplitting() const {
+ return false;
+ }
+
+ virtual void startCoalescing() {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not implement startCoalescing"));
+ }
+
+ virtual c10::intrusive_ptr<Work> endCoalescing() {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not implement endCoalescing"));
+ }
+
+ // Subclasses must override this method to return the backend name
+ virtual const std::string getBackendName() const {
+ TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented.");
+ };
+
+ virtual c10::intrusive_ptr<Work> broadcast(
+ std::vector<at::Tensor>& /* tensors */,
+ const BroadcastOptions& /* opts */ = BroadcastOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support broadcast"));
+ }
+
+ virtual c10::intrusive_ptr<Work> allreduce(
+ std::vector<at::Tensor>& /* tensors */,
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support allreduce"));
+ }
+
+ virtual c10::intrusive_ptr<Work> allreduce_sparse(
+ std::vector<at::Tensor>& /* tensors */,
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allreduce sparse"));
+ }
+
+ virtual c10::intrusive_ptr<Work> allreduce_coalesced(
+ std::vector<at::Tensor>& /* tensors */,
+ const AllreduceCoalescedOptions& /* opts */ =
+ AllreduceCoalescedOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allreduce_coalesced"));
+ }
+
+ virtual c10::intrusive_ptr<Work> reduce(
+ std::vector<at::Tensor>& /* tensors */,
+ const ReduceOptions& /* opts */ = ReduceOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support reduce"));
+ }
+
+ virtual c10::intrusive_ptr<Work> allgather(
+ std::vector<std::vector<at::Tensor>>& /* outputTensors */,
+ std::vector<at::Tensor>& /* inputTensors */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support allgather"));
+ }
+
+ // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
+ // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
+ // For implementers of ProcessGroup API and advanced users only.
+ // Note: this function will be deprecated in near future.
+ virtual c10::intrusive_ptr<Work> _allgather_base(
+ at::Tensor& /* outputBuffer */,
+ at::Tensor& /* inputBuffer */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support _allgather_base"));
+ }
+
+ // This function is deprecated and will be moved out of Backend to comms:
+ // * do not add dependencies on this function,
+ // * do not implement it in your Backend, implement _allgather_base
+ // instead.
+ virtual c10::intrusive_ptr<Work> allgather_coalesced(
+ std::vector<std::vector<at::Tensor>>& /* outputTensorLists */,
+ std::vector<at::Tensor>& /* inputTensors */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allgather_coalesced"));
+ }
+
+ // This function is a coalesced version of `allgather_into_tensor` (currently
+ // still named as `_allgather_base`). Each tensor in the vector corresponds to
+ // an input/output of one `allgather_into_tensor` operation.
+ virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
+ std::vector<at::Tensor>& /* outputs */,
+ std::vector<at::Tensor>& /* inputs */,
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support allgather_into_tensor_coalesced"));
+ }
+
+ virtual c10::intrusive_ptr<Work> gather(
+ std::vector<std::vector<at::Tensor>>& /* outputTensors */,
+ std::vector<at::Tensor>& /* inputTensors */,
+ const GatherOptions& /* opts */ = GatherOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support gather"));
+ }
+
+ virtual c10::intrusive_ptr<Work> scatter(
+ std::vector<at::Tensor>& /* outputTensors */,
+ std::vector<std::vector<at::Tensor>>& /* inputTensors */,
+ const ScatterOptions& /* opts */ = ScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support scatter"));
+ }
+
+ virtual c10::intrusive_ptr<Work> reduce_scatter(
+ std::vector<at::Tensor>& /* outputTensors */,
+ std::vector<std::vector<at::Tensor>>& /* inputTensors */,
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support reduce_scatter"));
+ }
+
+ virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
+ at::Tensor& /* outputBuffer */,
+ at::Tensor& /* inputBuffer */,
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support _reduce_scatter_base"));
+ }
+
+ // This function is a coalesced version of `reduce_scatter_tensor` (currently
+ // still named as `_reduce_scatter_base`). Each tensor in the vector
+ // corresponds to an input/output of one `reduce_scatter_tensor` operation.
+ virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
+ std::vector<at::Tensor>& /* outputs */,
+ std::vector<at::Tensor>& /* inputs */,
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ getBackendName(),
+ " does not support reduce_scatter_tensor_coalesced"));
+ }
+
+ virtual c10::intrusive_ptr<Work> alltoall_base(
+ at::Tensor& /* outputBuffer */,
+ at::Tensor& /* inputBuffer */,
+ std::vector<int64_t>& /* outputSplitSizes */,
+ std::vector<int64_t>& /* inputSplitSizes */,
+ const AllToAllOptions& /* opts */ = AllToAllOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support alltoall_base"));
+ }
+
+ virtual c10::intrusive_ptr<Work> alltoall(
+ std::vector<at::Tensor>& /* outputTensors */,
+ std::vector<at::Tensor>& /* inputTensors */,
+ const AllToAllOptions& opts = AllToAllOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support alltoall"));
+ }
+
+ virtual void monitoredBarrier(
+ const BarrierOptions& /* unused */,
+ bool /* unused */ = false) {
+ auto backendName = getBackendName();
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ backendName,
+ " does not support monitoredBarrier, only GLOO supports monitored barrier."));
+ }
+
+ // Agrees on an initial sequence number for the whole group by having rank 0
+ // create it and broadcast it to other ranks using the store. Only implemented
+ // for GLOO and NCCL backends currently.
+ virtual void setSequenceNumberForGroup() {
+ auto backendName = getBackendName();
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ backendName,
+ " does not yet support sequence numbers."));
+ }
+
+ // Retrieves the current sequence number for the whole group, which should be
+ // in sync. If the returned number is not consistent across the group, it
+ // may indicate that there is some sort of collective desynchronization.
+ virtual uint64_t getSequenceNumberForGroup() {
+ auto backendName = getBackendName();
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ",
+ backendName,
+ " does not yet support sequence numbers."));
+ }
+
+ virtual c10::intrusive_ptr<Work> send(
+ std::vector<at::Tensor>& /* tensors */,
+ int /* dstRank */,
+ int /* tag */) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support send"));
+ }
+
+ virtual c10::intrusive_ptr<Work> recv(
+ std::vector<at::Tensor>& /* tensors */,
+ int /* srcRank */,
+ int /* tag */) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support recv"));
+ }
+
+ virtual c10::intrusive_ptr<Work> recvAnysource(
+ std::vector<at::Tensor>& /* tensors */,
+ int /* tag */) {
+ TORCH_CHECK(
+ false,
+ c10::str(
+ "Backend ", getBackendName(), " does not support recvAnysource"));
+ }
+
+ virtual c10::intrusive_ptr<Work> barrier(
+ const BarrierOptions& /* opts */ = BarrierOptions()) {
+ TORCH_CHECK(
+ false,
+ c10::str("Backend ", getBackendName(), " does not support barrier"));
+ }
+
+ virtual void registerOnCompletionHook(
+ std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
+ TORCH_CHECK(
+ false,
+ "Only ProcessGrouppNCCL supports onCompletion hook, but got ",
+ getBackendName(),
+ " backend.");
+ }
+
+ virtual void waitForPendingWorks() {
+ TORCH_CHECK(
+ false,
+ "Only ProcessGrouppNCCL supports waitForPendingWorks, but got ",
+ getBackendName(),
+ " backend.");
+ }
+
+ virtual void enableCollectivesTiming() {
+ TORCH_CHECK(
+ false,
+ "Backend ",
+ getBackendName(),
+ " is missing implementation of enableCollectivesTiming.");
+ }
+
+ bool hasHooks() const {
+ return onCompletionHook_ != nullptr;
+ }
+
+ // Do not call this directly, use ProcessGroup::setGroupName instead.
+ void setGroupName(const std::string& name) {
+ pg_name_ = name;
+ }
+
+ const std::string& getGroupName() const {
+ return pg_name_;
+ }
+
+ // See similar functions in ProcessGroup.hpp for context.
+ c10::optional<at::Device> getBoundDeviceId() const {
+ return bound_device_id_;
+ }
+
+ // Perform an eager connect to the specified device if the backend supports
+ // it.
+ virtual void eagerConnectSingleDevice(at::Device device) {
+ // no-op in the default case; this is an optimization some
+ // backends may perform
+ }
+
+ void setBoundDeviceId(c10::optional<at::Device> device) {
+ if (device) {
+ TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index");
+ }
+ bound_device_id_ = device;
+ }
+
+ protected:
+ // Implementations of this interface need to call this to setup
+ // appropriate logging etc.
+ void init();
+
+ const int rank_;
+ const int size_;
+ // Debug level setting. It is parsed once when ProcessGroup is constructed and
+ // remains the same across use of this process group.
+ DebugLevel dist_debug_level_;
+ std::string pg_name_;
+
+ std::function<void(std::shared_ptr<WorkInfo>)> onCompletionHook_;
+
+ c10::optional<at::Device> bound_device_id_;
+ };
+
+ } // namespace c10d
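Every collective on Backend defaults to TORCH_CHECK(false, ...), so a subclass only overrides what it actually supports. Below is a minimal sketch of such a subclass based on the declarations above; DummyBackend and its behavior are invented for illustration, and it borrows FakeWork from FakeProcessGroup.hpp (next file) to return an already-completed Work.

#include <torch/csrc/distributed/c10d/Backend.hpp>
#include <torch/csrc/distributed/c10d/FakeProcessGroup.hpp>  // for FakeWork

namespace c10d {

// Hypothetical backend that only implements allreduce; every other
// collective keeps the base-class default of failing with "not supported".
class DummyBackend : public Backend {
 public:
  DummyBackend(int rank, int size) : Backend(rank, size) {}

  const std::string getBackendName() const override {
    return "dummy";
  }

  c10::intrusive_ptr<Work> allreduce(
      std::vector<at::Tensor>& tensors,
      const AllreduceOptions& opts = AllreduceOptions()) override {
    // A real backend would launch communication here; this sketch leaves the
    // tensors untouched and reports the work as already finished.
    TORCH_CHECK(!tensors.empty(), "expected at least one tensor");
    return c10::make_intrusive<FakeWork>();
  }
};

} // namespace c10d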
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FakeProcessGroup.hpp ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
4
+
5
+ namespace c10d {
6
+
7
+ class FakeWork : public Work {
8
+ public:
9
+ bool wait(std::chrono::milliseconds timeout) override {
10
+ return true;
11
+ }
12
+
13
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override {
14
+ auto fut = c10::make_intrusive<c10::ivalue::Future>(c10::NoneType::get());
15
+ fut->markCompleted();
16
+ return fut;
17
+ }
18
+ };
19
+
20
+ class FakeProcessGroup : public Backend {
21
+ public:
22
+ FakeProcessGroup(int rank, int size) : Backend(rank, size) {}
23
+
24
+ c10::intrusive_ptr<Work> broadcast(
25
+ std::vector<at::Tensor>& /* tensors */,
26
+ const BroadcastOptions& /* opts */ = BroadcastOptions()) override {
27
+ return c10::make_intrusive<FakeWork>();
28
+ }
29
+
30
+ c10::intrusive_ptr<Work> allreduce(
31
+ std::vector<at::Tensor>& /* tensors */,
32
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) override {
33
+ return c10::make_intrusive<FakeWork>();
34
+ }
35
+
36
+ c10::intrusive_ptr<Work> allreduce_sparse(
37
+ std::vector<at::Tensor>& /* tensors */,
38
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) override {
39
+ return c10::make_intrusive<FakeWork>();
40
+ }
41
+
42
+ c10::intrusive_ptr<Work> allreduce_coalesced(
43
+ std::vector<at::Tensor>& /* tensors */,
44
+ const AllreduceCoalescedOptions& /* opts */ =
45
+ AllreduceCoalescedOptions()) override {
46
+ return c10::make_intrusive<FakeWork>();
47
+ }
48
+
49
+ c10::intrusive_ptr<Work> reduce(
50
+ std::vector<at::Tensor>& /* tensors */,
51
+ const ReduceOptions& /* opts */ = ReduceOptions()) override {
52
+ return c10::make_intrusive<FakeWork>();
53
+ }
54
+
55
+ // NOTE [allgather on FakeProcessGroup]
56
+ // Assume each rank have the same input tensor so we just copy to the results
57
+ // since it's not a real allgather, we simply make this copying logic to let
58
+ // some simple validation works (i.e. calling allgather to see if each rank
59
+ // have the same tensor or not).
60
+ //
61
+ // NOTE: in general it's not good form to try to make FakeProcessGroup work
62
+ // with real data, but the reasoning here is that we want FakeProcessGroup to
63
+ // work with DeviceMesh's init code that have the data validation, which
64
+ // makes it worth the tradeoff.
65
+ c10::intrusive_ptr<Work> allgather(
66
+ std::vector<std::vector<at::Tensor>>& outputTensors,
67
+ std::vector<at::Tensor>& inputTensors,
68
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) override {
69
+ for (auto& tensor : outputTensors[0]) {
70
+ tensor.copy_(inputTensors[0]);
71
+ }
72
+ return c10::make_intrusive<FakeWork>();
73
+ }
74
+
75
+ c10::intrusive_ptr<Work> _allgather_base(
76
+ at::Tensor& outputBuffer,
77
+ at::Tensor& inputBuffer,
78
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) override {
79
+ auto chunks = outputBuffer.chunk(size_);
80
+ for (auto& tensor : chunks) {
81
+ tensor.copy_(inputBuffer);
82
+ }
83
+ return c10::make_intrusive<FakeWork>();
84
+ }
85
+
86
+ c10::intrusive_ptr<Work> allgather_coalesced(
87
+ std::vector<std::vector<at::Tensor>>& /* outputTensorLists */,
88
+ std::vector<at::Tensor>& /* inputTensors */,
89
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) override {
90
+ return c10::make_intrusive<FakeWork>();
91
+ }
92
+
93
+ c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
94
+ std::vector<at::Tensor>& outputs,
95
+ std::vector<at::Tensor>& inputs,
96
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) override {
97
+ for (size_t i = 0; i < outputs.size(); ++i) {
98
+ auto chunks = outputs[i].chunk(size_);
99
+ for (auto& chunk : chunks) {
100
+ chunk.copy_(inputs[i]);
101
+ }
102
+ }
103
+ return c10::make_intrusive<FakeWork>();
104
+ }
105
+
106
+ c10::intrusive_ptr<Work> gather(
107
+ std::vector<std::vector<at::Tensor>>& /* outputTensors */,
108
+ std::vector<at::Tensor>& /* inputTensors */,
109
+ const GatherOptions& /* opts */ = GatherOptions()) override {
110
+ return c10::make_intrusive<FakeWork>();
111
+ }
112
+
113
+ c10::intrusive_ptr<Work> scatter(
114
+ std::vector<at::Tensor>& /* outputTensors */,
115
+ std::vector<std::vector<at::Tensor>>& /* inputTensors */,
116
+ const ScatterOptions& /* opts */ = ScatterOptions()) override {
117
+ return c10::make_intrusive<FakeWork>();
118
+ }
119
+
120
+ c10::intrusive_ptr<Work> reduce_scatter(
121
+ std::vector<at::Tensor>& /* outputTensors */,
122
+ std::vector<std::vector<at::Tensor>>& /* inputTensors */,
123
+ const ReduceScatterOptions& /* opts */ =
124
+ ReduceScatterOptions()) override {
125
+ return c10::make_intrusive<FakeWork>();
126
+ }
127
+
128
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
129
+ at::Tensor& /* outputBuffer */,
130
+ at::Tensor& /* inputBuffer */,
131
+ const ReduceScatterOptions& /* opts */ =
132
+ ReduceScatterOptions()) override {
133
+ return c10::make_intrusive<FakeWork>();
134
+ }
135
+
136
+ c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
137
+ std::vector<at::Tensor>& /* outputs */,
138
+ std::vector<at::Tensor>& /* inputs */,
139
+ const ReduceScatterOptions& /* opts */ =
140
+ ReduceScatterOptions()) override {
141
+ return c10::make_intrusive<FakeWork>();
142
+ }
143
+
144
+ c10::intrusive_ptr<Work> alltoall_base(
145
+ at::Tensor& /* outputBuffer */,
146
+ at::Tensor& /* inputBuffer */,
147
+ std::vector<int64_t>& /* outputSplitSizes */,
148
+ std::vector<int64_t>& /* inputSplitSizes */,
149
+ const AllToAllOptions& /* opts */ = AllToAllOptions()) override {
150
+ return c10::make_intrusive<FakeWork>();
151
+ }
152
+
153
+ c10::intrusive_ptr<Work> alltoall(
154
+ std::vector<at::Tensor>& /* outputTensors */,
155
+ std::vector<at::Tensor>& /* inputTensors */,
156
+ const AllToAllOptions& opts = AllToAllOptions()) override {
157
+ return c10::make_intrusive<FakeWork>();
158
+ }
159
+
160
+ c10::intrusive_ptr<Work> send(
161
+ std::vector<at::Tensor>& /* tensors */,
162
+ int /* dstRank */,
163
+ int /* tag */) override {
164
+ return c10::make_intrusive<FakeWork>();
165
+ }
166
+
167
+ c10::intrusive_ptr<Work> recv(
168
+ std::vector<at::Tensor>& /* tensors */,
169
+ int /* srcRank */,
170
+ int /* tag */) override {
171
+ return c10::make_intrusive<FakeWork>();
172
+ }
173
+
174
+ c10::intrusive_ptr<Work> recvAnysource(
175
+ std::vector<at::Tensor>& /* tensors */,
176
+ int /* tag */) override {
177
+ return c10::make_intrusive<FakeWork>();
178
+ }
179
+
180
+ c10::intrusive_ptr<Work> barrier(
181
+ const BarrierOptions& /* opts */ = BarrierOptions()) override {
182
+ return c10::make_intrusive<FakeWork>();
183
+ }
184
+ };
185
+
186
+ } // namespace c10d
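
A hypothetical illustration (not part of the uploaded diff): the copy semantics that allgather_into_tensor_coalesced above implements, reproduced with plain ATen calls and an assumed world size of 4.

// Sketch only; world_size stands in for the group's size_ member.
#include <ATen/ATen.h>

void fake_allgather_semantics() {
  const int64_t world_size = 4;                      // assumed value
  at::Tensor input = at::arange(8, at::kFloat);      // one rank's local shard
  at::Tensor output = at::empty({world_size * 8}, at::kFloat);
  for (auto& chunk : output.chunk(world_size)) {
    chunk.copy_(input);                              // every chunk mirrors the local input
  }
}
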
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp ADDED
@@ -0,0 +1,63 @@
1
+ #pragma once
2
+
3
+ #include <sys/types.h>
4
+
5
+ #include <mutex>
6
+ #include <unordered_map>
7
+
8
+ #include <torch/csrc/distributed/c10d/Store.hpp>
9
+
10
+ namespace c10d {
11
+
12
+ class TORCH_API FileStore : public Store {
13
+ public:
14
+ explicit FileStore(std::string path, int numWorkers);
15
+
16
+ ~FileStore() override;
17
+
18
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
19
+
20
+ std::vector<uint8_t> compareSet(
21
+ const std::string& key,
22
+ const std::vector<uint8_t>& expectedValue,
23
+ const std::vector<uint8_t>& desiredValue) override;
24
+
25
+ std::vector<uint8_t> get(const std::string& key) override;
26
+
27
+ int64_t add(const std::string& key, int64_t value) override;
28
+
29
+ int64_t getNumKeys() override;
30
+
31
+ bool deleteKey(const std::string& key) override;
32
+
33
+ bool check(const std::vector<std::string>& keys) override;
34
+
35
+ void wait(const std::vector<std::string>& keys) override;
36
+
37
+ void wait(
38
+ const std::vector<std::string>& keys,
39
+ const std::chrono::milliseconds& timeout) override;
40
+
41
+ // Returns the path used by the FileStore.
42
+ const std::string& getPath() const noexcept {
43
+ return path_;
44
+ }
45
+
46
+ protected:
47
+ int64_t addHelper(const std::string& key, int64_t i);
48
+
49
+ std::string path_;
50
+ off_t pos_{0};
51
+
52
+ int numWorkers_;
53
+ const std::string cleanupKey_;
54
+ const std::string refCountKey_;
55
+ const std::string regularPrefix_;
56
+ const std::string deletePrefix_;
57
+
58
+ std::unordered_map<std::string, std::vector<uint8_t>> cache_;
59
+
60
+ std::mutex activeFileOpLock_;
61
+ };
62
+
63
+ } // namespace c10d
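
A hypothetical usage sketch (not part of the uploaded diff) exercising the FileStore API declared above; the path and worker count are illustrative.

#include <torch/csrc/distributed/c10d/FileStore.hpp>

void file_store_example() {
  auto store = c10::make_intrusive<c10d::FileStore>("/tmp/c10d_store", /*numWorkers=*/2);
  store->set("status", std::vector<uint8_t>{1});      // publish a value
  store->wait({"status"});                            // block until the key exists
  std::vector<uint8_t> value = store->get("status");  // read it back
  int64_t counter = store->add("counter", 1);         // atomic add on an integer key
  (void)value;
  (void)counter;
}
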
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+
5
+ namespace c10d {
6
+
7
+ C10_EXPORT void set_thread_isolation_mode(bool enable);
8
+
9
+ bool get_thread_isolation_mode();
10
+
11
+ C10_EXPORT void register_process_group(
12
+ const std::string& group_name,
13
+ c10::intrusive_ptr<c10d::ProcessGroup> group);
14
+
15
+ C10_EXPORT c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
16
+ const std::string& group_name);
17
+
18
+ C10_EXPORT void unregister_process_group(const std::string& group_name);
19
+
20
+ C10_EXPORT void unregister_all_process_groups();
21
+
22
+ } // namespace c10d
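
A hypothetical sketch (not part of the uploaded diff) of the registry functions declared above; construction of the process group itself is elided.

#include <torch/csrc/distributed/c10d/GroupRegistry.hpp>

void registry_example(const c10::intrusive_ptr<c10d::ProcessGroup>& pg) {
  c10d::register_process_group("my_group", pg);             // make it resolvable by name
  auto resolved = c10d::resolve_process_group("my_group");  // look it up elsewhere
  bool same = resolved.get() == pg.get();                   // same underlying object
  (void)same;
  c10d::unregister_process_group("my_group");               // drop the registration
}
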
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <sys/types.h>
4
+
5
+ #include <condition_variable>
6
+ #include <mutex>
7
+ #include <unordered_map>
8
+
9
+ #include <torch/csrc/distributed/c10d/Store.hpp>
10
+
11
+ namespace c10d {
12
+
13
+ class TORCH_API HashStore : public Store {
14
+ public:
15
+ ~HashStore() override = default;
16
+
17
+ void set(const std::string& key, const std::vector<uint8_t>& data) override;
18
+
19
+ std::vector<uint8_t> compareSet(
20
+ const std::string& key,
21
+ const std::vector<uint8_t>& expectedValue,
22
+ const std::vector<uint8_t>& desiredValue) override;
23
+
24
+ std::vector<uint8_t> get(const std::string& key) override;
25
+
26
+ void wait(const std::vector<std::string>& keys) override {
27
+ wait(keys, Store::kDefaultTimeout);
28
+ }
29
+
30
+ void wait(
31
+ const std::vector<std::string>& keys,
32
+ const std::chrono::milliseconds& timeout) override;
33
+
34
+ int64_t add(const std::string& key, int64_t value) override;
35
+
36
+ int64_t getNumKeys() override;
37
+
38
+ bool check(const std::vector<std::string>& keys) override;
39
+
40
+ bool deleteKey(const std::string& key) override;
41
+
42
+ void append(const std::string& key, const std::vector<uint8_t>& value)
43
+ override;
44
+
45
+ std::vector<std::vector<uint8_t>> multiGet(
46
+ const std::vector<std::string>& keys) override;
47
+
48
+ void multiSet(
49
+ const std::vector<std::string>& keys,
50
+ const std::vector<std::vector<uint8_t>>& values) override;
51
+
52
+ // Returns true if this store support append, multiGet and multiSet
53
+ bool hasExtendedApi() const override;
54
+
55
+ protected:
56
+ std::unordered_map<std::string, std::vector<uint8_t>> map_;
57
+ std::mutex m_;
58
+ std::condition_variable cv_;
59
+ };
60
+
61
+ } // namespace c10d
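
A hypothetical sketch (not part of the uploaded diff) of the in-memory HashStore, including the extended key-value API it advertises through hasExtendedApi(); keys and values are illustrative.

#include <torch/csrc/distributed/c10d/HashStore.hpp>

void hash_store_example() {
  auto store = c10::make_intrusive<c10d::HashStore>();
  store->set("a", std::vector<uint8_t>{1});
  if (store->hasExtendedApi()) {
    store->append("a", std::vector<uint8_t>{2, 3});   // extend the stored bytes
    store->multiSet({"b", "c"}, {{4}, {5}});          // batched writes
    auto values = store->multiGet({"a", "b", "c"});   // batched reads
    (void)values;
  }
}
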
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp ADDED
@@ -0,0 +1,529 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_NCCL
4
+
5
+ #include <stdio.h>
6
+ #include <stdlib.h>
7
+
8
+ #include <memory>
9
+ #include <mutex>
10
+ #include <thread>
11
+
12
+ #include <ATen/ATen.h>
13
+ #include <c10/util/Exception.h>
14
+ #include <c10/util/Optional.h>
15
+ #include <nccl.h>
16
+
17
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
18
+ (NCCL_MINOR >= 14)
19
+ #define NCCL_HAS_COMM_NONBLOCKING
20
+ #endif
21
+
22
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
23
+ (NCCL_MINOR >= 18)
24
+ #define NCCL_HAS_COMM_SPLIT
25
+ #endif
26
+
27
+ // ncclGetLastError() is enabled only for NCCL versions 2.13+
28
+ // ncclRemoteError only exists in NCCL versions 2.13+
29
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
30
+ (NCCL_MINOR >= 13)
31
+ #define ENABLE_NCCL_GET_LAST_ERROR
32
+ #define NCCL_REMOTE_ERROR
33
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
34
+ #define ENABLE_NCCL_GET_LAST_ERROR
35
+ #define NCCL_REMOTE_ERROR
36
+ #endif
37
+
38
+ // Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort()
39
+ // and ncclCommGetAsyncError() are not supported in earlier versions.
40
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
41
+ (NCCL_MINOR >= 4)
42
+ #define ENABLE_NCCL_ERROR_CHECKING
43
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
44
+ #define ENABLE_NCCL_ERROR_CHECKING
45
+ #endif
46
+
47
+ // P2P is enabled only for NCCL versions 2.7+ since ncclSend()
48
+ // and ncclRecv() are not supported in earlier versions.
49
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
50
+ (NCCL_MINOR >= 7)
51
+ #define ENABLE_NCCL_P2P_SUPPORT
52
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
53
+ #define ENABLE_NCCL_P2P_SUPPORT
54
+ #endif
55
+
56
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
57
+ (NCCL_MINOR >= 11)
58
+ #define ENABLE_NCCL_PREMUL_SUM_SUPPORT
59
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
60
+ #define ENABLE_NCCL_PREMUL_SUM_SUPPORT
61
+ #endif
62
+
63
+ #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
64
+ (NCCL_MINOR >= 17)
65
+ #define NCCL_HAS_COMM_CTA_CGA
66
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
67
+ #define NCCL_HAS_COMM_CTA_CGA
68
+ #endif
69
+
70
+ #if defined(NCCL_REGISTRATION_SUPPORTED) || \
71
+ ((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
72
+ (NCCL_MINOR >= 19)))
73
+ #define NCCL_HAS_COMM_REGISTER
74
+ #elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
75
+ #define NCCL_HAS_COMM_REGISTER
76
+ #endif
77
+
78
+ // Macro to throw on a non-successful NCCL return value.
79
+ #define C10D_NCCL_CHECK(cmd, failureReason) \
80
+ do { \
81
+ ncclResult_t result = cmd; \
82
+ if (result != ncclSuccess) { \
83
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
84
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
85
+ "\n" + getNcclErrorDetailStr(result, failureReason); \
86
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
87
+ } \
88
+ } while (0)
89
+
90
+ // Macro to throw on a non-successful NCCL return value for NONBLOCKING calls.
91
+ #define C10D_NCCL_CHECK_NONBLOCKING(cmd, failureReason) \
92
+ do { \
93
+ ncclResult_t result = cmd; \
94
+ if (result != ncclSuccess && result != ncclInProgress) { \
95
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
96
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
97
+ "\n" + getNcclErrorDetailStr(result, failureReason); \
98
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
99
+ } \
100
+ } while (0)
101
+
102
+ // Macro to throw on a non-successful NCCL return value, non-blocking.
103
+ #define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \
104
+ ncclResult_t result = cmd; \
105
+ auto startTimepoint = std::chrono::steady_clock::now(); \
106
+ while (result == ncclInProgress) { \
107
+ if (nccl_nonblocking_timeout() > 0) { \
108
+ auto currentTimepoint = std::chrono::steady_clock::now(); \
109
+ auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
110
+ currentTimepoint - startTimepoint) \
111
+ .count(); \
112
+ if (timeElapsed > nccl_nonblocking_timeout()) { \
113
+ std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \
114
+ std::to_string(__LINE__) + ", " + \
115
+ ncclGetErrorWithVersion(result) + "\n" + \
116
+ getNcclErrorDetailStr(result, failureReason); \
117
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
118
+ } \
119
+ } \
120
+ ncclCommGetAsyncError(comm, &result); \
121
+ } \
122
+ if (result != ncclSuccess) { \
123
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
124
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
125
+ "\n" + getNcclErrorDetailStr(result, failureReason); \
126
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
127
+ }
128
+
129
+ #define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comm, failureReason) \
130
+ ncclResult_t state = cmd; \
131
+ auto startTimepoint = std::chrono::steady_clock::now(); \
132
+ if (state == ncclInProgress) { \
133
+ do { \
134
+ if (nccl_nonblocking_timeout() > 0) { \
135
+ auto currentTimepoint = std::chrono::steady_clock::now(); \
136
+ auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
137
+ currentTimepoint - startTimepoint) \
138
+ .count(); \
139
+ if (timeElapsed > nccl_nonblocking_timeout()) { \
140
+ std::string err = "NCCL timeout in: " + std::string(__FILE__) + \
141
+ ":" + std::to_string(__LINE__) + ", " + \
142
+ ncclGetErrorWithVersion(state) + "\n" + \
143
+ getNcclErrorDetailStr(state, failureReason); \
144
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
145
+ } \
146
+ } \
147
+ ncclCommGetAsyncError(comm->getNcclComm(), &state); \
148
+ } while (state == ncclInProgress); \
149
+ } \
150
+ if (state != ncclSuccess) { \
151
+ std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
152
+ std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \
153
+ "\n" + getNcclErrorDetailStr(state, failureReason); \
154
+ TORCH_CHECK_WITH(DistBackendError, false, err); \
155
+ }
156
+
157
+ // Macro to print and abort on a non-successful NCCL return value.
158
+ #define C10D_NCCL_ASSERT(cmd) \
159
+ do { \
160
+ ncclResult_t result = cmd; \
161
+ if (result != ncclSuccess) { \
162
+ std::string err = ncclGetErrorWithVersion(result); \
163
+ fprintf( \
164
+ stderr, \
165
+ "NCCL error in: %s:%d, %s\n", \
166
+ __FILE__, \
167
+ __LINE__, \
168
+ err.c_str()); \
169
+ abort(); \
170
+ } \
171
+ } while (0)
172
+
173
+ namespace c10d {
174
+
175
+ TORCH_API size_t hashTensors(const std::vector<at::Tensor>& tensors);
176
+ std::string getNcclVersion();
177
+ std::string ncclGetErrorWithVersion(ncclResult_t error);
178
+ bool nccl_use_nonblocking();
179
+ int nccl_nonblocking_timeout();
180
+
181
+ // Provides additional detail into NCCL error codes based on when these are
182
+ // thrown in the NCCL codebase.
183
+ std::string getNcclErrorDetailStr(
184
+ ncclResult_t error,
185
+ c10::optional<std::string> processGroupFailureReason = c10::nullopt);
186
+
187
+ // Write NCCL debug info to local disk or any storage users define.
188
+ // There are some constraints we set for the debug info writer:
189
+ // 1. The writer should only be registered once.
190
+ // 2. Once registered, users cannot change it including un-register.
191
+ // 3. It is recommended to register the customized writer in the trainer setup,
192
+ // If users don't register before calling launchAsyncDebugDump, then users
193
+ // lose the chance to register (and the default writer will be
194
+ // auto-registered).
195
+ class TORCH_API DebugInfoWriter {
196
+ public:
197
+ virtual ~DebugInfoWriter();
198
+ virtual void write(const std::string& ncclTrace);
199
+ static DebugInfoWriter& getWriter(int rank);
200
+ static void registerWriter(std::unique_ptr<DebugInfoWriter> writer);
201
+
202
+ protected:
203
+ DebugInfoWriter(std::string namePrefix, int rank) {
204
+ filename_ = c10::str(namePrefix, rank);
205
+ }
206
+ std::string filename_;
207
+
208
+ private:
209
+ static std::unique_ptr<DebugInfoWriter> writer_;
210
+ static std::atomic<bool> hasWriterRegistered_;
211
+ };
212
+
213
+ // RAII wrapper for NCCL communicator
214
+ class NCCLComm {
215
+ public:
216
+ explicit NCCLComm(ncclComm_t ncclComm)
217
+ : ncclComm_(ncclComm),
218
+ aborted_(false),
219
+ ncclAsyncErr_(ncclSuccess),
220
+ commFailureReason_(c10::nullopt),
221
+ initialized_(false) {}
222
+
223
+ NCCLComm() : NCCLComm(nullptr) {}
224
+
225
+ ~NCCLComm() noexcept {
226
+ // Add lock in this destructor, as aborted_ needs to be read after memory
227
+ // barrier here.
228
+ std::unique_lock<std::mutex> lock(mutex_);
229
+ if (ncclComm_ && !aborted_) {
230
+ #ifdef ENABLE_NCCL_ERROR_CHECKING
231
+ // Use ncclCommAbort instead of ncclCommDestroy here since
232
+ // ncclCommDestroy could block forever waiting for work to complete on
233
+ // the communicator.
234
+ C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_));
235
+ #else
236
+ C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_));
237
+ #endif
238
+ }
239
+ }
240
+
241
+ static std::shared_ptr<NCCLComm> create(
242
+ int numRanks,
243
+ int rank,
244
+ ncclUniqueId commId) {
245
+ auto comm = std::make_shared<NCCLComm>();
246
+ C10D_NCCL_CHECK(
247
+ ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank),
248
+ c10::nullopt);
249
+ comm->ncclId_ = commId;
250
+ comm->rank_ = rank;
251
+ comm->initialized_ = true;
252
+ return comm;
253
+ }
254
+
255
+ #ifdef NCCL_HAS_COMM_NONBLOCKING
256
+ static std::shared_ptr<NCCLComm> create(
257
+ int numRanks,
258
+ int rank,
259
+ ncclUniqueId commId,
260
+ ncclConfig_t& config) {
261
+ auto comm = std::make_shared<NCCLComm>();
262
+ bool isInitialized = false;
263
+ if (nccl_use_nonblocking()) {
264
+ config.blocking = 0;
265
+ LOG(INFO) << "Rank " << rank
266
+ << ": creating NCCL communicator in nonblocking mode";
267
+ C10D_NCCL_CHECK_NONBLOCKING(
268
+ ncclCommInitRankConfig(
269
+ &(comm->ncclComm_), numRanks, commId, rank, &config),
270
+ c10::nullopt);
271
+ } else {
272
+ C10D_NCCL_CHECK(
273
+ ncclCommInitRankConfig(
274
+ &(comm->ncclComm_), numRanks, commId, rank, &config),
275
+ c10::nullopt);
276
+ // under blocking mode, comm is initialized after NCCL CHECK
277
+ isInitialized = true;
278
+ }
279
+ comm->ncclId_ = commId;
280
+ comm->rank_ = rank;
281
+ comm->initialized_ = isInitialized;
282
+ return comm;
283
+ }
284
+ #endif
285
+
286
+ #ifdef NCCL_HAS_COMM_SPLIT
287
+ static std::shared_ptr<NCCLComm> split(
288
+ NCCLComm* source,
289
+ int color_id,
290
+ int rank,
291
+ ncclConfig_t& config) {
292
+ auto comm = std::make_shared<NCCLComm>();
293
+ C10D_NCCL_CHECK(
294
+ ncclCommSplit(
295
+ source->ncclComm_, color_id, rank, &(comm->ncclComm_), &config),
296
+ c10::nullopt);
297
+ ++source->ncclCommSplitCounter_;
298
+ comm->rank_ = rank;
299
+ return comm;
300
+ }
301
+ #endif
302
+
303
+ #if defined(IS_NCCL_EXP) && defined(NCCL_COMM_DUMP)
304
+ std::unordered_map<std::string, std::string> ncclCommDump() {
305
+ std::unordered_map<std::string, std::string> dump;
306
+ if (isAborted()) {
307
+ LOG(INFO) << "Communicator was aborted before trying to dump its state.";
308
+ return dump;
309
+ }
310
+ C10D_NCCL_CHECK(::ncclCommDump(ncclComm_, dump), c10::nullopt);
311
+ return dump;
312
+ }
313
+ #endif
314
+
315
+ ncclUniqueId getNcclId() {
316
+ return ncclId_;
317
+ }
318
+
319
+ // Must not be copyable
320
+ NCCLComm(const NCCLComm&) = delete;
321
+ NCCLComm& operator=(const NCCLComm&) = delete;
322
+
323
+ // Do not support move assignment as there is no valid use case
324
+ NCCLComm& operator=(NCCLComm&& other) = delete;
325
+
326
+ // Move constructible
327
+ NCCLComm(NCCLComm&& other) {
328
+ // Using other's lock, as it reads other's states
329
+ // Can not use this.mutex_, as this object is being constructed.
330
+ std::unique_lock<std::mutex> lock(other.mutex_);
331
+ std::swap(ncclComm_, other.ncclComm_);
332
+ std::swap(aborted_, other.aborted_);
333
+ std::swap(ncclAsyncErr_, other.ncclAsyncErr_);
334
+ std::swap(initialized_, other.initialized_);
335
+ }
336
+
337
+ ncclComm_t getNcclComm();
338
+
339
+ c10::optional<std::string> getNcclCommFailureReason() const {
340
+ std::unique_lock<std::mutex> lock(mutex_);
341
+ return commFailureReason_;
342
+ }
343
+
344
+ void ncclCommAbort(
345
+ c10::optional<std::string> commFailureReason = c10::nullopt) {
346
+ std::unique_lock<std::mutex> lock(mutex_);
347
+ #ifdef ENABLE_NCCL_ERROR_CHECKING
348
+ if (aborted_) {
349
+ // Should not abort twice.
350
+ return;
351
+ }
352
+
353
+ #ifdef NCCL_HAS_COMM_REGISTER
354
+ // Deregister all registered segments before aborting.
355
+ for (auto& it : registeredSegmentHandles_) {
356
+ void* handle = it.second;
357
+ C10D_NCCL_CHECK(
358
+ ::ncclCommDeregister(ncclComm_, handle),
359
+ c10::str(
360
+ "Failed to deregister segment handle ",
361
+ handle,
362
+ " on ncclComm_ ",
363
+ ncclComm_));
364
+ }
365
+ registeredSegmentHandles_.clear();
366
+ #endif
367
+
368
+ // Set true failure reason if provided by ProcessGroupNCCL (e.g. work
369
+ // timeout)
370
+ commFailureReason_ = commFailureReason;
371
+ LOG(INFO) << "Aborting ncclComm_ " << ncclComm_ << " with reason: "
372
+ << (commFailureReason ? *commFailureReason
373
+ : "No abort reason provided.");
374
+ #ifndef NCCL_HAS_COMM_NONBLOCKING
375
+ C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_);
376
+ #else
377
+ C10D_NCCL_CHECK_TIMEOUT(
378
+ ::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_);
379
+ #endif
380
+ aborted_ = true;
381
+ ncclComm_ = nullptr;
382
+
383
+ // Set an appropriate error so that we avoid using the communicator.
384
+ if (ncclAsyncErr_ == ncclSuccess) {
385
+ ncclAsyncErr_ = ncclSystemError;
386
+ }
387
+ #else
388
+ // This is a NOOP, if error checks are disabled.
389
+ return;
390
+ #endif
391
+ }
392
+
393
+ bool isAborted() const {
394
+ std::unique_lock<std::mutex> lock(mutex_);
395
+ return aborted_;
396
+ }
397
+
398
+ uint64_t getCommSplitCounter() const {
399
+ return ncclCommSplitCounter_;
400
+ }
401
+
402
+ ncclResult_t checkForNcclError() {
403
+ std::unique_lock<std::mutex> lock(mutex_);
404
+ #ifdef ENABLE_NCCL_ERROR_CHECKING
405
+ if (ncclAsyncErr_ != ncclSuccess) {
406
+ return ncclAsyncErr_;
407
+ }
408
+ C10D_NCCL_CHECK(
409
+ ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_);
410
+ return ncclAsyncErr_;
411
+ #else
412
+ // Always return success, if error checks are disabled.
413
+ return ncclSuccess;
414
+ #endif
415
+ }
416
+
417
+ ncclResult_t registerSegment(void* ptr, size_t size) {
418
+ std::unique_lock<std::mutex> lock(mutex_);
419
+ #ifdef NCCL_HAS_COMM_REGISTER
420
+ // We register only segments from cache allocator
421
+ // which are guaranteed to be with disjoint addr ranges. Thus, a ptr always
422
+ // maps to a unique handle and should not be registered before the current
423
+ // ptr is deregistered and freed.
424
+ TORCH_CHECK(
425
+ registeredSegmentHandles_.count(ptr) == 0,
426
+ "Segment with ptr ",
427
+ ptr,
428
+ " has already been registered on ncclComm_ ",
429
+ ncclComm_);
430
+
431
+ void* handle;
432
+ C10D_NCCL_CHECK(
433
+ ncclCommRegister(ncclComm_, ptr, size, &handle),
434
+ c10::str(
435
+ "Failed to register segment with ptr ",
436
+ ptr,
437
+ ", size ",
438
+ size,
439
+ " on ncclComm_ ",
440
+ ncclComm_));
441
+ registeredSegmentHandles_[ptr] = handle;
442
+ return ncclSuccess;
443
+ #else
444
+ return ncclInvalidUsage;
445
+ #endif
446
+ }
447
+
448
+ ncclResult_t deregisterSegment(void* ptr) {
449
+ std::unique_lock<std::mutex> lock(mutex_);
450
+ #ifdef NCCL_HAS_COMM_REGISTER
451
+ TORCH_CHECK(
452
+ registeredSegmentHandles_.count(ptr) == 1,
453
+ "Segment with ptr ",
454
+ ptr,
455
+ " is not registered on ncclComm_ ",
456
+ ncclComm_);
457
+
458
+ void* handle = registeredSegmentHandles_[ptr];
459
+ C10D_NCCL_CHECK(
460
+ ncclCommDeregister(ncclComm_, handle),
461
+ c10::str(
462
+ "Failed to deregister segment handle ",
463
+ handle,
464
+ ", with ptr ",
465
+ ptr,
466
+ " on ncclComm_ ",
467
+ ncclComm_));
468
+ registeredSegmentHandles_.erase(ptr);
469
+ return ncclSuccess;
470
+ #else
471
+ return ncclInvalidUsage;
472
+ #endif
473
+ }
474
+
475
+ friend class ProcessGroupNCCL;
476
+
477
+ protected:
478
+ // a helper function to wait until the communicator is initialized;
479
+ void waitUntilInitialized(int timeoutSecs);
480
+ ncclComm_t ncclComm_;
481
+ // Unique nccl_id for this communicator.
482
+ ncclUniqueId ncclId_;
483
+ bool aborted_;
484
+ uint64_t ncclCommSplitCounter_{0};
485
+ ncclResult_t ncclAsyncErr_;
486
+ mutable std::mutex mutex_;
487
+ // Rank that this communicator corresponds to.
488
+ int rank_;
489
+ // Optional reason for communicator failure, provided by ProcessGroupNCCL for
490
+ // better error messaging.
491
+ c10::optional<std::string> commFailureReason_;
492
+ bool initialized_{false};
493
+ #ifdef NCCL_HAS_COMM_REGISTER
494
+ // Stores handlers for tensors registered by NCCL
495
+ std::unordered_map<void*, void*> registeredSegmentHandles_;
496
+ #endif
497
+ };
498
+
499
+ // Helper that automatically cleans up premul sums.
500
+ struct ncclRedOpRAII {
501
+ ncclRedOpRAII() = default;
502
+ ncclRedOpRAII(ncclRedOp_t op) : op_(op) {}
503
+ ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm)
504
+ : op_(op), comm_(comm), premul_sum_(true) {}
505
+ ncclRedOpRAII(const ncclRedOpRAII&) = delete;
506
+ ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete;
507
+ ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() {
508
+ std::swap(tmp.op_, this->op_);
509
+ std::swap(tmp.comm_, this->comm_);
510
+ std::swap(tmp.premul_sum_, this->premul_sum_);
511
+ }
512
+ #if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT)
513
+ ~ncclRedOpRAII() {
514
+ if (premul_sum_) {
515
+ ncclRedOpDestroy(op_, comm_);
516
+ }
517
+ }
518
+ #endif
519
+ operator ncclRedOp_t() const {
520
+ return op_;
521
+ }
522
+ ncclRedOp_t op_;
523
+ ncclComm_t comm_;
524
+ bool premul_sum_ = false;
525
+ };
526
+
527
+ } // namespace c10d
528
+
529
+ #endif // USE_C10D_NCCL
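
A hypothetical sketch (not part of the uploaded diff): consuming the check macro and the version-gated feature macros defined above. It only applies when building with USE_C10D_NCCL and an NCCL toolkit available, and is placed inside namespace c10d so the helper functions the macro expands to can resolve.

#ifdef USE_C10D_NCCL
#include <torch/csrc/distributed/c10d/NCCLUtils.hpp>

namespace c10d {

void nccl_utils_example() {
  ncclUniqueId id;
  // Throws DistBackendError with version details on failure.
  C10D_NCCL_CHECK(ncclGetUniqueId(&id), c10::nullopt);
#ifdef NCCL_HAS_COMM_NONBLOCKING
  bool nonblocking = nccl_use_nonblocking();  // env-controlled toggle
  (void)nonblocking;
#endif
}

} // namespace c10d
#endif // USE_C10D_NCCL
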
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp ADDED
@@ -0,0 +1,176 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <ATen/record_function.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/ThreadLocalDebugInfo.h>
7
+ #include <string>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+
12
+ class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase {
13
+ public:
14
+ ParamCommsDebugInfo() = default;
15
+ ParamCommsDebugInfo(
16
+ int pgId,
17
+ int rank,
18
+ std::string&& colName,
19
+ int inNelems,
20
+ int outNelems,
21
+ at::ScalarType dType,
22
+ std::vector<int64_t> inSplitSizes,
23
+ std::vector<int64_t> outSplitSizes,
24
+ int globalRankStart,
25
+ int globalRankStride,
26
+ int worldSize);
27
+
28
+ ~ParamCommsDebugInfo() override = default;
29
+
30
+ int getProcessGroupId() const {
31
+ return pgId_;
32
+ }
33
+
34
+ int getRank() const {
35
+ return rank_;
36
+ }
37
+
38
+ int getWorldSize() const {
39
+ return worldSize_;
40
+ }
41
+
42
+ int getGlobalRankStart() const {
43
+ return globalRankStart_;
44
+ }
45
+
46
+ int getGlobalRankStride() const {
47
+ return globalRankStride_;
48
+ }
49
+
50
+ const std::string getColumnName() const {
51
+ return columnName_;
52
+ }
53
+
54
+ int getInMessageNelems() const {
55
+ return inMessageNelems_;
56
+ }
57
+
58
+ int getOutMessageNelems() const {
59
+ return outMessageNelems_;
60
+ }
61
+
62
+ at::ScalarType getDType() const {
63
+ return dType_;
64
+ }
65
+
66
+ const std::vector<int64_t>& getInputSplitSizes() const {
67
+ return inputSplitSizes_;
68
+ }
69
+
70
+ const std::vector<int64_t>& getOutputSplitSizes() const {
71
+ return outputSplitSizes_;
72
+ }
73
+
74
+ const std::vector<int64_t>& getGroupRanks() const {
75
+ return groupRanks_;
76
+ }
77
+
78
+ private:
79
+ int pgId_{};
80
+ int rank_{};
81
+ int worldSize_{};
82
+ std::string columnName_;
83
+ int inMessageNelems_{};
84
+ int outMessageNelems_{};
85
+ at::ScalarType dType_ = at::kByte;
86
+ std::vector<int64_t> inputSplitSizes_;
87
+ std::vector<int64_t> outputSplitSizes_;
88
+ int globalRankStart_;
89
+ int globalRankStride_;
90
+ std::vector<int64_t> groupRanks_{};
91
+ };
92
+
93
+ #define RECORD_PARAM_COMMS( \
94
+ seq, \
95
+ pgId, \
96
+ rank, \
97
+ colName, \
98
+ inNelems, \
99
+ outNelems, \
100
+ dType, \
101
+ inSplitSizes, \
102
+ outSplitSizes, \
103
+ globalRankStart, \
104
+ globalRankStride, \
105
+ worldSize) \
106
+ auto paramCommsInfo = std::make_shared<torch::ParamCommsDebugInfo>( \
107
+ pgId, \
108
+ rank, \
109
+ colName, \
110
+ inNelems, \
111
+ outNelems, \
112
+ dType, \
113
+ inSplitSizes, \
114
+ outSplitSizes, \
115
+ globalRankStart, \
116
+ globalRankStride, \
117
+ worldSize); \
118
+ c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
119
+ std::initializer_list<const c10::IValue> paramList = { \
120
+ c10::IValue(seq), \
121
+ pgId, \
122
+ rank, \
123
+ colName, \
124
+ inSplitSizes, \
125
+ outSplitSizes, \
126
+ globalRankStart, \
127
+ globalRankStride, \
128
+ worldSize}; \
129
+ c10::ArrayRef<const c10::IValue> paramInputs(paramList); \
130
+ RECORD_FUNCTION(at::kParamCommsCallName, paramInputs);
131
+
132
+ #define RECORD_PARAM_COMMS_DATA( \
133
+ seq, \
134
+ pgId, \
135
+ InputTensors, \
136
+ OutputTensors, \
137
+ rank, \
138
+ colName, \
139
+ inNelems, \
140
+ outNelems, \
141
+ dType, \
142
+ inSplitSizes, \
143
+ outSplitSizes, \
144
+ globalRankStart, \
145
+ globalRankStride, \
146
+ worldSize) \
147
+ auto paramCommsInfo = std::make_shared<torch::ParamCommsDebugInfo>( \
148
+ pgId, \
149
+ rank, \
150
+ colName, \
151
+ inNelems, \
152
+ outNelems, \
153
+ dType, \
154
+ inSplitSizes, \
155
+ outSplitSizes, \
156
+ globalRankStart, \
157
+ globalRankStride, \
158
+ worldSize); \
159
+ c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
160
+ std::initializer_list<const c10::IValue> paramList = { \
161
+ c10::IValue(InputTensors), \
162
+ c10::IValue(seq), \
163
+ pgId, \
164
+ rank, \
165
+ colName, \
166
+ inSplitSizes, \
167
+ outSplitSizes, \
168
+ globalRankStart, \
169
+ globalRankStride, \
170
+ worldSize}; \
171
+ c10::ArrayRef<const c10::IValue> paramInputs(paramList); \
172
+ RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \
173
+ at::kParamCommsCallName, \
174
+ paramInputs, \
175
+ std::vector<c10::IValue>(1, c10::IValue(OutputTensors)));
176
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp ADDED
@@ -0,0 +1,743 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
4
+ #include <condition_variable>
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <stdexcept>
8
+ #include <unordered_map>
9
+ #include <utility>
10
+ #include <vector>
11
+
12
+ #include <ATen/ATen.h>
13
+ #include <ATen/core/dispatch/Dispatcher.h>
14
+ #include <c10/macros/Macros.h>
15
+
16
+ #include <torch/csrc/distributed/c10d/Work.hpp>
17
+ // *************************************************************************
18
+ // PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN
19
+ // versions 1.7 and 1.8.
20
+ // PLEASE DO NOT ADD ANY DEPENDENCIES.
21
+ // SEE RFC: https://github.com/pytorch/pytorch/issues/39662
22
+ // *************************************************************************
23
+
24
+ constexpr auto kProcessGroupDefaultTimeout =
25
+ std::chrono::milliseconds(30 * 60 * 1000);
26
+
27
+ namespace c10d {
28
+
29
+ // ProcessGroup is a base class that captures collective and point to
30
+ // point communication in a fixed set of processes.
31
+ //
32
+ // The functions specified in the class below describe the API alone;
33
+ // implementations are provided in subclasses.
34
+ //
35
+ // Every function that performs I/O is executed asynchronously by a
36
+ // thread pool owned by the ProcessGroup (by default). They return an
37
+ // object that can be used to wait for completion or error.
38
+ //
39
+ // The ProcessGroup can instantiate subgroups with fewer or an equal
40
+ // number of members. Implementations must take care that multiple
41
+ // process groups can be used in parallel and synchronize accordingly.
42
+ //
43
+ // The ProcessGroup assumes a fixed set of processes. If the set
44
+ // changes, existing instances must be destructed and instantiation
45
+ // and initialization must start from scratch. For members of the
46
+ // process group to find each other (referred to as rendezvous from
47
+ // hereon)
48
+ //
49
+ class TORCH_API ProcessGroup : public torch::CustomClassHolder {
50
+ public:
51
+ // ProcessGroup Options is a base struct that defines the basic options
52
+ // when constructing a ProcessGroup. Each ProcessGroup subclass should
53
+ // extend this struct and define its options if it wants to provide more
54
+ // config options (beyond basic ones defined here) to end user.
55
+ struct TORCH_API Options : torch::CustomClassHolder {
56
+ explicit Options(
57
+ std::string backend,
58
+ std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout)
59
+ : timeout(timeout), backend(std::move(backend)) {}
60
+ ~Options() override = default;
61
+
62
+ std::chrono::milliseconds timeout;
63
+
64
+ // backend name
65
+ const std::string backend;
66
+ };
67
+
68
+ enum BackendType {
69
+ UNDEFINED = 0,
70
+ GLOO = 1,
71
+ NCCL = 2,
72
+ UCC = 3,
73
+ MPI = 4,
74
+ CUSTOM = 5,
75
+ };
76
+
77
+ // Not used, set for backwards compatibility and only used for TypeDef in
78
+ // Ops.cpp
79
+ explicit ProcessGroup(int rank, int size);
80
+
81
+ explicit ProcessGroup(
82
+ const c10::intrusive_ptr<::c10d::Store>& store,
83
+ int rank,
84
+ int size,
85
+ c10::intrusive_ptr<Options> options);
86
+ ~ProcessGroup() override;
87
+
88
+ int getRank() const {
89
+ return rank_;
90
+ }
91
+
92
+ int getSize() const {
93
+ return size_;
94
+ }
95
+
96
+ // Returns a unique opaque ID of this process group object.
97
+ int64_t getID() const {
98
+ return reinterpret_cast<std::intptr_t>(this);
99
+ }
100
+
101
+ // Returns a unique opaque ID of a backend for the specific backend type
102
+ // that can correlate with this process group's collectives.
103
+ int64_t getBackendID(BackendType backend_type) const {
104
+ return reinterpret_cast<std::intptr_t>(getBackend(backend_type).get());
105
+ }
106
+
107
+ virtual const std::string getBackendName() const {
108
+ return options_->backend;
109
+ };
110
+
111
+ BackendType getBackendType() const {
112
+ return backendType_;
113
+ };
114
+
115
+ virtual void startCoalescing(c10::DeviceType deviceType) {
116
+ // only nccl has implemented startCoalescing so only execute for nccl
117
+ // backends
118
+ auto backend = getBackend(deviceType);
119
+ backend->startCoalescing();
120
+ }
121
+
122
+ virtual c10::intrusive_ptr<Work> endCoalescing(c10::DeviceType deviceType) {
123
+ // only nccl has implemented endCoalescing so only execute for nccl
124
+ // backends
125
+ auto backend = getBackend(deviceType);
126
+ auto work = backend->endCoalescing();
127
+ return work;
128
+ }
129
+
130
+ virtual c10::intrusive_ptr<Work> broadcast(
131
+ std::vector<at::Tensor>& tensors,
132
+ const BroadcastOptions& opts = BroadcastOptions()) {
133
+ static auto op =
134
+ c10::Dispatcher::singleton()
135
+ .findSchemaOrThrow("c10d::broadcast_", "")
136
+ .typed<
137
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
138
+ at::TensorList,
139
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
140
+ int64_t,
141
+ int64_t,
142
+ bool,
143
+ int64_t)>();
144
+ // It's awkward to unbox the opts here and box them again in the custom C++
145
+ // op. But it's also complicated to make opts as a CustomClassHolder. Leave
146
+ // it as it is now.
147
+ return std::get<1>(op.call(
148
+ tensors,
149
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
150
+ opts.rootRank,
151
+ opts.rootTensor,
152
+ opts.asyncOp,
153
+ opts.timeout.count()));
154
+ }
155
+
156
+ virtual c10::intrusive_ptr<Work> allreduce(
157
+ std::vector<at::Tensor>& tensors,
158
+ const AllreduceOptions& opts = AllreduceOptions()) {
159
+ static auto op =
160
+ c10::Dispatcher::singleton()
161
+ .findSchemaOrThrow("c10d::allreduce_", "")
162
+ .typed<
163
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
164
+ at::TensorList,
165
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
166
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
167
+ const c10::optional<at::Tensor>& sparse_indices,
168
+ int64_t)>();
169
+
170
+ return std::get<1>(op.call(
171
+ tensors,
172
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
173
+ c10::make_intrusive<ReduceOp>(opts.reduceOp),
174
+ opts.sparseIndices,
175
+ opts.timeout.count()));
176
+ }
177
+
178
+ virtual c10::intrusive_ptr<Work> allreduce_coalesced(
179
+ std::vector<at::Tensor>& tensors,
180
+ const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) {
181
+ static auto op = c10::Dispatcher::singleton()
182
+ .findSchemaOrThrow("c10d::allreduce_coalesced_", "")
183
+ .typed<c10::intrusive_ptr<::c10d::Work>(
184
+ at::TensorList,
185
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
186
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
187
+ int64_t)>();
188
+
189
+ return op.call(
190
+ tensors,
191
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
192
+ c10::make_intrusive<ReduceOp>(opts.reduceOp),
193
+ opts.timeout.count());
194
+ }
195
+
196
+ virtual c10::intrusive_ptr<Work> reduce(
197
+ std::vector<at::Tensor>& tensors,
198
+ const ReduceOptions& opts = ReduceOptions()) {
199
+ static auto op = c10::Dispatcher::singleton()
200
+ .findSchemaOrThrow("c10d::reduce_", "")
201
+ .typed<c10::intrusive_ptr<::c10d::Work>(
202
+ at::TensorList,
203
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
204
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
205
+ int64_t,
206
+ int64_t,
207
+ int64_t)>();
208
+ return op.call(
209
+ tensors,
210
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
211
+ c10::make_intrusive<ReduceOp>(opts.reduceOp),
212
+ opts.rootRank,
213
+ opts.rootTensor,
214
+ opts.timeout.count());
215
+ }
216
+
217
+ virtual c10::intrusive_ptr<Work> allgather(
218
+ std::vector<std::vector<at::Tensor>>& outputTensors,
219
+ std::vector<at::Tensor>& inputTensors,
220
+ const AllgatherOptions& opts = AllgatherOptions()) {
221
+ static auto op = c10::Dispatcher::singleton()
222
+ .findSchemaOrThrow("c10d::allgather_", "")
223
+ .typed<std::tuple<
224
+ std::vector<std::vector<at::Tensor>>,
225
+ c10::intrusive_ptr<Work>>(
226
+ const std::vector<std::vector<at::Tensor>>&,
227
+ at::TensorList,
228
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
229
+ int64_t)>();
230
+
231
+ return std::get<1>(op.call(
232
+ outputTensors,
233
+ inputTensors,
234
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
235
+ opts.timeout.count()));
236
+ }
237
+
238
+ // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
239
+ // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
240
+ // For implementers of ProcessGroup API and advanced users only.
241
+ // Note: this function will be deprecated in near future.
242
+ virtual c10::intrusive_ptr<Work> _allgather_base(
243
+ at::Tensor& outputBuffer,
244
+ at::Tensor& inputBuffer,
245
+ const AllgatherOptions& opts = AllgatherOptions()) {
246
+ static auto op =
247
+ c10::Dispatcher::singleton()
248
+ .findSchemaOrThrow("c10d::_allgather_base_", "")
249
+ .typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
250
+ at::Tensor&,
251
+ at::Tensor&,
252
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
253
+ bool,
254
+ int64_t)>();
255
+
256
+ return std::get<1>(op.call(
257
+ outputBuffer,
258
+ inputBuffer,
259
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
260
+ opts.asyncOp,
261
+ opts.timeout.count()));
262
+ }
263
+
264
+ // This function is deprecated and will be moved out of ProcessGroup to comms:
265
+ // * do not add dependencies on this function,
266
+ // * do not implement it in your ProcessGroup, implement _allgather_base
267
+ // instead.
268
+ virtual c10::intrusive_ptr<Work> allgather_coalesced(
269
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
270
+ std::vector<at::Tensor>& inputTensors,
271
+ const AllgatherOptions& opts = AllgatherOptions()) {
272
+ static auto op =
273
+ c10::Dispatcher::singleton()
274
+ .findSchemaOrThrow("c10d::allgather_coalesced_", "")
275
+ .typed<c10::intrusive_ptr<Work>(
276
+ const std::vector<std::vector<at::Tensor>>&,
277
+ const at::TensorList&,
278
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();
279
+
280
+ return op.call(
281
+ outputTensorLists,
282
+ inputTensors,
283
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
284
+ }
285
+
286
+ // This function is a coalesced version of `allgather_into_tensor` (currently
287
+ // still named as `_allgather_base`). Each tensor in the vector corresponds to
288
+ // an input/output of one `allgather_into_tensor` operation.
289
+ virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
290
+ std::vector<at::Tensor>& outputTensors,
291
+ std::vector<at::Tensor>& inputTensors,
292
+ const AllgatherOptions& opts = AllgatherOptions()) {
293
+ static auto op =
294
+ c10::Dispatcher::singleton()
295
+ .findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "")
296
+ .typed<c10::intrusive_ptr<Work>(
297
+ const at::TensorList,
298
+ const at::TensorList,
299
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();
300
+
301
+ return op.call(
302
+ outputTensors,
303
+ inputTensors,
304
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
305
+ }
306
+
307
+ virtual c10::intrusive_ptr<Work> gather(
308
+ std::vector<std::vector<at::Tensor>>& outputTensors,
309
+ std::vector<at::Tensor>& inputTensors,
310
+ const GatherOptions& opts = GatherOptions()) {
311
+ static auto op = c10::Dispatcher::singleton()
312
+ .findSchemaOrThrow("c10d::gather_", "")
313
+ .typed<c10::intrusive_ptr<::c10d::Work>(
314
+ const std::vector<std::vector<at::Tensor>>&,
315
+ const at::TensorList&,
316
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
317
+ int64_t,
318
+ int64_t)>();
319
+ return op.call(
320
+ outputTensors,
321
+ inputTensors,
322
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
323
+ opts.rootRank,
324
+ opts.timeout.count());
325
+ }
326
+
327
+ virtual c10::intrusive_ptr<Work> scatter(
328
+ std::vector<at::Tensor>& outputTensors,
329
+ std::vector<std::vector<at::Tensor>>& inputTensors,
330
+ const ScatterOptions& opts = ScatterOptions()) {
331
+ static auto op =
332
+ c10::Dispatcher::singleton()
333
+ .findSchemaOrThrow("c10d::scatter_", "")
334
+ .typed<
335
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
336
+ const at::TensorList&,
337
+ const std::vector<std::vector<at::Tensor>>&,
338
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
339
+ int64_t,
340
+ bool,
341
+ int64_t)>();
342
+ return std::get<1>(op.call(
343
+ outputTensors,
344
+ inputTensors,
345
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
346
+ opts.rootRank,
347
+ opts.asyncOp,
348
+ opts.timeout.count()));
349
+ }
350
+
351
+ virtual c10::intrusive_ptr<Work> reduce_scatter(
352
+ std::vector<at::Tensor>& outputTensors,
353
+ std::vector<std::vector<at::Tensor>>& inputTensors,
354
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) {
355
+ static auto op =
356
+ c10::Dispatcher::singleton()
357
+ .findSchemaOrThrow("c10d::reduce_scatter_", "")
358
+ .typed<
359
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
360
+ const at::TensorList&,
361
+ const std::vector<std::vector<at::Tensor>>&,
362
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
363
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
364
+ int64_t)>();
365
+ return std::get<1>(op.call(
366
+ outputTensors,
367
+ inputTensors,
368
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
369
+ c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
370
+ opts.timeout.count()));
371
+ }
372
+
373
+ virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
374
+ at::Tensor& outputBuffer,
375
+ at::Tensor& inputBuffer,
376
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) {
377
+ static auto op =
378
+ c10::Dispatcher::singleton()
379
+ .findSchemaOrThrow("c10d::_reduce_scatter_base_", "")
380
+ .typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
381
+ at::Tensor&,
382
+ at::Tensor&,
383
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
384
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
385
+ bool,
386
+ int64_t)>();
387
+ return std::get<1>(op.call(
388
+ outputBuffer,
389
+ inputBuffer,
390
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
391
+ c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
392
+ opts.asyncOp,
393
+ opts.timeout.count()));
394
+ }
395
+
396
+ // This function is a coalesced version of `reduce_scatter_tensor` (currently
397
+ // still named as `_reduce_scatter_base`). Each tensor in the vector
398
+ // corresponds to an input/output of one `reduce_scatter_tensor` operation.
399
+ virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
400
+ std::vector<at::Tensor>& outputTensors,
401
+ std::vector<at::Tensor>& inputTensors,
402
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) {
403
+ static auto op =
404
+ c10::Dispatcher::singleton()
405
+ .findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "")
406
+ .typed<c10::intrusive_ptr<Work>(
407
+ const at::TensorList,
408
+ const at::TensorList,
409
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
410
+ const c10::intrusive_ptr<::c10d::ReduceOp>&,
411
+ int64_t)>();
412
+
413
+ return op.call(
414
+ outputTensors,
415
+ inputTensors,
416
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
417
+ c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
418
+ opts.timeout.count());
419
+ }
420
+
421
+ virtual c10::intrusive_ptr<Work> alltoall_base(
422
+ at::Tensor& outputBuffer,
423
+ at::Tensor& inputBuffer,
424
+ std::vector<int64_t>& outputSplitSizes,
425
+ std::vector<int64_t>& inputSplitSizes,
426
+ const AllToAllOptions& opts = AllToAllOptions()) {
427
+ static auto op = c10::Dispatcher::singleton()
428
+ .findSchemaOrThrow("c10d::alltoall_base_", "")
429
+ .typed<c10::intrusive_ptr<::c10d::Work>(
430
+ at::Tensor&,
431
+ at::Tensor&,
432
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
433
+ std::vector<int64_t>,
434
+ std::vector<int64_t>,
435
+ int64_t)>();
436
+ return op.call(
437
+ outputBuffer,
438
+ inputBuffer,
439
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
440
+ outputSplitSizes,
441
+ inputSplitSizes,
442
+ opts.timeout.count());
443
+ }
444
+
445
+ virtual c10::intrusive_ptr<Work> alltoall(
446
+ std::vector<at::Tensor>& outputTensors,
447
+ std::vector<at::Tensor>& inputTensors,
448
+ const AllToAllOptions& opts = AllToAllOptions()) {
449
+ static auto op =
450
+ c10::Dispatcher::singleton()
451
+ .findSchemaOrThrow("c10d::alltoall_", "")
452
+ .typed<
453
+ std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
454
+ const at::TensorList&,
455
+ const at::TensorList&,
456
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
457
+ int64_t)>();
458
+ return std::get<1>(op.call(
459
+ outputTensors,
460
+ inputTensors,
461
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
462
+ opts.timeout.count()));
463
+ }
464
+
465
+ virtual void monitoredBarrier(
466
+ const BarrierOptions& opts,
467
+ bool wait_all_ranks = false) {
468
+ static auto op = c10::Dispatcher::singleton()
469
+ .findSchemaOrThrow("c10d::monitored_barrier_", "")
470
+ .typed<void(
471
+ at::Tensor,
472
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
473
+ const std::vector<int64_t>&,
474
+ int64_t,
475
+ bool)>();
476
+ // Default to using cpu implementation, monitored barrier is only for GLOO
477
+ at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU));
478
+ op.call(
479
+ tensor,
480
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
481
+ opts.device_ids,
482
+ opts.timeout.count(),
483
+ wait_all_ranks);
484
+ }
485
+
486
+ // Agrees on an initial sequence number for the whole group by having rank 0
487
+ // create it and broadcast it to other ranks using the store. Only implemented
488
+ // for GLOO and NCCL backends currently.
489
+ virtual void setSequenceNumberForGroup() {
490
+ auto backendType = getBackendType();
491
+ // TODO: HACK for backend name to get sequence number for that backend.
492
+ if (backendType == ProcessGroup::BackendType::GLOO ||
493
+ backendType == ProcessGroup::BackendType::NCCL ||
494
+ backendType == ProcessGroup::BackendType::UCC) {
495
+ getDefaultBackend()->setSequenceNumberForGroup();
496
+ } else {
497
+ TORCH_CHECK(
498
+ false,
499
+ c10::str(
500
+ "ProcessGroup ",
501
+ getBackendName(),
502
+ " does not yet support sequence numbers."));
503
+ }
504
+ }
505
+
506
+ // Retrieves the current sequence number for the whole group, which should be
507
+ // in sync. If the returned number is not consistent across the group, it
508
+ // may indicate that there is some sort of collective desynchronization.
509
+ virtual uint64_t getSequenceNumberForGroup() {
510
+ auto backendType = getBackendType();
511
+
512
+ // TODO: HACK for backend name to get sequence number for that backend.
513
+ if (backendType == ProcessGroup::BackendType::GLOO ||
514
+ backendType == ProcessGroup::BackendType::NCCL ||
515
+ backendType == ProcessGroup::BackendType::UCC) {
516
+ return getDefaultBackend()->getSequenceNumberForGroup();
517
+ } else {
518
+ TORCH_CHECK(
519
+ false,
520
+ c10::str(
521
+ "ProcessGroup ",
522
+ getBackendName(),
523
+ " does not yet support sequence numbers."));
524
+ }
525
+ }
526
+
527
+ virtual c10::intrusive_ptr<Work> send(
528
+ std::vector<at::Tensor>& tensors,
529
+ int dstRank,
530
+ int tag) {
531
+ static auto op = c10::Dispatcher::singleton()
532
+ .findSchemaOrThrow("c10d::send", "")
533
+ .typed<c10::intrusive_ptr<::c10d::Work>(
534
+ at::TensorList,
535
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
536
+ int64_t,
537
+ int64_t)>();
538
+ return op.call(
539
+ tensors,
540
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
541
+ dstRank,
542
+ tag);
543
+ }
544
+
545
+ virtual c10::intrusive_ptr<Work> recv(
546
+ std::vector<at::Tensor>& tensors,
547
+ int srcRank,
548
+ int tag) {
549
+ static auto op = c10::Dispatcher::singleton()
550
+ .findSchemaOrThrow("c10d::recv_", "")
551
+ .typed<c10::intrusive_ptr<::c10d::Work>(
552
+ at::TensorList,
553
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
554
+ int64_t,
555
+ int64_t)>();
556
+ return op.call(
557
+ tensors,
558
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
559
+ srcRank,
560
+ tag);
561
+ }
562
+
563
+ virtual c10::intrusive_ptr<Work> recvAnysource(
564
+ std::vector<at::Tensor>& tensors,
565
+ int tag) {
566
+ static auto op = c10::Dispatcher::singleton()
567
+ .findSchemaOrThrow("c10d::recv_any_source_", "")
568
+ .typed<c10::intrusive_ptr<::c10d::Work>(
569
+ at::TensorList,
570
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
571
+ int64_t)>();
572
+ return op.call(
573
+ tensors,
574
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
575
+ tag);
576
+ }
577
+
578
+ virtual c10::intrusive_ptr<Work> barrier(
579
+ const BarrierOptions& opts = BarrierOptions()) {
580
+ static at::Tensor tensor;
581
+ // TODO: if nccl was specified then use it
582
+ auto device = opts.device;
583
+ if (device.has_value()) {
584
+ // set device tensor from argument
585
+ tensor = at::empty(
586
+ {1}, at::TensorOptions().device(device.value()).dtype(at::kByte));
587
+ } else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) {
588
+ // set cuda tensor
589
+ tensor = at::empty(
590
+ {1},
591
+ at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte));
592
+ } else {
593
+ // Default to using cpu implementation
594
+ tensor = at::empty(
595
+ {1},
596
+ at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte));
597
+ }
598
+
599
+ static auto op = c10::Dispatcher::singleton()
600
+ .findSchemaOrThrow("c10d::barrier", "")
601
+ .typed<c10::intrusive_ptr<::c10d::Work>(
602
+ at::Tensor,
603
+ const c10::intrusive_ptr<::c10d::ProcessGroup>&,
604
+ const std::vector<int64_t>&,
605
+ int64_t)>();
606
+
607
+ return op.call(
608
+ tensor,
609
+ c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
610
+ opts.device_ids,
611
+ opts.timeout.count());
612
+ }
613
+
614
+ c10::intrusive_ptr<Options> getOptions() {
615
+ return options_;
616
+ }
617
+
618
+ bool hasBackends() {
619
+ return !deviceTypeToBackendType_.empty();
620
+ }
621
+
622
+ void setBackend(
623
+ c10::DeviceType deviceType,
624
+ BackendType backendType,
625
+ const c10::optional<c10::intrusive_ptr<Backend>>& backend) {
626
+ // TODO: should we add these entries after the backend setting succeeds?
627
+ deviceTypeToBackendType_[deviceType] = backendType;
628
+ deviceTypes_.insert(deviceType);
629
+ // if the backendType is already set then reuse it for this device
630
+ if (backendTypeToBackend_.find(backendType) !=
631
+ backendTypeToBackend_.end()) {
632
+ auto existingBackend = backendTypeToBackend_.at(backendType);
633
+ deviceTypeToBackend_[deviceType] = existingBackend;
634
+ TORCH_CHECK(
635
+ existingBackend->getBoundDeviceId() ==
636
+ (*backend)->getBoundDeviceId());
637
+ } else {
638
+ // check if backend has value
639
+ if (backend.has_value()) {
640
+ deviceTypeToBackend_[deviceType] = backend.value();
641
+ backendTypeToBackend_[backendType] = backend.value();
642
+ (*backend)->setBoundDeviceId(bound_device_id_);
643
+ }
644
+ }
645
+ }
646
+
647
+ c10::intrusive_ptr<Backend> getDefaultBackend() const {
648
+ TORCH_CHECK(
649
+ backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(),
650
+ "Could not find the default backend type ",
651
+ backendType_,
652
+ " for Process Group with name ",
653
+ getBackendName(),
654
+ ".");
655
+ return backendTypeToBackend_.at(backendType_);
656
+ }
657
+
658
+ c10::intrusive_ptr<Backend> getBackend(c10::DeviceType deviceType);
659
+
660
+ c10::intrusive_ptr<Backend> getBackend(BackendType backendType) const {
661
+ TORCH_CHECK(
662
+ backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(),
663
+ "Could not find backend type ",
664
+ backendType,
665
+ ".");
666
+ return backendTypeToBackend_.at(backendType);
667
+ }
668
+
669
+ // Return device types supported by this ProcessGroup.
670
+ // Note: the return type is `Device` rather than `DeviceType` for the purpose
671
+ // of easy comparison at Python level. The `Device` will have default index
672
+ // (-1).
673
+ std::vector<c10::Device> getDeviceTypes() const {
674
+ std::vector<c10::Device> devices;
675
+ devices.reserve(deviceTypes_.size());
676
+ for (auto& dt : deviceTypes_) {
677
+ devices.push_back(c10::Device(dt));
678
+ }
679
+ return devices;
680
+ }
681
+
682
+ void registerOnCompletionHook(
683
+ std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
684
+ getDefaultBackend()->registerOnCompletionHook(std::move(hook));
685
+ }
686
+
687
+ void waitForPendingWorks() {
688
+ getDefaultBackend()->waitForPendingWorks();
689
+ }
690
+
691
+ bool hasHooks() const {
692
+ return getDefaultBackend()->hasHooks();
693
+ }
694
+
695
+ const std::string& getGroupName() const;
696
+ void setGroupName(const std::string& name);
697
+ void enableCollectivesTiming();
698
+
699
+ void release_resources() override;
700
+
701
+ // ProcessGroups optionally can be "bound" to a specific device.
702
+ // Currently this is only for nccl and allows for some opt-in
703
+ // optimizations such as automatic use of ncclCommSplit. The device
704
+ // is specified in `init_process_group` and eventually makes it
705
+ // here and then down into the actual backend instances.
706
+ c10::optional<at::Device> getBoundDeviceId() const {
707
+ return bound_device_id_;
708
+ }
709
+
710
+ void setBoundDeviceId(c10::optional<at::Device> device) {
711
+ if (device) {
712
+ TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index");
713
+ }
714
+ bound_device_id_ = device;
715
+ }
716
+
717
+ protected:
718
+ // Implementations of this interface need to call this to setup
719
+ // appropriate logging etc.
720
+ void init();
721
+
722
+ c10::intrusive_ptr<c10d::Store> store_;
723
+ const int rank_;
724
+ const int size_;
725
+ const c10::intrusive_ptr<Options> options_;
726
+ const BackendType backendType_;
727
+
728
+ // Debug level setting. It is parsed once when ProcessGroup is constructed and
729
+ // remains the same across use of this process group.
730
+ DebugLevel dist_debug_level_;
731
+
732
+ // Backend classes for this ProcessGroup
733
+ std::unordered_set<c10::DeviceType> deviceTypes_;
734
+ std::unordered_map<c10::DeviceType, BackendType> deviceTypeToBackendType_;
735
+ std::unordered_map<c10::DeviceType, c10::intrusive_ptr<Backend>>
736
+ deviceTypeToBackend_;
737
+ std::unordered_map<BackendType, c10::intrusive_ptr<Backend>>
738
+ backendTypeToBackend_;
739
+
740
+ c10::optional<at::Device> bound_device_id_;
741
+ };
742
+
743
+ } // namespace c10d
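For orientation, here is a minimal sketch (not part of the header above) of how a caller might inspect the backend routing that ProcessGroup maintains: the default backend keyed by backendType_, the per-device-type backends, and the optional bound device. The `pg` argument is assumed to be an already-constructed process group.

    #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
    #include <iostream>

    // Sketch only: inspect how a ProcessGroup routes work to its backends.
    void inspectProcessGroup(const c10::intrusive_ptr<c10d::ProcessGroup>& pg) {
      // The default backend is resolved from backendType_ (see getDefaultBackend()).
      auto defaultBackend = pg->getDefaultBackend();
      std::cout << "default backend: " << defaultBackend->getBackendName() << "\n";

      // Each registered device type maps to a Backend instance.
      for (const auto& device : pg->getDeviceTypes()) {
        std::cout << "device " << device << " -> "
                  << pg->getBackend(device.type())->getBackendName() << "\n";
      }

      // The optional bound device enables opt-in optimizations such as
      // ncclCommSplit (see getBoundDeviceId() above).
      if (auto bound = pg->getBoundDeviceId()) {
        std::cout << "bound to " << *bound << "\n";
      }
    }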
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp ADDED
@@ -0,0 +1,448 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <condition_variable>
6
+ #include <deque>
7
+ #include <mutex>
8
+ #include <thread>
9
+ #include <unordered_map>
10
+ #include <vector>
11
+
12
+ #include <gloo/algorithm.h>
13
+ #include <gloo/common/error.h>
14
+ #include <gloo/context.h>
15
+ #include <gloo/rendezvous/store.h>
16
+ #include <gloo/transport/device.h>
17
+
18
+ #include <c10/util/hash.h>
19
+
20
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
21
+ #include <torch/csrc/distributed/c10d/Store.hpp>
22
+ #include <torch/csrc/distributed/c10d/Types.hpp>
23
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
24
+
25
+ namespace c10d {
26
+
27
+ constexpr const char* GLOO_BACKEND_NAME = "gloo";
28
+
29
+ // ProcessGroupGloo implements Gloo bindings for c10d.
30
+ //
31
+ // All functions on this class are expected to be called in the same
32
+ // order across processes in the group. This is the only way that we
33
+ // can guarantee to match up the same calls across processes. For
34
+ // multi-threaded usage of process groups, you can use consider using
35
+ // multiple process group instances.
36
+ //
37
+ // The Gloo algorithms that this class calls into are cached by their
38
+ // signature (see description of AlgorithmKey above). This cache works
39
+ // as follows: every function call instantiates an AlgorithmKey and
40
+ // looks in the cache for existing entries. If there is one, it is
41
+ // removed from the cache and returned to the caller. If there are
42
+ // none, a new entry is created and returned. If an entry was created
43
+ // before, but is still in use, the call will block and wait until the
44
+ // entry is returned to the cache.
45
+ //
46
+ // In the future, we hope to extend this to allow multiple entries per
47
+ // key, to enable parallelism for a single key. The number of entries
48
+ // per key must always be identical for all processes. This maximum
49
+ // number can be automatically tuned, but only if we let a single
50
+ // process take charge, and have it broadcast the limits.
51
+ //
52
+ class TORCH_API ProcessGroupGloo : public Backend {
53
+ public:
54
+ // AsyncWork is the Gloo specific superclass for asynchronous work items.
55
+ // We can split asynchronous work into 3 phases:
56
+ // 1) Sanity checks and prepare input (e.g. memcpy)
57
+ // 2) Run operation on background thread
58
+ // 3) Synchronize with completion on foreground thread
59
+ //
60
+ // There is state to be shared between these 3 phases and all of this state
61
+ // is captured in the AsyncWork class and its derivatives.
62
+ //
63
+ // Note: while we are porting operations to use new style collectives, there
64
+ // is a split between operations using the existing caching approach and
65
+ // operations using the new AsyncWork base class. Over time we will port
66
+ // all operations and perform needed cleanup.
67
+ //
68
+ // FIXME: This probably should be called WorkGloo since the work is executed
69
+ // in sync mode by a background thread.
70
+ class TORCH_API AsyncWork : public Work {
71
+ public:
72
+ explicit AsyncWork(
73
+ std::vector<std::vector<at::Tensor>> outputTensors,
74
+ OpType opType,
75
+ uint64_t seq,
76
+ const char* profilingTitle = nullptr,
77
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
78
+ c10::nullopt);
79
+
80
+ ~AsyncWork() override = default;
81
+
82
+ static void execute(c10::intrusive_ptr<AsyncWork> work);
83
+
84
+ virtual void run() = 0;
85
+
86
+ std::vector<at::Tensor> result() override;
87
+
88
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
89
+ uint64_t getSequencenumber() const override;
90
+
91
+ protected:
92
+ friend class ProcessGroupGloo;
93
+
94
+ private:
95
+ void finishWorkGloo();
96
+ void finishWorkGlooError(std::exception_ptr eptr);
97
+ inline void recordAsyncWorkProfilingInfo(
98
+ const char* profilingTitle,
99
+ const c10::optional<std::vector<at::Tensor>>& inputTensors);
100
+
101
+ const std::vector<std::vector<at::Tensor>> outputTensors_;
102
+ c10::intrusive_ptr<at::ivalue::Future> future_;
103
+ std::function<void()> recordFunctionBeforeCallback_;
104
+ const uint64_t seq_;
105
+ };
106
+
107
+ // Wrap c10d store as Gloo store
108
+ class TORCH_API GlooStore : public ::gloo::rendezvous::Store {
109
+ public:
110
+ GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {}
111
+
112
+ void setUint(const std::string& key, const std::vector<uint8_t>& value) {
113
+ store_->set(key, value);
114
+ }
115
+
116
+ void set(const std::string& key, const std::vector<char>& value) override {
117
+ std::vector<uint8_t> tmp(value.begin(), value.end());
118
+ store_->set(key, tmp);
119
+ }
120
+
121
+ std::vector<uint8_t> getUint(const std::string& key) {
122
+ auto value = store_->get(key);
123
+ return value;
124
+ }
125
+
126
+ std::vector<char> get(const std::string& key) override {
127
+ auto value = store_->get(key);
128
+ return std::vector<char>(value.begin(), value.end());
129
+ }
130
+
131
+ void wait(const std::vector<std::string>& keys) override {
132
+ store_->wait(keys, ::c10d::Store::kDefaultTimeout);
133
+ }
134
+
135
+ void wait(
136
+ const std::vector<std::string>& keys,
137
+ const std::chrono::milliseconds& timeout) override {
138
+ store_->wait(keys, timeout);
139
+ }
140
+
141
+ #ifdef GLOO_STORE_HAS_STORE_V2
142
+ bool has_v2_support() override {
143
+ return store_->hasExtendedApi();
144
+ }
145
+
146
+ std::vector<std::vector<char>> multi_get(
147
+ const std::vector<std::string>& keys) override {
148
+ std::vector<std::vector<char>> res;
149
+ for (auto& value : store_->multiGet(keys)) {
150
+ res.emplace_back(std::vector<char>(value.begin(), value.end()));
151
+ }
152
+ return res;
153
+ }
154
+
155
+ void multi_set(
156
+ const std::vector<std::string>& keys,
157
+ const std::vector<std::vector<char>>& values) override {
158
+ std::vector<std::vector<uint8_t>> u_values;
159
+ for (auto& value : values) {
160
+ u_values.emplace_back(std::vector<uint8_t>(value.begin(), value.end()));
161
+ }
162
+ store_->multiSet(keys, u_values);
163
+ }
164
+
165
+ void append(const std::string& key, const std::vector<char>& value)
166
+ override {
167
+ std::vector<uint8_t> tmp(value.begin(), value.end());
168
+ return store_->append(key, tmp);
169
+ }
170
+
171
+ int64_t add(const std::string& key, int64_t value) override {
172
+ return store_->add(key, value);
173
+ }
174
+ #endif
175
+
176
+ protected:
177
+ c10::intrusive_ptr<::c10d::Store> store_;
178
+ };
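The conversion that GlooStore performs is easy to miss: Gloo's rendezvous store traffics in std::vector<char> while c10d::Store uses std::vector<uint8_t>, so every call copies, converts, and forwards. A minimal round-trip sketch, assuming a build with USE_C10D_GLOO; the FileStore path and worker count are placeholders.

    #include <torch/csrc/distributed/c10d/FileStore.hpp>
    #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>

    void glooStoreRoundTrip() {
      // Any c10d::Store works here; FileStore is just the simplest to set up.
      auto c10dStore =
          c10::make_intrusive<c10d::FileStore>("/tmp/c10d_example_store", 1);
      c10d::ProcessGroupGloo::GlooStore glooStore(c10dStore);

      // set() copies char -> uint8_t before forwarding to the wrapped store,
      // and get() converts back on the way out.
      glooStore.set("key", std::vector<char>{'h', 'i'});
      std::vector<char> value = glooStore.get("key");
      (void)value; // "hi"
    }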
179
+
180
+ // For send and recv operations there is no need to pass them to the
181
+ // thread pool as they are entirely completed by the device thread.
182
+ // This work object is used to synchronize completion of the send or
183
+ // recv operation. It keeps a reference to the tensor it is
184
+ // operating on to prevent it from being deallocated while the
185
+ // operation is still in flight.
186
+ class TORCH_API SendWork : public Work {
187
+ public:
188
+ explicit SendWork(
189
+ at::Tensor& tensor,
190
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
191
+ uint64_t seq);
192
+
193
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
194
+
195
+ void abort() override;
196
+
197
+ uint64_t getSequencenumber() const override;
198
+
199
+ protected:
200
+ at::Tensor tensor_;
201
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
202
+ const uint64_t seq_;
203
+ };
204
+
205
+ class TORCH_API RecvWork : public Work {
206
+ public:
207
+ explicit RecvWork(
208
+ at::Tensor& tensor,
209
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
210
+ OpType opType,
211
+ uint64_t seq,
212
+ const char* profilingTitle = nullptr);
213
+
214
+ int sourceRank() const override;
215
+
216
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
217
+
218
+ void abort() override;
219
+
220
+ uint64_t getSequencenumber() const override;
221
+
222
+ protected:
223
+ at::Tensor tensor_;
224
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
225
+ int srcRank_;
226
+ const uint64_t seq_;
227
+ };
228
+
229
+ struct TORCH_API Options : public Backend::Options {
230
+ explicit Options(
231
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout);
232
+
233
+ // return intrusive_ptr of the object
234
+ static c10::intrusive_ptr<Options> create(
235
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout) {
236
+ return c10::make_intrusive<Options>(timeout);
237
+ }
238
+
239
+ std::vector<std::shared_ptr<::gloo::transport::Device>> devices;
240
+ int threads;
241
+ };
242
+
243
+ const std::string getBackendName() const override {
244
+ return std::string(GLOO_BACKEND_NAME);
245
+ }
246
+
247
+ // Helper functions to create a new device object.
248
+ // They are static functions on this class to keep them logically
249
+ // separate from the rest of the code base (e.g. torch/csrc/distributed).
250
+
251
+ // Create new device instance for specific interface.
252
+ static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
253
+ const std::string& interface);
254
+
255
+ // Create new device instance for specific hostname or address.
256
+ static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
257
+ const std::string& hostname);
258
+
259
+ // Create new device instance.
260
+ // It tries to resolve this machine's hostname and bind to that address.
261
+ // If that fails (i.e. the hostname doesn't resolve to an address), it
262
+ // falls back to binding to the loopback address.
263
+ static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();
264
+
265
+ // Create ProcessGroupGloo instance.
266
+ static c10::intrusive_ptr<ProcessGroupGloo> createProcessGroupGloo(
267
+ const c10::intrusive_ptr<Store>& store,
268
+ int rank,
269
+ int size,
270
+ std::chrono::milliseconds timeout);
271
+
272
+ explicit ProcessGroupGloo(
273
+ const c10::intrusive_ptr<Store>& store,
274
+ int rank,
275
+ int size,
276
+ c10::intrusive_ptr<Options> options = Options::create());
277
+
278
+ ~ProcessGroupGloo() override;
279
+
280
+ c10::intrusive_ptr<Options> getOptions() {
281
+ return options_;
282
+ }
283
+
284
+ c10::intrusive_ptr<Work> broadcast(
285
+ std::vector<at::Tensor>& tensors,
286
+ const BroadcastOptions& opts = BroadcastOptions()) override;
287
+
288
+ c10::intrusive_ptr<Work> allreduce(
289
+ std::vector<at::Tensor>& tensors,
290
+ const AllreduceOptions& opts = AllreduceOptions()) override;
291
+
292
+ c10::intrusive_ptr<Work> allreduce_sparse(
293
+ std::vector<at::Tensor>& tensors,
294
+ const AllreduceOptions& opts = AllreduceOptions()) override;
295
+
296
+ c10::intrusive_ptr<Work> allreduce_coalesced(
297
+ std::vector<at::Tensor>& tensors,
298
+ const AllreduceCoalescedOptions& opts =
299
+ AllreduceCoalescedOptions()) override;
300
+
301
+ c10::intrusive_ptr<Work> reduce(
302
+ std::vector<at::Tensor>& tensors,
303
+ const ReduceOptions& opts = ReduceOptions()) override;
304
+
305
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
306
+ at::Tensor& outputTensor,
307
+ at::Tensor& inputTensor,
308
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
309
+
310
+ c10::intrusive_ptr<Work> _allgather_base(
311
+ at::Tensor& output_tensor,
312
+ at::Tensor& input_tensor,
313
+ const AllgatherOptions& opts = AllgatherOptions()) override;
314
+
315
+ c10::intrusive_ptr<Work> allgather(
316
+ std::vector<std::vector<at::Tensor>>& outputs,
317
+ std::vector<at::Tensor>& inputs,
318
+ const AllgatherOptions& opts = AllgatherOptions()) override;
319
+
320
+ c10::intrusive_ptr<Work> allgather_coalesced(
321
+ std::vector<std::vector<at::Tensor>>& output_lists,
322
+ std::vector<at::Tensor>& input_list,
323
+ const AllgatherOptions& opts = AllgatherOptions()) override;
324
+
325
+ c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
326
+ std::vector<at::Tensor>& outputs,
327
+ std::vector<at::Tensor>& inputs,
328
+ const AllgatherOptions& opts = AllgatherOptions()) override;
329
+
330
+ c10::intrusive_ptr<Work> gather(
331
+ std::vector<std::vector<at::Tensor>>& outputs,
332
+ std::vector<at::Tensor>& inputs,
333
+ const GatherOptions& opts = GatherOptions()) override;
334
+
335
+ c10::intrusive_ptr<Work> scatter(
336
+ std::vector<at::Tensor>& outputs,
337
+ std::vector<std::vector<at::Tensor>>& inputs,
338
+ const ScatterOptions& opts = ScatterOptions()) override;
339
+
340
+ c10::intrusive_ptr<Work> reduce_scatter(
341
+ std::vector<at::Tensor>& outputs,
342
+ std::vector<std::vector<at::Tensor>>& inputs,
343
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
344
+
345
+ c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
346
+ std::vector<at::Tensor>& outputTensors,
347
+ std::vector<at::Tensor>& inputTensors,
348
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
349
+
350
+ c10::intrusive_ptr<Work> alltoall_base(
351
+ at::Tensor& outputTensor,
352
+ at::Tensor& inputTensor,
353
+ std::vector<int64_t>& outputCounts,
354
+ std::vector<int64_t>& inputCounts,
355
+ const AllToAllOptions& opts = AllToAllOptions()) override;
356
+
357
+ c10::intrusive_ptr<Work> send(
358
+ std::vector<at::Tensor>& tensors,
359
+ int dstRank,
360
+ int tag) override;
361
+
362
+ c10::intrusive_ptr<Work> recv(
363
+ std::vector<at::Tensor>& tensors,
364
+ int srcRank,
365
+ int tag) override;
366
+
367
+ c10::intrusive_ptr<Work> recvAnysource(
368
+ std::vector<at::Tensor>& tensors,
369
+ int tag) override;
370
+
371
+ c10::intrusive_ptr<Work> barrier(
372
+ const BarrierOptions& opts = BarrierOptions()) override;
373
+
374
+ void enableCollectivesTiming() override;
375
+
376
+ const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const {
377
+ return store_;
378
+ }
379
+
380
+ // Similar to barrier(), but blocks rank 0 until all other ranks have
381
+ // acknowledged that they are alive (through send/recv from rank 0). Rank 0
382
+ // is able to report all failed ranks if waitAllRanks = true, otherwise
383
+ // reports the first rank it detected as failed.
384
+ void monitoredBarrier(
385
+ const BarrierOptions& opts = BarrierOptions(),
386
+ bool waitAllRanks = false) override;
387
+
388
+ // Agrees on an initial sequence number for the whole group by having rank 0
389
+ // create it and broadcast it to other ranks using the store.
390
+ void setSequenceNumberForGroup() override;
391
+
392
+ // Retrieves the current sequence number for the whole group, which should be
393
+ // in sync. If the returned number is not consistent across the group, it
394
+ // may indicate that there is some sort of collective desynchronization.
395
+ uint64_t getSequenceNumberForGroup() override;
396
+
397
+ int getNumThreads() {
398
+ return options_->threads;
399
+ }
400
+
401
+ protected:
402
+ std::unique_ptr<::gloo::rendezvous::Store> store_;
403
+ const c10::intrusive_ptr<Options> options_;
404
+
405
+ // Every Gloo context represents a set of connections to its peers.
406
+ // In order to use more than one device (or allow for parallelism on
407
+ // a single device), you need multiple contexts.
408
+ std::vector<std::shared_ptr<::gloo::Context>> contexts_;
409
+ std::vector<std::thread> threads_;
410
+ bool stop_;
411
+
412
+ // Incremented for every collective we kick off.
413
+ // The value is used as a tag for collective operations. Collectives are kicked
414
+ // off in identical order across processes. Therefore the tag can be used
415
+ // to match up operations during concurrent execution.
416
+ uint32_t collectiveCounter_;
417
+
418
+ // Returns next collective tag to use (uses collectiveCounter_).
419
+ uint32_t nextTag();
420
+
421
+ // Returns the context to use for the specified tag.
422
+ // With `nextTag` returning an increasing number, this should lead
423
+ // to contexts being used in a round-robin fashion.
424
+ std::shared_ptr<::gloo::Context> getContext(uint32_t tag);
425
+
426
+ // Entrypoint for worker threads.
427
+ void runLoop(int workerIndex);
428
+
429
+ // Queue work to run on worker thread.
430
+ void enqueue(c10::intrusive_ptr<AsyncWork> work);
431
+
432
+ // Keep both a queue of pending work, and a vector with in progress work.
433
+ // Both of these can only be mutated when holding the queue lock.
434
+ // We keep both around instead of just the queue, so we can grab a weak_ptr
435
+ // to all in progress and pending work when executing a barrier.
436
+ // When executing a barrier, we need to ensure that all prior work
437
+ // has completed before the barrier itself completes.
438
+ std::deque<c10::intrusive_ptr<AsyncWork>> workQueue_;
439
+ std::vector<c10::intrusive_ptr<AsyncWork>> workInProgress_;
440
+ std::mutex workMutex_;
441
+ std::condition_variable workProduceCV_;
442
+ std::condition_variable workConsumeCV_;
443
+ uint64_t seq_{0};
444
+ };
445
+
446
+ } // namespace c10d
447
+
448
+ #endif // USE_C10D_GLOO
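Putting the pieces above together, a minimal usage sketch (assuming a build with USE_C10D_GLOO): every rank constructs the group against the same shared store and then issues collectives in the same order, as the class comment requires. The FileStore path, rank, and world size are placeholders.

    #include <torch/torch.h>
    #include <torch/csrc/distributed/c10d/FileStore.hpp>
    #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>

    void runGlooAllreduce(int rank, int worldSize) {
      auto store = c10::make_intrusive<c10d::FileStore>("/tmp/c10d_rendezvous", worldSize);

      auto options = c10d::ProcessGroupGloo::Options::create();
      options->devices.push_back(c10d::ProcessGroupGloo::createDefaultDevice());

      auto pg = c10::make_intrusive<c10d::ProcessGroupGloo>(
          store, rank, worldSize, options);

      // The AsyncWork runs on a worker thread; wait() synchronizes with it.
      std::vector<at::Tensor> tensors{torch::ones({4}) * rank};
      auto work = pg->allreduce(tensors);
      work->wait(); // tensors[0] now holds the sum of ranks on every process
    }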
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp ADDED
@@ -0,0 +1,1097 @@
1
+ #pragma once
2
+
3
+ #if defined(__linux__)
4
+ #include <fcntl.h>
5
+ #include <sys/stat.h>
6
+ #include <sys/types.h>
7
+ #include <unistd.h>
8
+ #endif
9
+
10
+ #ifdef USE_C10D_NCCL
11
+
12
+ #include <atomic>
13
+ #include <chrono>
14
+ #include <future>
15
+ #include <iostream>
16
+ #include <list>
17
+ #include <mutex>
18
+ #include <thread>
19
+ #include <unordered_map>
20
+
21
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
22
+ #include <torch/csrc/distributed/c10d/NCCLUtils.hpp>
23
+ #include <torch/csrc/distributed/c10d/PrefixStore.hpp>
24
+ #include <torch/csrc/distributed/c10d/Store.hpp>
25
+ #include <torch/csrc/distributed/c10d/intra_node_comm.hpp>
26
+
27
+ #include <ATen/DynamicLibrary.h>
28
+ #include <ATen/cuda/CUDAContext.h>
29
+ #include <ATen/cuda/CUDAEvent.h>
30
+ #include <c10/core/Stream.h>
31
+ #include <c10/core/StreamGuard.h>
32
+ #include <c10/cuda/CUDACachingAllocator.h>
33
+ #include <c10/cuda/CUDAGuard.h>
34
+ #include <c10/cuda/CUDAStream.h>
35
+
36
+ #include <torch/custom_class.h>
37
+
38
+ namespace c10d {
39
+
40
+ // Control whether or not wait() is blocking or non-blocking.
41
+ static std::vector<std::string> TORCH_NCCL_BLOCKING_WAIT = {
42
+ "TORCH_NCCL_BLOCKING_WAIT",
43
+ "NCCL_BLOCKING_WAIT"};
44
+
45
+ // Control whether or not we perform Async Error Handling with NCCL.
46
+ static std::vector<std::string> TORCH_NCCL_ASYNC_ERROR_HANDLING = {
47
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING",
48
+ "NCCL_ASYNC_ERROR_HANDLING"};
49
+
50
+ // Control whether dumping debug info on watchdog
51
+ // timeout is enabled. This variable must be set together with
52
+ // TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0.
53
+ static std::vector<std::string> TORCH_NCCL_DUMP_ON_TIMEOUT = {
54
+ "TORCH_NCCL_DUMP_ON_TIMEOUT"};
55
+
56
+ // Control whether Desync Debug is enabled. This variable must be set
57
+ // together with TORCH_NCCL_ASYNC_ERROR_HANDLING.
58
+ static std::vector<std::string> TORCH_NCCL_DESYNC_DEBUG = {
59
+ "TORCH_NCCL_DESYNC_DEBUG",
60
+ "NCCL_DESYNC_DEBUG"};
61
+
62
+ // Enable recording start-events for all ProcessGroupNCCL collectives, and
63
+ // compute accurate collective timing per-collective. (Note: end-events are
64
+ // recorded by default. Turning on this flag can increase the chances of a watchdog
65
+ // hang due to performing a CUDA event query which eventually calls
66
+ // cudaEventElapsedTime() API.)
67
+ static std::vector<std::string> TORCH_NCCL_ENABLE_TIMING = {
68
+ "TORCH_NCCL_ENABLE_TIMING",
69
+ "NCCL_ENABLE_TIMING"};
70
+
71
+ // Enable monitoring thread which aborts the process when the ProcessGroupNCCL
72
+ // Watchdog thread gets stuck and no heartbeat is detected after
73
+ // TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC. This can happen due to calling CUDA/NCCL
74
+ // APIs that may hang. This is useful to prevent jobs from being stuck for longer
75
+ // than necessary, tying up cluster resources.
76
+ static std::vector<std::string> TORCH_NCCL_ENABLE_MONITORING = {
77
+ "TORCH_NCCL_ENABLE_MONITORING"};
78
+
79
+ // Control the watchdog heartbeat timeout period after which the monitoring
80
+ // thread will abort the process.
81
+ static std::vector<std::string> TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = {
82
+ "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"};
83
+
84
+ // The maximum number of events we store in the flight recorder's ring buffer.
85
+ // (One event could be the start or end of a collective, for example).
86
+ static std::vector<std::string> TORCH_NCCL_TRACE_BUFFER_SIZE = {
87
+ "TORCH_NCCL_TRACE_BUFFER_SIZE"};
88
+
89
+ // Control how much extra time we will wait for dumping the debugging info
90
+ // before we exit and throw a timeout exception.
91
+ static std::vector<std::string> TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC = {
92
+ "TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC"};
93
+
94
+ // Control the interval inside the watchdog thread to check the coordinated
95
+ // signal from other ranks, e.g. to dump the debugging information.
96
+ static std::vector<std::string> TORCH_NCCL_COORD_CHECK_MILSEC = {
97
+ "TORCH_NCCL_COORD_CHECK_MILSEC"};
98
+
99
+ // Whether to abort the communicators when users call destroy_process_group().
100
+ // If yes, communicators will be aborted when destroy_process_group is called,
101
+ // but not in destructor.
102
+ static std::vector<std::string> TORCH_NCCL_ABORT_IN_DESTROY_PG = {
103
+ "TORCH_NCCL_ABORT_IN_DESTROY_PG"};
104
+
105
+ constexpr const char* NCCL_BACKEND_NAME = "nccl";
106
+
107
+ constexpr const char* TIMEOUT_DUMP = "timeout_dump";
108
+
109
+ constexpr const int kWorkStatusUpdatePeriodMs = 10 * 1000; // 10 seconds
110
+
111
+ constexpr auto kProcessGroupNCCLDefaultTimeout =
112
+ std::chrono::milliseconds(10 * 60 * 1000);
113
+
114
+ // NoHandling: do not handle asynchronous NCCL errors
115
+ // TearDown: tear down process upon error, see `WorkNCCL::handleException`
116
+ // CleanUpOnly: just clean up collectives and abort communicators without
117
+ // tearing down the process. SkipCleanUp: (this is a temporary option and can be
118
+ // removed in future) tear down process without cleaning up NCCL communicators.
119
+ // This should be used as a last resort in case `ncclCommAbort` itself is
120
+ // hanging
121
+ enum ErrorHandlingMode {
122
+ NoHandling = 0,
123
+ TearDown = 1,
124
+ CleanUpOnly = 2,
125
+ SkipCleanUp = 3
126
+ };
127
+
128
+ #define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp)
129
+
130
+ #define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly)
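As a sanity check on how the two macros partition the modes above (illustrative only, not part of the original header):

    // NoHandling and SkipCleanUp skip communicator cleanup; NoHandling and
    // CleanUpOnly keep the process alive.
    static_assert(SHOULD_CLEAN_UP(TearDown) && SHOULD_CLEAN_UP(CleanUpOnly), "");
    static_assert(!SHOULD_CLEAN_UP(NoHandling) && !SHOULD_CLEAN_UP(SkipCleanUp), "");
    static_assert(SHOULD_TEAR_DOWN(TearDown) && SHOULD_TEAR_DOWN(SkipCleanUp), "");
    static_assert(!SHOULD_TEAR_DOWN(NoHandling) && !SHOULD_TEAR_DOWN(CleanUpOnly), "");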
131
+
132
+ #define PRINT_COLLECTIVE_HASH_SIGNATURE(phase, opType, numel, hashValue) \
133
+ LOG(WARNING) << logPrefix() << "Hash of " << phase << " to NCCL " << opType \
134
+ << " with size " << numel << " is " << hashValue;
135
+
136
+ // If set, ProcessGroupNCCL doesn't use recordStream calls to ensure
137
+ // caching allocator safety for tensors used on both user-facing and
138
+ // internal comm streams.
139
+ // Instead, it stashes live references to those tensors until after
140
+ // user-facing streams are synced with comm streams.
141
+ // See stashed_for_allocator_safety_ below.
142
+ static std::vector<std::string> TORCH_NCCL_AVOID_RECORD_STREAMS = {
143
+ "TORCH_NCCL_AVOID_RECORD_STREAMS"};
144
+
145
+ // If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda cache
146
+ // allocator so that whenever a tensor is allocated or freed, ProcessGroupNCCL
147
+ // can register/deregister the tensor on all available NCCL communicators.
148
+ static std::vector<std::string> TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK =
149
+ {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK",
150
+ "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"};
151
+
152
+ #if defined(__linux__)
153
+ struct DumpPipe {
154
+ DumpPipe(int rank) {
155
+ std::string fileStem =
156
+ getCvarString({"TORCH_NCCL_DEBUG_INFO_PIPE_FILE"}, "");
157
+ if (fileStem.empty() ||
158
+ getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0) <= 0) {
159
+ return;
160
+ }
161
+ TORCH_CHECK(!fileStem.empty(), "TORCH_NCCL_DEBUG_INFO_TEMP_FILE is empty");
162
+ std::string filename = c10::str(fileStem, rank, ".pipe");
163
+ TORCH_CHECK(
164
+ unlink(filename.c_str()) != -1 || errno == ENOENT,
165
+ "Error removing existing named pipe ",
166
+ filename);
167
+ TORCH_CHECK(
168
+ mkfifo(filename.c_str(), 0666) != -1,
169
+ "Error creating named pipe ",
170
+ filename);
171
+ fd_ = open(filename.c_str(), O_RDONLY | O_NONBLOCK);
172
+ LOG(INFO) << "Pipe file " << filename
173
+ << " has been opened, write to it to trigger NCCL Debug Dump.";
174
+ TORCH_CHECK(fd_ != -1, "Error opening named pipe ", filename);
175
+ }
176
+ bool shouldDump() {
177
+ if (fd_ == -1) {
178
+ return false;
179
+ }
180
+ char buf[128];
181
+ // non-blocking from O_NONBLOCK above.
182
+ // Ignore EINTR because we already will poll this
183
+ // again later.
184
+ ssize_t bytesRead = read(fd_, &buf, 128);
185
+ return bytesRead > 0;
186
+ }
187
+ ~DumpPipe() {
188
+ if (fd_ != -1) {
189
+ close(fd_);
190
+ }
191
+ }
192
+
193
+ private:
194
+ int fd_ = -1;
195
+ };
196
+ #else
197
+ struct DumpPipe {
198
+ DumpPipe(int rank) {}
199
+ bool shouldDump() {
200
+ return false;
201
+ }
202
+ };
203
+ #endif
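A minimal sketch of one way the pipe could be consumed; the rank, polling period, and dump callback below are placeholders, not part of the original header.

    #include <chrono>
    #include <thread>

    template <typename DumpFn>
    void pollDumpPipe(int rank, DumpFn dumpDebugInfo) {
      // Opens <TORCH_NCCL_DEBUG_INFO_PIPE_FILE><rank>.pipe if configured;
      // otherwise shouldDump() always returns false.
      c10d::DumpPipe pipe(rank);
      while (true) {
        if (pipe.shouldDump()) {
          dumpDebugInfo(); // e.g. flush the NCCL flight recorder to storage
        }
        std::this_thread::sleep_for(std::chrono::seconds(1));
      }
    }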
204
+
205
+ // ProcessGroupNCCL implements NCCL bindings for c10d.
206
+ //
207
+ // All functions of the class are expected to be called in the same order
208
+ // across all processes in the process group. This is the only way that we
209
+ // can guarantee to match up the same calls among all processes.
210
+ //
211
+ // All NCCL functions provided by this class are asynchronous functions. More
212
+ // specifically, each NCCL call is scheduled on a separate CUDA stream that is
213
+ // different from the current CUDA stream. This is for the purpose of
214
+ // potentially achieving concurrency and better performance. As a result,
215
+ // it is the callers' responsibility to make sure that the CUDA stream their
216
+ // code works on waits for the NCCL operation from
217
+ // this class.
218
+ //
219
+ // This can be done by calling:
220
+ //
221
+ // either WorkNCCL::wait() or WorkNCCL::synchronize(); both achieve the same
222
+ // functionality and are synonyms.
223
+ //
224
+ // Also note that WorkNCCL::finishedGPUExecution() is a helper function only
225
+ // provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has
226
+ // finished execution on the GPU (not just scheduled).
227
+ //
228
+ // Example on using the NCCL process group
229
+ //
230
+ // ProcessGroupNCCL pg(store, rank, size);
231
+ // std::shared_ptr<WorkNCCL> work = pg.allreduce(tensors);
232
+ //
233
+ // // At this point, the NCCL kernel has already been queued successfully
234
+ // // Now, let the current stream wait for NCCL to finish; this is an
235
+ // // async operation as well
236
+ //
237
+ // work->wait()
238
+ //
239
+ // // Now continue on other work in the current stream.
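Fleshing out the example in the comment above into a compilable sketch (assuming a CUDA build with USE_C10D_NCCL, one GPU per rank, and a shared FileStore path; the path, rank, and world size are placeholders):

    #include <torch/torch.h>
    #include <torch/csrc/distributed/c10d/FileStore.hpp>
    #include <torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp>

    void runNcclAllreduce(int rank, int worldSize) {
      auto store = c10::make_intrusive<c10d::FileStore>("/tmp/nccl_rendezvous", worldSize);
      auto pg = c10::make_intrusive<c10d::ProcessGroupNCCL>(store, rank, worldSize);

      std::vector<at::Tensor> tensors{
          torch::ones({4}, torch::device(torch::kCUDA)) * rank};

      // The NCCL kernel is queued on an internal stream; wait() makes the
      // current stream wait for it rather than blocking the CPU.
      auto work = pg->allreduce(tensors);
      work->wait();
    }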
240
+ class TORCH_API ProcessGroupNCCL : public Backend {
241
+ public:
242
+ class WorkNCCL : public Work, public std::enable_shared_from_this<WorkNCCL> {
243
+ public:
244
+ friend struct WorkInfo;
245
+
246
+ // Constructor takes a list of CUDA devices
247
+ WorkNCCL(
248
+ at::Device& device,
249
+ int rank,
250
+ OpType opType,
251
+ uint64_t seq,
252
+ const char* profilingTitle = nullptr,
253
+ const c10::optional<std::vector<at::Tensor>>& inputs = c10::nullopt,
254
+ bool desyncDebug = false,
255
+ bool enableTiming = false,
256
+ DebugLevel distDebugLevel = DebugLevel::Off);
257
+ // Copy constructor doing partial copy without outputs_. Cleanup thread
258
+ // monitors and removes finished work objects. However, it will deadlock when
259
+ // destructing outputs_ tensors that are view tensors in the autograd graph.
260
+ WorkNCCL(const WorkNCCL& w);
261
+
262
+ ~WorkNCCL() override;
263
+
264
+ // Checks if the NCCL kernel has started to execute.
265
+ bool isStarted();
266
+
267
+ // Checks if request has completed. In this specific case of NCCL, it checks
268
+ // if the NCCL operation has completed on the GPU in its own NCCL stream.
269
+ // Non-blocking operation.
270
+ bool isCompleted() override;
271
+
272
+ bool isSuccess() const override;
273
+
274
+ // Same as calling synchronize() for NCCL work.
275
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
276
+
277
+ void abort() override;
278
+
279
+ // Let the current stream wait on the completion of the NCCL work
280
+ // Throws on exceptions. Blocking operation, which will wait for work
281
+ // completion.
282
+ void synchronize() override;
283
+
284
+ // Synchronize streams by blocking each on the NCCL stream
285
+ void synchronizeStream();
286
+
287
+ // Helper function to handle exception (throw if needed).
288
+ void handleException(ErrorHandlingMode asyncErrorHandling);
289
+
290
+ // Helper function that checks if the NCCL kernels have finished
291
+ // execution on the GPUs
292
+ bool finishedGPUExecution();
293
+
294
+ // Get a Future object that will be marked as completed internally.
295
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
296
+
297
+ float getDuration() const override;
298
+
299
+ uint64_t getSequencenumber() const override;
300
+
301
+ const std::string& logPrefix() const;
302
+
303
+ // Helper function that sets an exception_ptr on the WorkNCCL object.
304
+ void setException(std::exception_ptr exception_ptr);
305
+
306
+ // Helper function that returns True if the WorkNCCL object has timed out
307
+ // and False otherwise.
308
+ // In case of timeout, set exception on the WorkNCCL object.
309
+ bool checkTimeout(
310
+ c10::optional<std::chrono::milliseconds> timeout = c10::nullopt);
311
+
312
+ std::vector<at::Tensor> result() override;
313
+
314
+ protected:
315
+ // The cached list of CUDA devices to operate on
316
+ at::Device device_;
317
+
318
+ // The start CUDA event of NCCL operator tracking this work item. These
319
+ // start CUDA events are needed by desync debugging if enabled.
320
+ std::shared_ptr<at::cuda::CUDAEvent> ncclStartEvent_;
321
+
322
+ // The end CUDA event of NCCL operator tracking this work item.
323
+ std::shared_ptr<at::cuda::CUDAEvent> ncclEndEvent_;
324
+
325
+ // The NCCL communicator used for this work item.
326
+ std::shared_ptr<NCCLComm> ncclComm_;
327
+
328
+ // Tensors used for barrier op
329
+ at::Tensor barrierTensor_;
330
+
331
+ // Clone of blockingWait_ from ProcessGroupNCCL.
332
+ bool blockingWait_ = false;
333
+
334
+ // Clone of avoidRecordStreams_ from ProcessGroupNCCL.
335
+ bool avoidRecordStreams_ = false;
336
+
337
+ // Clone of opTimeout_ from ProcessGroupNCCL.
338
+ std::chrono::milliseconds opTimeout_;
339
+
340
+ // Time point representing when the work started.
341
+ std::chrono::time_point<std::chrono::steady_clock> workStartTime_;
342
+
343
+ // Record the collective sequential number.
344
+ uint64_t seq_;
345
+
346
+ // Indicates if the nccl start event has been updated to the store trace.
347
+ // This will be used by desync debug.
348
+ bool startTraceUpdated_{false};
349
+
350
+ // Record collective sizes for debug. We only record the size on the first
351
+ // device as multi-device per process is deprecated
352
+ size_t numelIn_ = -1;
353
+ size_t numelOut_ = -1;
354
+
355
+ // Wrapper method for the static checkForNCCLErrors which can be overridden
356
+ // for tests.
357
+ virtual std::exception_ptr checkForNCCLErrors();
358
+
359
+ friend std::ostream& operator<<(
360
+ std::ostream& output,
361
+ const WorkNCCL& workNCCL);
362
+
363
+ private:
364
+ // Helper function for synchronize
365
+ void synchronizeInternal(std::chrono::milliseconds timeout);
366
+
367
+ // Checks for NCCL errors and sets an appropriate exception_ptr.
368
+ void checkAndSetException();
369
+
370
+ // Just checks whether GPU execution has started, without modifying
371
+ // exception_ptr.
372
+ bool startedGPUExecutionInternal() const;
373
+
374
+ // Just checks whether GPU execution has completed, without modifying
375
+ // exception_ptr.
376
+ bool finishedGPUExecutionInternal() const;
377
+
378
+ // Reference to the store so that we can write aborted communicators
379
+ // to the store.
380
+ c10::intrusive_ptr<Store> store_;
381
+
382
+ // Store a reference to NCCL collective's outputs, used by result and to
383
+ // give a more descriptive message when representing the Work as a string.
384
+ std::shared_ptr<std::vector<at::Tensor>> outputs_;
385
+
386
+ // TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper.
387
+ // Stores references to participating non-output tensors (i.e. inputs,
388
+ // flattened intermediates).
389
+ // We'll clear this list in synchronizeStream, just after user-facing
390
+ // stream(s) are synced with the nccl work stream(s).
391
+ // By keeping these refs (as well as outputs_) alive until after the
392
+ // collective's work rejoins the user-facing streams, we achieve
393
+ // caching allocator safety without any recordStream calls.
394
+ // For in-place collectives, some refs stashed here may alias outputs_,
395
+ // but that doesn't do any harm.
396
+ std::shared_ptr<std::vector<at::Tensor>> stashed_for_allocator_safety_;
397
+
398
+ // The future returned by getFuture.
399
+ c10::intrusive_ptr<at::ivalue::Future> future_;
400
+
401
+ bool timingEnabled_;
402
+ // unique id used to tell the trace buffer that this
403
+ // work has completed
404
+ c10::optional<uint64_t> trace_id_;
405
+ DebugLevel distDebugLevel_;
406
+ friend class ProcessGroupNCCL;
407
+ };
408
+
409
+ struct Options : Backend::Options {
410
+ // NOTE: timeout in ProcessGroupNCCL::Options denotes the timeout for
411
+ // operations. This is only used when blockingWait_ is enabled.
412
+ explicit Options(bool is_high_priority_stream = false);
413
+
414
+ // return intrusive_ptr of the object
415
+ static c10::intrusive_ptr<Options> create(
416
+ bool is_high_priority_stream = false) {
417
+ return c10::make_intrusive<Options>(is_high_priority_stream);
418
+ }
419
+
420
+ // Schedule NCCL operations on high priority CUDA streams
421
+ bool is_high_priority_stream;
422
+
423
+ #ifdef NCCL_HAS_COMM_NONBLOCKING
424
+ // Configure ranks
425
+ ncclConfig_t config = NCCL_CONFIG_INITIALIZER;
426
+ #endif
427
+
428
+ // Optional "parent" backend and color to create communicators from
429
+ // via `ncclCommSplit`
430
+ std::shared_ptr<ProcessGroupNCCL> split_from;
431
+ int64_t split_color{0};
432
+ std::vector<uint64_t> global_ranks_in_group;
433
+ };
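A minimal sketch of using these options to opt into high-priority streams and ncclCommSplit when deriving a new group from an existing one; `parentPg` and the color value are placeholders, not part of the original header.

    c10::intrusive_ptr<c10d::ProcessGroupNCCL> makeSplitGroup(
        const c10::intrusive_ptr<c10d::Store>& store,
        int rank,
        int size,
        const std::shared_ptr<c10d::ProcessGroupNCCL>& parentPg) {
      auto options =
          c10d::ProcessGroupNCCL::Options::create(/*is_high_priority_stream=*/true);
      // Ask for communicators to be created from the parent via ncclCommSplit
      // (see the comment on split_from above).
      options->split_from = parentPg;
      options->split_color = 1; // ranks passing the same color land in one group
      return c10::make_intrusive<c10d::ProcessGroupNCCL>(store, rank, size, options);
    }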
434
+
435
+ // If you wish to create multiple process groups, each with a potentially
436
+ // different rank and size, you can do so by passing a new store instance
437
+ // to each one. If you have only a single store object, you can
438
+ // use the `c10d::PrefixStore` to derive scoped instances.
439
+ // This is also what the Python API in torch.distributed does.
440
+ //
441
+ // The process group instance keeps a reference to the store because
442
+ // it may be used long after the constructor runs. In fact, the constructor
443
+ // doesn't create any NCCL communicators. A single NCCL communicator can
444
+ // only be used on a specific set of devices, and is therefore created
445
+ // on-demand when a collective runs. If another collective is executed later,
446
+ // against a different set of devices, the process group creates another NCCL
447
+ // communicator. These NCCL communicators are cached and reused if possible.
448
+ //
449
+ ProcessGroupNCCL(
450
+ const c10::intrusive_ptr<Store>& store,
451
+ int rank,
452
+ int size,
453
+ c10::intrusive_ptr<Options> options = Options::create());
454
+
455
+ // This constructor includes the deprecated `groupName` argument.
456
+ // If you have existing code that uses the `groupName`, you can replace
457
+ // it by specifying a `c10d::PrefixStore(groupName, store)` for store.
458
+ C10_DEPRECATED ProcessGroupNCCL(
459
+ const c10::intrusive_ptr<Store>& store,
460
+ int rank,
461
+ int size,
462
+ const std::string& groupName,
463
+ c10::intrusive_ptr<Options> options = Options::create())
464
+ : ProcessGroupNCCL(store, rank, size, options) {}
465
+
466
+ ~ProcessGroupNCCL() override;
467
+
468
+ uint64_t getUid() {
469
+ return static_cast<uint64_t>(uid_);
470
+ }
471
+
472
+ c10::intrusive_ptr<Options> getOptions() {
473
+ return options_;
474
+ }
475
+
476
+ const std::string getBackendName() const override {
477
+ return std::string(NCCL_BACKEND_NAME);
478
+ }
479
+
480
+ bool supportsSplitting() const override {
481
+ return true;
482
+ }
483
+
484
+ void startCoalescing() override;
485
+
486
+ c10::intrusive_ptr<Work> endCoalescing() override;
487
+
488
+ // For specifying a composite optype, such as ALLGATHER and REDUCE_SCATTER
489
+ c10::intrusive_ptr<Work> endCoalescing(OpType optype);
490
+
491
+ c10::intrusive_ptr<Work> broadcast(
492
+ std::vector<at::Tensor>& tensors,
493
+ const BroadcastOptions& opts = BroadcastOptions()) override;
494
+
495
+ c10::intrusive_ptr<Work> _broadcast_oop(
496
+ at::Tensor& outputTensors,
497
+ at::Tensor& inputTensors,
498
+ const BroadcastOptions& opts = BroadcastOptions());
499
+
500
+ c10::intrusive_ptr<Work> allreduce_sparse(
501
+ std::vector<at::Tensor>& tensors,
502
+ const AllreduceOptions& opts = AllreduceOptions()) override;
503
+
504
+ c10::intrusive_ptr<Work> allreduce(
505
+ std::vector<at::Tensor>& tensors,
506
+ const AllreduceOptions& opts = AllreduceOptions()) override;
507
+
508
+ c10::intrusive_ptr<Work> allreduce_coalesced(
509
+ std::vector<at::Tensor>& tensors,
510
+ const AllreduceCoalescedOptions& opts =
511
+ AllreduceCoalescedOptions()) override;
512
+
513
+ c10::intrusive_ptr<Work> reduce(
514
+ std::vector<at::Tensor>& tensors,
515
+ const ReduceOptions& opts = ReduceOptions()) override;
516
+
517
+ c10::intrusive_ptr<Work> _reduce_oop(
518
+ at::Tensor& outputTensors,
519
+ at::Tensor& inputTensors,
520
+ const ReduceOptions& opts = ReduceOptions());
521
+
522
+ c10::intrusive_ptr<Work> allgather(
523
+ std::vector<std::vector<at::Tensor>>& outputTensors,
524
+ std::vector<at::Tensor>& inputTensors,
525
+ const AllgatherOptions& opts = AllgatherOptions()) override;
526
+
527
+ c10::intrusive_ptr<Work> _allgather_base(
528
+ at::Tensor& outputbuffer,
529
+ at::Tensor& inputbuffer,
530
+ const AllgatherOptions& opts = AllgatherOptions()) override;
531
+
532
+ c10::intrusive_ptr<Work> allgather_coalesced(
533
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
534
+ std::vector<at::Tensor>& inputTensors,
535
+ const AllgatherOptions& opts = AllgatherOptions()) override;
536
+
537
+ c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
538
+ std::vector<at::Tensor>& outputs,
539
+ std::vector<at::Tensor>& inputs,
540
+ const AllgatherOptions& opts = AllgatherOptions()) override;
541
+
542
+ c10::intrusive_ptr<Work> reduce_scatter(
543
+ std::vector<at::Tensor>& outputTensors,
544
+ std::vector<std::vector<at::Tensor>>& inputTensors,
545
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
546
+
547
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
548
+ at::Tensor& outputTensor,
549
+ at::Tensor& inputTensor,
550
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
551
+
552
+ c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
553
+ std::vector<at::Tensor>& outputs,
554
+ std::vector<at::Tensor>& inputs,
555
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
556
+
557
+ c10::intrusive_ptr<Work> barrier(
558
+ const BarrierOptions& opts = BarrierOptions()) override;
559
+
560
+ c10::intrusive_ptr<Work> alltoall_base(
561
+ at::Tensor& outputTensor,
562
+ at::Tensor& inputTensor,
563
+ std::vector<int64_t>& outputSplitSizes,
564
+ std::vector<int64_t>& inputSplitSizes,
565
+ const AllToAllOptions& opts = AllToAllOptions()) override;
566
+
567
+ c10::intrusive_ptr<Work> alltoall(
568
+ std::vector<at::Tensor>& outputTensors,
569
+ std::vector<at::Tensor>& inputTensors,
570
+ const AllToAllOptions& opts = AllToAllOptions()) override;
571
+
572
+ c10::intrusive_ptr<Work> send(
573
+ std::vector<at::Tensor>& tensors,
574
+ int dstRank,
575
+ int tag) override;
576
+
577
+ c10::intrusive_ptr<Work> recv(
578
+ std::vector<at::Tensor>& tensors,
579
+ int srcRank,
580
+ int tag) override;
581
+
582
+ void groupStart();
583
+
584
+ void groupEnd();
585
+
586
+ void groupEndNonblocking(std::shared_ptr<NCCLComm> comm);
587
+
588
+ c10::intrusive_ptr<Work> gather(
589
+ std::vector<std::vector<at::Tensor>>& outputTensors,
590
+ std::vector<at::Tensor>& inputTensors,
591
+ const GatherOptions& opts = GatherOptions()) override;
592
+
593
+ c10::intrusive_ptr<Work> scatter(
594
+ std::vector<at::Tensor>& outputTensors,
595
+ std::vector<std::vector<at::Tensor>>& inputTensors,
596
+ const ScatterOptions& opts = ScatterOptions()) override;
597
+
598
+ // Unsupported Ops
599
+ c10::intrusive_ptr<Work> recvAnysource(
600
+ std::vector<at::Tensor>& tensors,
601
+ int tag) override;
602
+
603
+ // Agrees on an initial sequence number for the whole group by having rank 0
604
+ // create it and broadcast it to other ranks using the store.
605
+ void setSequenceNumberForGroup() override;
606
+
607
+ // Retrieves the current sequence number for the whole group, which should be
608
+ // in sync. If the returned number is not consistent across the group, it
609
+ // may indicate that there is some sort of collective desynchronization.
610
+ uint64_t getSequenceNumberForGroup() override;
611
+
612
+ // Return the total number of splits the communicators held by this process
613
+ // group have performed.
614
+ uint64_t getCommSplitCounter() const;
615
+
616
+ void registerOnCompletionHook(
617
+ std::function<void(std::shared_ptr<WorkInfo>)>&& hook) override;
618
+ void waitForPendingWorks() override;
619
+
620
+ void enableCollectivesTiming() override;
621
+
622
+ // Helper function for iteratively aborting communicators in the provided map
623
+ void abortCommsFromMap(
624
+ std::unordered_map<std::string, std::shared_ptr<NCCLComm>>& ncclCommsMap,
625
+ c10::optional<std::string> abortReason);
626
+
627
+ c10::intrusive_ptr<intra_node_comm::IntraNodeComm> initIntraNodeComm();
628
+
629
+ // Provides an API to abort the ProcessGroup (similar to ncclCommAbort)
630
+ // instead of relying on ProcessGroupNCCL destructor.
631
+ // return true if abort is successful, otherwise false
632
+ bool abort(c10::optional<std::string> abortReason = c10::nullopt);
633
+
634
+ void shutdown(c10::optional<std::string> reason = c10::nullopt);
635
+
636
+ void eagerConnectSingleDevice(at::Device device) override;
637
+
638
+ void performNocolorSplit(at::Device device);
639
+
640
+ protected:
641
+ // Helper that broadcasts nccl unique ID to all ranks through the store
642
+ void broadcastUniqueNCCLID(
643
+ ncclUniqueId* ncclID,
644
+ bool isSingleP2POp,
645
+ const std::string& devicesKey,
646
+ int p2pRank);
647
+
648
+ // Helper that either looks up the cached NCCL communicators or creates
649
+ // a new set of NCCL communicators as a cache entry
650
+ std::shared_ptr<NCCLComm> getNCCLComm(
651
+ const std::string& deviceKey,
652
+ at::Device& device,
653
+ OpType opType,
654
+ int p2pRank = 0,
655
+ bool isSendRecvSelf = false);
656
+
657
+ // Wrapper method which can be overridden for tests.
658
+ virtual std::exception_ptr checkForNCCLErrors(
659
+ std::shared_ptr<NCCLComm>& ncclComm);
660
+
661
+ // Ensure that if record is true, the work obj will be enqueued via
662
+ // workEnqueue
663
+ virtual c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL> initWork(
664
+ at::Device& device,
665
+ int rank,
666
+ OpType opType,
667
+ const char* profilingTitle = nullptr,
668
+ const std::vector<at::Tensor>& inputs = {},
669
+ const std::vector<at::Tensor>& outputs = {},
670
+ bool record = false);
671
+
672
+ // In the timeout case, we will dump debug info such as the NCCL flight
673
+ // recorder to storage. Down the road, if we have more complicated or blocking
674
+ // operations, we might need to use a side thread to do it.
675
+ bool dumpDebuggingInfo();
676
+
677
+ private:
678
+ int globalRankStart;
679
+ int globalRankStride;
680
+
681
+ // Helper that encapsulates work shared across all collective communication
682
+ // primitives. The callbacks have the following signatures:
683
+ //
684
+ // ncclResult_t fn(at::Tensor& input, at::Tensor& output,
685
+ // ncclComm_t, at::cuda::CUDAStream&);
686
+ // void {pre,post}(std::vector<at::cuda::CUDAStream&>);
687
+ template <typename Fn>
688
+ c10::intrusive_ptr<Work> collective(
689
+ at::Tensor& input,
690
+ at::Tensor& output,
691
+ Fn fn,
692
+ OpType opType,
693
+ const char* profilingTitle = nullptr,
694
+ bool avoidRecordStreams = false);
695
+
696
+ template <typename Fn, typename PreProcess, typename PostProcess>
697
+ c10::intrusive_ptr<Work> collective(
698
+ at::Tensor& input,
699
+ at::Tensor& output,
700
+ Fn fn,
701
+ PreProcess pre,
702
+ PostProcess post,
703
+ OpType opType,
704
+ const char* profilingTitle = nullptr,
705
+ bool avoidRecordStreams = false);
706
+
707
+ template <typename Fn>
708
+ c10::intrusive_ptr<Work> collectiveCoalesced(
709
+ std::vector<at::Tensor>& input,
710
+ std::vector<at::Tensor>& output,
711
+ Fn fn,
712
+ OpType opType,
713
+ const char* profilingTitle = nullptr,
714
+ bool avoidRecordStreams = false);
715
+
716
+ // Helper that encapsulates work shared across point-to-point communication
717
+ // primitives. It is the same structure as the helper used for collective
718
+ // communication primitives.
719
+ template <typename Fn>
720
+ c10::intrusive_ptr<Work> pointToPoint(
721
+ at::Tensor& tensor,
722
+ Fn fn,
723
+ int peer,
724
+ OpType opType,
725
+ const char* profilingTitle = nullptr);
726
+
727
+ template <typename Fn, typename PreProcess, typename PostProcess>
728
+ c10::intrusive_ptr<Work> pointToPoint(
729
+ at::Tensor& tensor,
730
+ Fn fn,
731
+ int peer,
732
+ OpType opType,
733
+ PreProcess pre,
734
+ PostProcess post,
735
+ const char* profilingTitle);
736
+
737
+ c10::intrusive_ptr<Work> allreduce_impl(
738
+ at::Tensor& tensor,
739
+ const AllreduceOptions& opts = AllreduceOptions());
740
+
741
+ // Checks for NCCL errors on each of the communicators and returns an
742
+ // appropriate exception_ptr (nullptr if no errors).
743
+ static std::exception_ptr checkForNCCLErrorsInternal(
744
+ std::shared_ptr<NCCLComm>& ncclComm);
745
+
746
+ // Function that runs as part of a separate thread and checks for errors on
747
+ // NCCL communicators. We need a separate thread to check for NCCL errors
748
+ // since we can't rely on the user calling certain methods like wait(),
749
+ // isCompleted() etc. to detect and remediate errors. In addition to this, we
750
+ // need a mechanism to safely abort and remove NCCL communicators from our
751
+ // cache. This can be done cleanly by having a thread for the ProcessGroupNCCL
752
+ // class. Attempting to modify the communicator cache from the WorkNCCL class
753
+ // might run into issues with object lifetime since the ProcessGroupNCCL
754
+ // object might get destroyed before the WorkNCCL object.
755
+ void ncclCommWatchdog();
756
+
757
+ // Return the CUDA device most likely associated with this backend.
758
+ // If we aren't bound to a specific device, there is no strict
759
+ // guarantee that this heuristic is the correct assignment of ranks
760
+ // to GPUs that Python layers use, but in practice it tends to be.
761
+ // Fortunately we don't rely on this for correctness of any tensor
762
+ // operations, just for ancillary uses like barriers.
763
+ at::Device guessDeviceForRank() const;
764
+
765
+ // Destroys initialized NCCL communicators in devNCCLComMap_ given by input
766
+ // key. Throws if there are no communicators to destroy. Also removes
767
+ // communicators from the cache and clears used device indices.
768
+ void destroyNCCLComms(const std::string& devNCCLCommMapKey);
769
+
770
+ // Watchdog's inside loop.
771
+ // Takes care of cleaning up completed work, and aborting upon failure or
772
+ // timeout.
773
+ void watchdogHandler();
774
+
775
+ void runHookLoop();
776
+
777
+ // Desync debug helper
778
+ void logWorkStart(WorkNCCL& work);
779
+
780
+ // Desync debug helper
781
+ void logWorkEnd(WorkNCCL& work);
782
+
783
+ // Generates a prefix that is unique to this process group and rank, for
784
+ // disambiguating logs
785
+ std::string createLogPrefix() const;
786
+
787
+ // Returns the unique prefix created in createLogPrefix
788
+ const std::string& logPrefix() const;
789
+
790
+ // Returns the global rank of the device. This function assumes that users
791
+ // always create a default global process group (PG) which includes all
792
+ // devices. It is called in the constructor of ProcessGroupNCCL, so it always
793
+ // returns the rank_ of the very first PG created, aka, the default global PG.
794
+ const int& globalRank() const;
795
+
796
+ // Returns the global ranks of a PG.
797
+ const std::vector<uint64_t>& groupRanks() const;
798
+
799
+ protected:
800
+ // Function that runs as part of a separate thread aside from watchdog
801
+ // thread because we need to check the heartbeat from watchdog thread
802
+ // so that when we get stuck in some NCCL/CUDA calls,
803
+ // we can dump the debugging information and abort the process.
804
+ virtual void heartbeatMonitor();
805
+
806
+ // Function that directly trigger std::abort so that the whole process
807
+ // gets terminated.
808
+ virtual void terminateProcess(std::string errMsg);
809
+
810
+ // A helper function to wait for a future to complete or timeout.
811
+ void waitForFutureOrTimeout(
812
+ std::future<bool>& fut,
813
+ const std::chrono::milliseconds& timeOutMilSec,
814
+ const std::string& futDescription,
815
+ bool throwException = false);
816
+
817
+ // When the watchdog times out, this function will be called to return debug info
818
+ // for users. For now we only get information from retrieveDesyncReport.
819
+ // We are working on enabling more useful debug information for watchdog
820
+ // timeout.
821
+ virtual std::string getNCCLWatchdogDebugInfo();
822
+
823
+ static const int64_t kWatchdogThreadSleepMillis;
824
+
825
+ // The store is used to broadcast the NCCL unique ID of rank 0. This store
826
+ // comes with a prefix and it is different across ProcessGroup NCCL instances
827
+ // (aka, different ProcessGroups).
828
+ c10::intrusive_ptr<Store> store_;
829
+
830
+ // Reference to the store without a prefix so that keys are the same across all
831
+ // ProcessGroup NCCL instances and (key, value) pairs written to the store are
832
+ // global.
833
+ c10::intrusive_ptr<Store> globalStore_;
834
+
835
+ bool storeError_{false};
836
+
837
+ const c10::intrusive_ptr<Options> options_;
838
+
839
+ // The number of NCCL communicators that have been created during
840
+ // the lifetime of this process group. This sequence number is
841
+ // used to scope keys used in the store.
842
+ uint64_t ncclCommCounter_{0};
843
+
844
+ // The store keys to trace the last NCCL collective kernel CUDA events - start
845
+ // event and end event respectively. These are used to do desync root cause
846
+ // analysis.
847
+ const std::string traceKeyStart_;
848
+ const std::string traceKeyEnd_;
849
+
850
+ // The NCCL communicator that the process group has cached.
851
+ //
852
+ // For collective operations:
853
+ // The key is a list of GPU devices that an operation is operating on
854
+ // The GPU devices are stored in a device sequence and the cached NCCL
855
+ // communicator is associated with this GPU device sequence
856
+ //
857
+ // e.g. If the process group op only uses device 0, then the value of
858
+ // the used device string stored (key of the hashmap) would be "0".
859
+ //
860
+ // If the process group op uses devices 0 - 7 and each tensor of the
861
+ // input tensor list is on devices 0, 1, 2, 3, 4, 5, 6, 7 respectively,
862
+ // then the value of the used device string (key) stored would be
863
+ // "0,1,2,3,4,5,6,7"
864
+ //
865
+ // If the process group op uses devices 0 - 7 and each tensor of the
866
+ // input tensor list is on devices 0, 4, 5, 6, 7, 1, 2, 3 respectively,
867
+ // then the value of the used device string stored would be
868
+ // "0,4,5,6,7,1,2,3"
869
+ //
870
+ // Note that the order of the device for the tensor list matters.
871
+ //
872
+ // For point-to-point operations:
873
+ // The key is a string of my current rank and the peer process rank.
874
+ // e.g. If process 1 and process 2 are involved in a point-to-point
875
+ // communication, the key will be "1:2" on both processes. Note: this is for
876
+ // the scenario where there is only 1 GPU per process. When it comes to
877
+ // multiple GPUs per process, this part may need to be redesigned.
878
+ std::unordered_map<std::string, std::shared_ptr<NCCLComm>> devNCCLCommMap_;
879
+
880
+ // The NCCL communicators currently in process of being initialized.
881
+ std::unordered_map<std::string, std::shared_ptr<NCCLComm>>
882
+ inInitializationCommMap_;
883
+
884
+ // Map from ncclUniqueId to appropriate communicator.
885
+ std::unordered_map<std::string, std::shared_ptr<NCCLComm>> ncclIdToCommMap_;
886
+
887
+ // Mutex to guard maps like devNCCLCommMap_ and ncclIdToCommMap_.
888
+ std::mutex mutex_;
889
+
890
+ // Heartbeat of watchdog thread.
891
+ std::atomic_uint64_t heartbeat_;
892
+
893
+ // The time interval (in seconds) used to decide whether the watchdog heartbeat is missing.
894
+ int heartbeatTimeoutInSec_;
895
+
896
+ // Timeout (in milliseconds) for the debug info dump to finish.
897
+ int waitTimeoutDumpInMilSec_;
898
+
899
+ // Interval for checking coordinated signals from other ranks in ProcessGroupNCCL,
900
+ // e.g., trigger the dump of the debugging info for timeout when notified.
901
+ int coordCheckIntervalMilSec_;
902
+
903
+ // Size of ring buffer where we store NCCL Traces for debugging.
904
+ int ncclTraceBufferSize_;
905
+
906
+ // We gate the heartbeat monitor thread so that we can roll it out gradually.
907
+ std::atomic<bool> monitorThreadEnabled_;
908
+
909
+ // Monitor thread which checks the heartbeat of Watchdog thread.
910
+ // If the monitor thread finds there is no heartbeat, it will dump debug info
911
+ // and then kill the watchdog thread to avoid hang.
912
+ std::thread ncclHeartbeatMonitorThread_;
913
+
914
+ // Watchdog thread which looks for errors on the cached NCCL communicators.
915
+ std::thread ncclCommWatchdogThread_;
916
+
917
+ std::thread onCompletionHookThread_;
918
+
919
+ // Whether or not we should terminate the watchdog and workCleanup threads.
920
+ std::atomic<bool> terminateProcessGroup_;
921
+
922
+ // Whether or not we should terminate the heartbeat monitoring threads.
923
+ std::atomic<bool> terminateHeartbeatMonitorThread_;
924
+
925
+ // Whether we are in the shutdown mode when we are trying to get debug info,
926
+ // such as desync report.
927
+ std::atomic<bool> collectiveDebugInfoMode_;
928
+
929
+ // Whether there are hooks pending to be fired
930
+ std::atomic<bool> hasPendingHooks_;
931
+
932
+ // This is the signal from watchdog threads to indicate whether the monitor
933
+ // thread should dump. Making it static so that it is accessible from all the
934
+ // PGs. With this flag, monitor thread would dump debug info under any one of
935
+ // the 3 conditions: 1: this flag is set to true by the watchdog thread when
936
+ // it detects a timeout; 2: a timeout signal is received from
937
+ // other ranks through the TCPStore; 3: there is no heartbeat from the watchdog.
938
+ // Note that only the monitor thread from PG0 should dump the debug info, and only once.
939
+ static std::atomic<bool> shouldDump_;
940
+
941
+ // Mutex to Guard workMetaList_
942
+ std::mutex workMetaListMutex_;
943
+
944
+ // Mutex to Guard monitorWakeUpCV_
945
+ std::mutex monitorMutex_;
946
+
947
+ bool writeDebugInfo_ = false;
948
+
949
+ // Condition Variable for watchdog thread sleep
950
+ std::condition_variable workMetaListCV_;
951
+
952
+ // Condition Variable for monitor thread to wake up early
953
+ std::condition_variable monitorWakeUpCV_;
954
+
955
+ // List to store in-progress WorkNCCL objects
956
+ std::list<ProcessGroupNCCL::WorkNCCL> workMetaList_;
957
+
958
+ std::chrono::time_point<std::chrono::steady_clock> lastWorkListUpdateTime_;
959
+
960
+ // Mutex to Guard completedWorkList_
961
+ std::mutex completedWorkListMutex_;
962
+
963
+ // Condition Variable for the onCompletionHook thread to sleep on
964
+ std::condition_variable completedWorkListCV_;
965
+
966
+ std::list<ProcessGroupNCCL::WorkNCCL> completedWorkList_;
967
+
968
+ // Add a WorkNCCL entry to workMetaList_
969
+ void workEnqueue(c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL>);
970
+
971
+ // The CUDA streams used by NCCL kernels
972
+ std::unordered_map<std::string, at::cuda::CUDAStream> ncclStreams_;
973
+
974
+ // The CUDA events used to sync NCCL streams
975
+ std::unordered_map<std::string, at::cuda::CUDAEvent> ncclEvents_;
976
+
977
+ // Device Indexes used for all collectives in this group
978
+ std::set<int> usedDeviceIdxs_;
979
+
980
+ // Flag to denote if a coalescing groupStart/groupEnd block is active
981
+ int coalescing_state_ = 0;
982
+
983
+ // Stores device indexes for all collectives run inside a coalescing block
984
+ std::vector<at::Device> coalescedDevices_;
985
+
986
+ // Stores communicators for all collectives run inside a coalescing block
987
+ std::vector<std::shared_ptr<NCCLComm>> coalescedComms_;
988
+
989
+ // map from the key: "group name + pg counter (ID)" to the
990
+ // unique NCCL ID count. This needs to be group and pg specific
991
+ //
992
+ // For each process group, we need a uniform unique NCCL ID counter to ensure
993
+ // that NCCL operation in this process group can be completed successfully.
994
+ // Since each process group ID belongs to a group name, the key to this map
995
+ // is a combination of group name and ProcessGroupNCCL ID.
996
+ static std::unordered_map<std::string, ssize_t> pgUniqueNCCLIDCnt_;
997
+
998
+ // map from group name to the pg counter (ID) within that group
999
+ //
1000
+ // For each group with the "group name" (which is the key), we need to
1001
+ // keep track of a unique process group ID when creating a new
1002
+ // ProcessGroupNCCL for this "group name". Therefore, the value of this
1003
+ // map keeps the unique ProcessGroupNCCL's ID for a specific group with
1004
+ // the "group name". The reason we need a per-group process group ID counter
1005
+ // is that different groups can have different ranks and we need to ensure that
1006
+ // each group has its own uniform process group ID for all its ranks.
1007
+ static std::unordered_map<std::string, ssize_t> processGroupCounterMap_;
1008
+
1009
+ // Whether or not wait() and synchronize() are blocking operations that wait
1010
+ // for the operation to complete.
1011
+ bool blockingWait_ = false;
1012
+
1013
+ // Whether to abort the communicators when users call destroy_process_group().
1014
+ // If yes, communicators will be aborted when destroy_process_group is called,
1015
+ // but not in the destructor.
1016
+ bool abortInDestroyProcessGroup_ = false;
1017
+
1018
+ // Whether or not to hook the cache allocator to register all allocated
1019
+ // tensors
1020
+ bool useTensorRegisterAllocatorHook_ = false;
1021
+
1022
+ // Whether or not the workCleanupThread is used to perform async error
1023
+ // handling.
1024
+ ErrorHandlingMode asyncErrorHandling_ = NoHandling;
1025
+
1026
+ // Whether or not to enable timeout root cause analysis.
1027
+ bool desyncDebug_;
1028
+
1029
+ // Whether or not to dump debug info on timeout
1030
+ bool dumpOnTimeout_;
1031
+
1032
+ // Whether or not to create start CUDAEvent and enable timing for start
1033
+ // and end events. Note that enableTiming_ is always true if desyncDebug_
1034
+ // is set to true.
1035
+ std::atomic<bool> enableTiming_;
1036
+
1037
+ // Flag to enable printing the hash values of collective inputs/outputs for
1038
+ // verification.
1039
+ std::atomic<bool> enableCollecticeHashDebug_;
1040
+
1041
+ // Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set
1042
+ bool avoidRecordStreams_ = false;
1043
+
1044
+ // Set of communicators that this process group has aborted and their
1045
+ // ncclUniqueId has been written to the store. We don't need a lock
1046
+ // for this set since only the watchdog thread accesses it. The
1047
+ // set contains the string representation of ncclUniqueId.
1048
+ std::unordered_set<std::string> abortedComms_;
1049
+
1050
+ // The number of active ncclGroupStart() calls. This counter will be increased
1051
+ // by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd()
1052
+ // is called.
1053
+ static thread_local uint64_t ncclActiveGroupCounter_;
1054
+
1055
+ // Counting for the sequential number of NCCL collective call.
1056
+ // (specifically, how many actual kernels we launched, which differs from
1057
+ // op_id_ when coalescing is enabled)
1058
+ uint64_t seq_{0};
1059
+
1060
+ // Incrementing counter for logical operations (collective or p2p) issued on
1061
+ // the ProcessGroup
1062
+ uint64_t op_id_{0};
1063
+
1064
+ // the sequential number of the last collective enqueued into workMetaList_.
1065
+ // This is useful for identifying a rank that has not joined a collective.
1066
+ uint64_t lastEnqueuedSeq_;
1067
+
1068
+ // the sequential number of the last collective completed, as marked by
1069
+ // the watchdog thread
1070
+ uint64_t lastCompletedSeq_;
1071
+
1072
+ std::exception_ptr watchDogException_ = nullptr;
1073
+
1074
+ size_t uid_;
1075
+
1076
+ std::string logPrefix_;
1077
+
1078
+ c10::intrusive_ptr<intra_node_comm::IntraNodeComm> intraNodeComm_;
1079
+ };
1080
+
1081
+ TORCH_API std::string dump_nccl_trace();
1082
+
1083
+ // Gets a mutable reference to a global optional function. Heartbeat Monitor
1084
+ // will query this function and, if available, call it to dump traces. Inside
1085
+ // fbcode, we store a function here that uses an internal tool for process
1086
+ // tracing.
1087
+ TORCH_API c10::optional<std::function<std::string()>>& get_cpp_trace_dumper();
1088
+
1089
+ // Similar to get_cpp_trace_dumper, this stores a function defined in
1090
+ // torch-python layer that lets us check whether the GIL can be acquired,
1091
+ // helpful for instrumenting in cases where a hang was observed.
1092
+ typedef bool (*gil_checker_t)();
1093
+
1094
+ TORCH_API gil_checker_t& get_gil_checker();
1095
+ } // namespace c10d
1096
+
1097
+ #endif // USE_C10D_NCCL
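The two hooks declared at the end of this header, get_cpp_trace_dumper() and get_gil_checker(), are mutable globals, so embedding code can install its own callbacks for the heartbeat monitor to use when it suspects a hang. A minimal sketch of such wiring, assuming only the declarations above; installDebugHooks and both callback bodies are illustrative placeholders, not part of PyTorch:

#include <torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp>

#ifdef USE_C10D_NCCL
// Hypothetical helper: register optional debug callbacks for ProcessGroupNCCL.
void installDebugHooks() {
  // Queried by the heartbeat monitor; when set, it is called to dump a
  // process-wide C++ trace alongside the NCCL flight-recorder dump.
  c10d::get_cpp_trace_dumper() = []() -> std::string {
    return "C++ trace dumping not wired up in this build";  // placeholder payload
  };
  // Lets instrumentation check whether the GIL can currently be acquired,
  // which helps tell a Python-side hang apart from a CUDA/NCCL hang.
  c10d::get_gil_checker() = []() -> bool {
    return true;  // assumption for this sketch: the GIL is considered acquirable
  };
}
#endif // USE_C10D_NCCL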
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp ADDED
@@ -0,0 +1,113 @@
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
6
+
7
+ namespace c10d {
8
+
9
+ constexpr const char* ROUND_ROBIN_BACKEND_NAME = "round_robin";
10
+
11
+ // ProcessGroupRoundRobin implements simple load balancing.
12
+ //
13
+ // It is constructed with multiple process groups. Each call is dispatched to
14
+ // one of the specified process groups in a round robin fashion. Each process
15
+ // group instance must have the same rank and size.
16
+ //
17
+ // All functions of the class are expected to be called in the same order
18
+ // across all processes in the process group. This is the only way that we
19
+ // can guarantee to match up the same calls among all processes.
20
+ //
21
+ class TORCH_API ProcessGroupRoundRobin final : public ProcessGroup {
22
+ public:
23
+ explicit ProcessGroupRoundRobin(
24
+ int rank,
25
+ int size,
26
+ std::vector<c10::intrusive_ptr<ProcessGroup>> processGroups);
27
+
28
+ ~ProcessGroupRoundRobin() override;
29
+
30
+ const std::string getBackendName() const override {
31
+ return std::string(ROUND_ROBIN_BACKEND_NAME);
32
+ }
33
+
34
+ c10::intrusive_ptr<Work> broadcast(
35
+ std::vector<at::Tensor>& tensors,
36
+ const BroadcastOptions& opts = BroadcastOptions()) override;
37
+
38
+ c10::intrusive_ptr<Work> allreduce(
39
+ std::vector<at::Tensor>& tensors,
40
+ const AllreduceOptions& opts = AllreduceOptions()) override;
41
+
42
+ c10::intrusive_ptr<Work> allreduce_coalesced(
43
+ std::vector<at::Tensor>& tensors,
44
+ const AllreduceCoalescedOptions& opts =
45
+ AllreduceCoalescedOptions()) override;
46
+
47
+ c10::intrusive_ptr<Work> reduce(
48
+ std::vector<at::Tensor>& tensors,
49
+ const ReduceOptions& opts = ReduceOptions()) override;
50
+
51
+ c10::intrusive_ptr<Work> allgather(
52
+ std::vector<std::vector<at::Tensor>>& outputs,
53
+ std::vector<at::Tensor>& inputs,
54
+ const AllgatherOptions& opts = AllgatherOptions()) override;
55
+
56
+ c10::intrusive_ptr<Work> _allgather_base(
57
+ at::Tensor& outputBuffer,
58
+ at::Tensor& inputBuffer,
59
+ const AllgatherOptions& opts = AllgatherOptions()) override;
60
+
61
+ c10::intrusive_ptr<Work> allgather_coalesced(
62
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
63
+ std::vector<at::Tensor>& inputTensors,
64
+ const AllgatherOptions& opts = AllgatherOptions()) override;
65
+
66
+ c10::intrusive_ptr<Work> gather(
67
+ std::vector<std::vector<at::Tensor>>& outputs,
68
+ std::vector<at::Tensor>& inputs,
69
+ const GatherOptions& opts = GatherOptions()) override;
70
+
71
+ c10::intrusive_ptr<Work> scatter(
72
+ std::vector<at::Tensor>& outputs,
73
+ std::vector<std::vector<at::Tensor>>& inputs,
74
+ const ScatterOptions& opts = ScatterOptions()) override;
75
+
76
+ c10::intrusive_ptr<Work> reduce_scatter(
77
+ std::vector<at::Tensor>& outputs,
78
+ std::vector<std::vector<at::Tensor>>& inputs,
79
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
80
+
81
+ c10::intrusive_ptr<Work> alltoall_base(
82
+ at::Tensor& outputTensor,
83
+ at::Tensor& inputTensor,
84
+ std::vector<int64_t>& outputSplitSizes,
85
+ std::vector<int64_t>& inputSplitSizes,
86
+ const AllToAllOptions& opts = AllToAllOptions()) override;
87
+
88
+ c10::intrusive_ptr<Work> send(
89
+ std::vector<at::Tensor>& tensors,
90
+ int dstRank,
91
+ int tag) override;
92
+
93
+ c10::intrusive_ptr<Work> recv(
94
+ std::vector<at::Tensor>& tensors,
95
+ int srcRank,
96
+ int tag) override;
97
+
98
+ c10::intrusive_ptr<Work> recvAnysource(
99
+ std::vector<at::Tensor>& tensors,
100
+ int tag) override;
101
+
102
+ c10::intrusive_ptr<Work> barrier(
103
+ const BarrierOptions& opts = BarrierOptions()) override;
104
+
105
+ private:
106
+ std::vector<c10::intrusive_ptr<ProcessGroup>> processGroups_;
107
+ std::vector<c10::intrusive_ptr<ProcessGroup>>::const_iterator iterator_;
108
+
109
+ // Returns the next ProcessGroup to use.
110
+ const c10::intrusive_ptr<ProcessGroup>& next();
111
+ };
112
+
113
+ } // namespace c10d
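The round-robin dispatch described above reduces to cycling iterator_ over processGroups_ and wrapping around at the end. A minimal sketch of that selection logic, written as a free function over caller-supplied state rather than as the actual private next() member:

#include <vector>
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>

// Illustrative only: pick the next group, wrapping around at the end.
const c10::intrusive_ptr<c10d::ProcessGroup>& pickNextGroup(
    const std::vector<c10::intrusive_ptr<c10d::ProcessGroup>>& groups,
    std::vector<c10::intrusive_ptr<c10d::ProcessGroup>>::const_iterator& cursor) {
  if (cursor == groups.cend()) {
    cursor = groups.cbegin();  // wrap around once every group has been used
  }
  return *cursor++;  // dispatch target for the current call
}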
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp ADDED
@@ -0,0 +1,140 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>
6
+ #include <torch/csrc/distributed/c10d/Types.hpp>
7
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
8
+
9
+ namespace c10d {
10
+
11
+ class TORCH_API ProcessGroupWrapper : public Backend {
12
+ public:
13
+ explicit ProcessGroupWrapper(
14
+ c10::intrusive_ptr<Backend> backend,
15
+ c10::intrusive_ptr<Backend> glooBackend);
16
+
17
+ const std::string getBackendName() const override;
18
+
19
+ c10::intrusive_ptr<Work> broadcast(
20
+ std::vector<at::Tensor>& data,
21
+ const BroadcastOptions& opts = BroadcastOptions()) override;
22
+
23
+ c10::intrusive_ptr<Work> allreduce(
24
+ std::vector<at::Tensor>& data,
25
+ const AllreduceOptions& opts = AllreduceOptions()) override;
26
+
27
+ c10::intrusive_ptr<Work> allreduce_coalesced(
28
+ std::vector<at::Tensor>& tensors,
29
+ const AllreduceCoalescedOptions& opts =
30
+ AllreduceCoalescedOptions()) override;
31
+
32
+ c10::intrusive_ptr<Work> reduce(
33
+ std::vector<at::Tensor>& tensors,
34
+ const ReduceOptions& opts = ReduceOptions()) override;
35
+
36
+ c10::intrusive_ptr<Work> allgather(
37
+ std::vector<std::vector<at::Tensor>>& outputTensors,
38
+ std::vector<at::Tensor>& inputTensors,
39
+ const AllgatherOptions& opts = AllgatherOptions()) override;
40
+
41
+ c10::intrusive_ptr<Work> _allgather_base(
42
+ at::Tensor& outputBuffer,
43
+ at::Tensor& inputBuffer,
44
+ const AllgatherOptions& opts = AllgatherOptions()) override;
45
+
46
+ // This function is deprecated and will be moved out of ProcessGroup to comms:
47
+ // * do not add dependencies on this function,
48
+ // * do not implement it in your ProcessGroup, implement _allgather_base
49
+ // instead.
50
+ c10::intrusive_ptr<Work> allgather_coalesced(
51
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
52
+ std::vector<at::Tensor>& inputTensors,
53
+ const AllgatherOptions& opts = AllgatherOptions()) override;
54
+
55
+ c10::intrusive_ptr<Work> gather(
56
+ std::vector<std::vector<at::Tensor>>& outputTensors,
57
+ std::vector<at::Tensor>& inputTensors,
58
+ const GatherOptions& opts = GatherOptions()) override;
59
+
60
+ c10::intrusive_ptr<Work> scatter(
61
+ std::vector<at::Tensor>& outputTensors,
62
+ std::vector<std::vector<at::Tensor>>& inputTensors,
63
+ const ScatterOptions& opts = ScatterOptions()) override;
64
+
65
+ c10::intrusive_ptr<Work> reduce_scatter(
66
+ std::vector<at::Tensor>& outputTensors,
67
+ std::vector<std::vector<at::Tensor>>& inputTensors,
68
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
69
+
70
+ c10::intrusive_ptr<Work> alltoall_base(
71
+ at::Tensor& outputTensor,
72
+ at::Tensor& inputTensor,
73
+ std::vector<int64_t>& outputSplitSizes,
74
+ std::vector<int64_t>& inputSplitSizes,
75
+ const AllToAllOptions& opts = AllToAllOptions()) override;
76
+
77
+ c10::intrusive_ptr<Work> alltoall(
78
+ std::vector<at::Tensor>& outputTensors,
79
+ std::vector<at::Tensor>& inputTensors,
80
+ const AllToAllOptions& opts = AllToAllOptions()) override;
81
+
82
+ void monitoredBarrier(const BarrierOptions& opts, bool waitAllRanks = false)
83
+ override;
84
+
85
+ // Agrees on an initial sequence number for the whole group by having rank 0
86
+ // create it and broadcast it to other ranks using the store. Only implemented
87
+ // for GLOO and NCCL backends currently.
88
+ // dont implement this
89
+ void setSequenceNumberForGroup() override;
90
+
91
+ // Retrieves the current sequence number for the whole group, which should be
92
+ // in sync. If the returned number is not consistent across the group, it
93
+ // may indicate that there is some sort of collective desynchronization.
94
+ uint64_t getSequenceNumberForGroup() override; // just call underlying
95
+
96
+ c10::intrusive_ptr<Work> send(
97
+ std::vector<at::Tensor>& tensors,
98
+ int dstRank,
99
+ int tag) override;
100
+
101
+ c10::intrusive_ptr<Work> recv(
102
+ std::vector<at::Tensor>& tensors,
103
+ int srcRank,
104
+ int tag) override;
105
+
106
+ c10::intrusive_ptr<Work> recvAnysource(
107
+ std::vector<at::Tensor>& tensors,
108
+ int tag) override;
109
+
110
+ c10::intrusive_ptr<Work> barrier(
111
+ const BarrierOptions& opts = BarrierOptions()) override;
112
+
113
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
114
+ at::Tensor& outputBuffer,
115
+ at::Tensor& inputBuffer,
116
+ const ReduceScatterOptions& opts) override;
117
+
118
+ void startCoalescing() override;
119
+
120
+ c10::intrusive_ptr<Work> endCoalescing() override;
121
+
122
+ c10::intrusive_ptr<Backend> getWrappedPg() const;
123
+
124
+ private:
125
+ // Underlying process group that actual application collectives will be
126
+ // dispatched to
127
+ c10::intrusive_ptr<Backend> backend_;
128
+ // Gloo process group responsible for internal coordination such as monitored
129
+ // barrier, sequence number checking, collective fingerprint collecting.
130
+ c10::intrusive_ptr<Backend> glooBackend_;
131
+ // Conducts several checks to ensure that the underlying collective is well
132
+ // formed with the goal of notifying the user about incorrect collective use
133
+ // in the application.
134
+ void runCollectiveChecks(
135
+ OpType op_type,
136
+ const std::vector<at::Tensor>& tensors);
137
+ };
138
+ } // namespace c10d
139
+
140
+ #endif // USE_C10D_GLOO
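For context, the wrapper sits in front of a real backend and uses a Gloo backend purely for the internal consistency checks (monitored barrier, sequence numbers, collective fingerprints) before each call is forwarded. A small construction sketch, assuming realBackend and glooBackend already exist; wrapForDebugChecks is an illustrative helper, not a PyTorch API:

#include <torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp>

#ifdef USE_C10D_GLOO
// Illustrative wiring: every collective issued on the returned wrapper is first
// checked over the Gloo side channel, then dispatched to the wrapped backend.
c10::intrusive_ptr<c10d::Backend> wrapForDebugChecks(
    c10::intrusive_ptr<c10d::Backend> realBackend,
    c10::intrusive_ptr<c10d::Backend> glooBackend) {
  auto wrapper = c10::make_intrusive<c10d::ProcessGroupWrapper>(
      std::move(realBackend), std::move(glooBackend));
  // The original backend stays reachable through wrapper->getWrappedPg().
  return wrapper;
}
#endif // USE_C10D_GLOO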
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp ADDED
@@ -0,0 +1,219 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+ #include <torch/csrc/jit/python/pybind_utils.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ namespace c10d {
8
+
9
+ // PyProcessGroup is a pybind11 trampoline class to allow a Python
10
+ // class to inherit from torch.distributed.ProcessGroup
11
+ class PyProcessGroup : public ProcessGroup {
12
+ public:
13
+ // PyWork is a pybind11 trampoline class to allow a Python
14
+ // class to inherit from torch.distributed.Work
15
+ class PyWork : public Work {
16
+ public:
17
+ PyWork() = default;
18
+
19
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
20
+ PYBIND11_OVERRIDE(
21
+ bool, /* Return type */
22
+ Work, /* Parent class */
23
+ wait, /* Name of function in C++ */
24
+ timeout);
25
+ }
26
+
27
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override {
28
+ // We cannot use PYBIND11_OVERRIDE because:
29
+ // 1. We have to >MANUALLY< unwrap the PyFutureWrapper and
30
+ // 2. The python name is get_future
31
+ pybind11::gil_scoped_acquire gil;
32
+ auto override =
33
+ pybind11::get_override(static_cast<const Work*>(this), "get_future");
34
+
35
+ if (override) {
36
+ py::object o = override();
37
+ auto futWrapper =
38
+ o.cast<std::shared_ptr<torch::jit::PythonFutureWrapper>>();
39
+ return futWrapper->fut;
40
+ }
41
+
42
+ return Work::getFuture();
43
+ }
44
+ };
45
+
46
+ using ProcessGroup::ProcessGroup;
47
+
48
+ const std::string getBackendName() const override {
49
+ PYBIND11_OVERRIDE_PURE(
50
+ std::string, /* Return type */
51
+ ProcessGroup, /* Parent class */
52
+ getBackendName, /* Name of function in C++ */
53
+ );
54
+ }
55
+
56
+ c10::intrusive_ptr<Work> allgather(
57
+ std::vector<std::vector<at::Tensor>>& outputTensors,
58
+ std::vector<at::Tensor>& inputTensors,
59
+ const AllgatherOptions& opts = AllgatherOptions()) override {
60
+ PYBIND11_OVERRIDE(
61
+ c10::intrusive_ptr<Work>, /* Return type */
62
+ ProcessGroup, /* Parent class */
63
+ allgather, /* Name of function in C++ */
64
+ outputTensors,
65
+ inputTensors,
66
+ opts);
67
+ }
68
+
69
+ c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
70
+ std::vector<at::Tensor>& outputTensors,
71
+ std::vector<at::Tensor>& inputTensors,
72
+ const AllgatherOptions& opts = AllgatherOptions()) override {
73
+ PYBIND11_OVERRIDE(
74
+ c10::intrusive_ptr<Work>, /* Return type */
75
+ ProcessGroup, /* Parent class */
76
+ allgather_into_tensor_coalesced, /* Name of function in C++ */
77
+ outputTensors,
78
+ inputTensors,
79
+ opts);
80
+ }
81
+
82
+ c10::intrusive_ptr<Work> allreduce(
83
+ std::vector<at::Tensor>& tensors,
84
+ const AllreduceOptions& opts = AllreduceOptions()) override {
85
+ PYBIND11_OVERRIDE(
86
+ c10::intrusive_ptr<Work>, /* Return type */
87
+ ProcessGroup, /* Parent class */
88
+ allreduce, /* Name of function in C++ */
89
+ tensors,
90
+ opts);
91
+ }
92
+
93
+ c10::intrusive_ptr<Work> allreduce_coalesced(
94
+ std::vector<at::Tensor>& tensors,
95
+ const AllreduceCoalescedOptions& opts =
96
+ AllreduceCoalescedOptions()) override {
97
+ PYBIND11_OVERRIDE(
98
+ c10::intrusive_ptr<Work>, /* Return type */
99
+ ProcessGroup, /* Parent class */
100
+ allreduce_coalesced, /* Name of function in C++ */
101
+ tensors,
102
+ opts);
103
+ }
104
+
105
+ c10::intrusive_ptr<Work> barrier(
106
+ const BarrierOptions& opts = BarrierOptions()) override {
107
+ PYBIND11_OVERRIDE(
108
+ c10::intrusive_ptr<Work>, /* Return type */
109
+ ProcessGroup, /* Parent class */
110
+ barrier, /* Name of function in C++ */
111
+ opts);
112
+ }
113
+
114
+ c10::intrusive_ptr<Work> broadcast(
115
+ std::vector<at::Tensor>& tensors,
116
+ const BroadcastOptions& opts = BroadcastOptions()) override {
117
+ PYBIND11_OVERRIDE(
118
+ c10::intrusive_ptr<Work>, /* Return type */
119
+ ProcessGroup, /* Parent class */
120
+ broadcast, /* Name of function in C++ */
121
+ tensors,
122
+ opts);
123
+ }
124
+
125
+ c10::intrusive_ptr<Work> reduce_scatter(
126
+ std::vector<at::Tensor>& outputTensors,
127
+ std::vector<std::vector<at::Tensor>>& inputTensors,
128
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
129
+ PYBIND11_OVERRIDE(
130
+ c10::intrusive_ptr<Work>, /* Return type */
131
+ ProcessGroup, /* Parent class */
132
+ reduce_scatter, /* Name of function in C++ */
133
+ outputTensors,
134
+ inputTensors,
135
+ opts);
136
+ }
137
+
138
+ c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
139
+ std::vector<at::Tensor>& outputTensors,
140
+ std::vector<at::Tensor>& inputTensors,
141
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
142
+ PYBIND11_OVERRIDE(
143
+ c10::intrusive_ptr<Work>, /* Return type */
144
+ ProcessGroup, /* Parent class */
145
+ reduce_scatter_tensor_coalesced, /* Name of function in C++ */
146
+ outputTensors,
147
+ inputTensors,
148
+ opts);
149
+ }
150
+
151
+ c10::intrusive_ptr<Work> send(
152
+ std::vector<at::Tensor>& tensors,
153
+ int dstRank,
154
+ int tag) override {
155
+ PYBIND11_OVERRIDE(
156
+ c10::intrusive_ptr<Work>, /* Return type */
157
+ ProcessGroup, /* Parent class */
158
+ send, /* Name of function in C++ */
159
+ tensors,
160
+ dstRank,
161
+ tag);
162
+ }
163
+
164
+ c10::intrusive_ptr<Work> recv(
165
+ std::vector<at::Tensor>& tensors,
166
+ int srcRank,
167
+ int tag) override {
168
+ PYBIND11_OVERRIDE(
169
+ c10::intrusive_ptr<Work>, /* Return type */
170
+ ProcessGroup, /* Parent class */
171
+ recv, /* Name of function in C++ */
172
+ tensors,
173
+ srcRank,
174
+ tag);
175
+ }
176
+ };
177
+
178
+ class TORCH_PYTHON_API PythonOnCompletionHook {
179
+ public:
180
+ // Wraps a py::object hook and acquires Python GIL in dtor before
181
+ // destructing the hook object.
182
+ PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {}
183
+
184
+ ~PythonOnCompletionHook() {
185
+ py::gil_scoped_acquire ag;
186
+ hook_.dec_ref();
187
+ // Explicitly set hook_ to nullptr to prevent py::object's dtor
188
+ // to decref on the PyObject again.
189
+ // See Note [Destructing py::object] in python_ivalue.h
190
+ hook_.ptr() = nullptr;
191
+ }
192
+
193
+ void operator()(std::shared_ptr<WorkInfo> workInfo) const {
194
+ std::exception_ptr eptr;
195
+ {
196
+ py::gil_scoped_acquire acquire;
197
+ try {
198
+ hook_(workInfo);
199
+ } catch (py::error_already_set& e) {
200
+ // py::error_already_set requires GIL to destruct, take
201
+ // special care.
202
+ eptr = std::make_exception_ptr(std::runtime_error(e.what()));
203
+ e.restore();
204
+ PyErr_Clear();
205
+ } catch (std::exception& e) {
206
+ eptr = std::current_exception();
207
+ }
208
+ }
209
+ // No more Python-related stuff at this point, i.e., this
210
+ // exception can be captured and handled by PG backend.
211
+ if (eptr)
212
+ std::rethrow_exception(eptr);
213
+ }
214
+
215
+ private:
216
+ py::object hook_;
217
+ };
218
+
219
+ } // namespace c10d
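PyWork and PyProcessGroup follow the standard pybind11 trampoline pattern: a C++ subclass whose virtual overrides forward into Python through PYBIND11_OVERRIDE / PYBIND11_OVERRIDE_PURE. A stripped-down sketch of that pattern on a made-up base class (ExampleBase is purely illustrative and not part of c10d):

#include <string>
#include <torch/csrc/utils/pybind.h>

// Hypothetical base class used only to illustrate the trampoline shape above.
struct ExampleBase {
  virtual ~ExampleBase() = default;
  virtual std::string name() const {
    return "base";
  }
};

// Trampoline: if a Python subclass overrides name(), the call is forwarded to
// Python; otherwise the C++ default implementation runs.
struct PyExampleBase : ExampleBase {
  using ExampleBase::ExampleBase;
  std::string name() const override {
    PYBIND11_OVERRIDE(
        std::string, /* Return type */
        ExampleBase, /* Parent class */
        name, /* Name of function in C++ */
    );
  }
};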
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp ADDED
@@ -0,0 +1,73 @@
1
+
2
+ #pragma once
3
+
4
+ #include <shared_mutex>
5
+
6
+ #include <torch/csrc/autograd/function.h>
7
+
8
+ namespace c10d {
9
+
10
+ // `RankLocal` maintains a unique instance of T for each non-autograd thread.
11
+ // For non-autograd threads, `RankLocal<T>::get()` functions similar to
12
+ // thread_local. For autograd threads, `RankLocal<T>::get()` returns the
13
+ // instance of T corresponding to the enqueuing non-autograd thread. The
14
+ // mechanism allows for rank-specific context shared between forward and
15
+ // backward. It works for both the one-rank-per-process and one-rank-per-thread
16
+ // scenarios.
17
+ //
18
+ // NOTE: RankLocal doesn't make the underlying objects thread-safe.
19
+ template <typename T>
20
+ class RankLocal {
21
+ public:
22
+ RankLocal(const RankLocal&) = delete;
23
+ RankLocal& operator=(const RankLocal&) = delete;
24
+
25
+ static T& get() {
26
+ // Fast path: non-autograd threads can simply return
27
+ // the object reference cached in TLS.
28
+ if (cached_ != nullptr) {
29
+ return *cached_;
30
+ }
31
+ const auto node = torch::autograd::get_current_node();
32
+ auto fwd_thread_id = node == nullptr ? at::RecordFunction::currentThreadId()
33
+ : node->thread_id();
34
+ // Optimistically acquire the read lock first, since most likely we are in
35
+ // an autograd thread and the object has already been constructed.
36
+ {
37
+ std::shared_lock read_lock(lock_);
38
+ auto it = thread_id_to_rank_local_.find(fwd_thread_id);
39
+ if (it != thread_id_to_rank_local_.end()) {
40
+ // Cache for non-autograd threads
41
+ if (node == nullptr) {
42
+ cached_ = &it->second;
43
+ }
44
+ return it->second;
45
+ }
46
+ }
47
+
48
+ std::unique_lock write_lock(lock_);
49
+ auto [it, _] = thread_id_to_rank_local_.try_emplace(fwd_thread_id);
50
+ // Cache for non-autograd threads
51
+ if (node == nullptr) {
52
+ cached_ = &it->second;
53
+ }
54
+ return it->second;
55
+ }
56
+
57
+ private:
58
+ RankLocal(){};
59
+ thread_local static T* cached_;
60
+ static std::unordered_map<uint64_t, T> thread_id_to_rank_local_;
61
+ static std::shared_mutex lock_;
62
+ };
63
+
64
+ template <typename T>
65
+ thread_local T* RankLocal<T>::cached_ = nullptr;
66
+
67
+ template <typename T>
68
+ std::unordered_map<uint64_t, T> RankLocal<T>::thread_id_to_rank_local_;
69
+
70
+ template <typename T>
71
+ std::shared_mutex RankLocal<T>::lock_;
72
+
73
+ } // namespace c10d
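A short usage sketch for RankLocal: a forward-pass thread and the autograd thread that runs its backward resolve to the same instance, so per-rank bookkeeping survives across the two. PerRankState and onCollectiveIssued are made up for illustration:

#include <cstdint>
#include <torch/csrc/distributed/c10d/RankLocal.hpp>

// Hypothetical per-rank payload; RankLocal itself does not prescribe one.
struct PerRankState {
  uint64_t collectiveCount = 0;
};

void onCollectiveIssued() {
  // In a non-autograd thread this behaves like thread_local storage; in an
  // autograd thread it resolves to the instance of the enqueuing thread.
  auto& state = c10d::RankLocal<PerRankState>::get();
  state.collectiveCount++;
}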
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp ADDED
@@ -0,0 +1,101 @@
1
+ #pragma once
2
+
3
+ #include <chrono>
4
+ #include <cstdint>
5
+ #include <stdexcept>
6
+ #include <string>
7
+ #include <vector>
8
+
9
+ #include <c10/macros/Macros.h>
10
+ #include <torch/custom_class.h>
11
+
12
+ namespace c10d {
13
+
14
+ // callback function will be given arguments (optional<string> oldValue,
15
+ // optional<string> newValue)
16
+ using WatchKeyCallback =
17
+ std::function<void(c10::optional<std::string>, c10::optional<std::string>)>;
18
+
19
+ class TORCH_API Store : public torch::CustomClassHolder {
20
+ public:
21
+ static constexpr std::chrono::milliseconds kDefaultTimeout =
22
+ std::chrono::seconds(300);
23
+ static constexpr std::chrono::milliseconds kNoTimeout =
24
+ std::chrono::milliseconds::zero();
25
+
26
+ Store() : timeout_(kDefaultTimeout) {}
27
+
28
+ explicit Store(const std::chrono::milliseconds& timeout)
29
+ : timeout_(timeout) {}
30
+
31
+ Store(const Store&) = default;
32
+ Store(Store&&) noexcept = default;
33
+
34
+ ~Store() override = default;
35
+
36
+ void set(const std::string& key, const std::string& value);
37
+
38
+ virtual void set(
39
+ const std::string& key,
40
+ const std::vector<uint8_t>& value) = 0;
41
+
42
+ std::string compareSet(
43
+ const std::string& key,
44
+ const std::string& currentValue,
45
+ const std::string& newValue);
46
+
47
+ virtual std::vector<uint8_t> compareSet(
48
+ const std::string& key,
49
+ const std::vector<uint8_t>& currentValue,
50
+ const std::vector<uint8_t>& newValue) {
51
+ TORCH_INTERNAL_ASSERT(false, "Not implemented.");
52
+ }
53
+
54
+ std::string get_to_str(const std::string& key);
55
+
56
+ virtual std::vector<uint8_t> get(const std::string& key) = 0;
57
+
58
+ virtual int64_t add(const std::string& key, int64_t value) = 0;
59
+
60
+ virtual bool deleteKey(const std::string& key) = 0;
61
+
62
+ virtual bool check(const std::vector<std::string>& keys) = 0;
63
+
64
+ virtual int64_t getNumKeys() = 0;
65
+
66
+ virtual void wait(const std::vector<std::string>& keys) = 0;
67
+
68
+ virtual void wait(
69
+ const std::vector<std::string>& keys,
70
+ const std::chrono::milliseconds& timeout) = 0;
71
+
72
+ virtual const std::chrono::milliseconds& getTimeout() const noexcept;
73
+
74
+ virtual void setTimeout(const std::chrono::milliseconds& timeout);
75
+
76
+ // watchKey() is deprecated and no longer supported.
77
+ virtual void watchKey(
78
+ const std::string& /* unused */,
79
+ WatchKeyCallback /* unused */) {
80
+ TORCH_CHECK(false, "watchKey is deprecated, no implementation support it.");
81
+ }
82
+
83
+ virtual void append(
84
+ const std::string& key,
85
+ const std::vector<uint8_t>& value);
86
+
87
+ virtual std::vector<std::vector<uint8_t>> multiGet(
88
+ const std::vector<std::string>& keys);
89
+
90
+ virtual void multiSet(
91
+ const std::vector<std::string>& keys,
92
+ const std::vector<std::vector<uint8_t>>& values);
93
+
94
+ // Returns true if this store support append, multiGet and multiSet
95
+ virtual bool hasExtendedApi() const;
96
+
97
+ protected:
98
+ std::chrono::milliseconds timeout_;
99
+ };
100
+
101
+ } // namespace c10d
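The abstract interface above is enough to express the usual rendezvous pattern used elsewhere in c10d (for example, broadcasting the NCCL unique id): one rank publishes a blob under a key, the others block on wait() and then get() it. A minimal sketch against a generic Store reference; exchangeBlob and the key name are arbitrary examples, not PyTorch APIs:

#include <cstdint>
#include <string>
#include <vector>
#include <torch/csrc/distributed/c10d/Store.hpp>

// Illustrative rendezvous: rank 0 publishes a blob, every rank reads it back.
std::vector<uint8_t> exchangeBlob(
    c10d::Store& store,
    int rank,
    const std::vector<uint8_t>& localBlob) {
  const std::string key = "example/unique_id";  // arbitrary example key
  if (rank == 0) {
    store.set(key, localBlob);  // publish the payload
  } else {
    store.wait({key});          // block until rank 0 has published (or timeout)
  }
  return store.get(key);        // all ranks observe the same bytes
}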
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp ADDED
@@ -0,0 +1,164 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <cstdint>
5
+ #include <memory>
6
+
7
+ #include <torch/csrc/distributed/c10d/Store.hpp>
8
+
9
+ namespace c10d {
10
+ namespace detail {
11
+
12
+ class TCPServer;
13
+
14
+ class TCPClient;
15
+
16
+ struct SocketAddress {
17
+ std::string host{};
18
+ std::uint16_t port{};
19
+ };
20
+
21
+ class Counter {
22
+ public:
23
+ void update(double val);
24
+ std::unordered_map<std::string, double> observe() const;
25
+
26
+ double mean() const noexcept {
27
+ return mean_;
28
+ }
29
+ int64_t count() const noexcept {
30
+ return count_;
31
+ }
32
+ double variance() const noexcept {
33
+ return m2_ / count_;
34
+ }
35
+ double sample_variance() const noexcept {
36
+ return m2_ / (count_ - 1);
37
+ }
38
+
39
+ private:
40
+ int64_t count_ = 0;
41
+ double mean_ = 0;
42
+ double m2_ = 0;
43
+ };
44
+
45
+ } // namespace detail
46
+
47
+ struct TCPStoreOptions {
48
+ static constexpr std::uint16_t kDefaultPort = 29500;
49
+
50
+ std::uint16_t port = kDefaultPort;
51
+ bool isServer = false;
52
+ c10::optional<std::size_t> numWorkers = c10::nullopt;
53
+ bool waitWorkers = true;
54
+ std::chrono::milliseconds timeout = Store::kDefaultTimeout;
55
+
56
+ // A boolean value indicating whether multiple store instances can be
57
+ // initialized with the same host:port pair.
58
+ bool multiTenant = false;
59
+
60
+ // If specified, and if isServer is true, the underlying TCPServer will take
61
+ // over the bound socket associated to this fd. This option is useful to avoid
62
+ // port assignment races in certain scenarios.
63
+ c10::optional<int> masterListenFd = c10::nullopt;
64
+
65
+ // A boolean value indicating whether to use the experimental libUV backend.
66
+ bool useLibUV = false;
67
+ };
68
+
69
+ class TORCH_API TCPStore : public Store {
70
+ public:
71
+ explicit TCPStore(std::string host, const TCPStoreOptions& opts = {});
72
+
73
+ [[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore(
74
+ const std::string& masterAddr,
75
+ std::uint16_t masterPort,
76
+ c10::optional<int> numWorkers = c10::nullopt,
77
+ bool isServer = false,
78
+ const std::chrono::milliseconds& timeout = kDefaultTimeout,
79
+ bool waitWorkers = true);
80
+
81
+ ~TCPStore() override;
82
+
83
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
84
+
85
+ std::vector<uint8_t> compareSet(
86
+ const std::string& key,
87
+ const std::vector<uint8_t>& expectedValue,
88
+ const std::vector<uint8_t>& desiredValue) override;
89
+
90
+ std::vector<uint8_t> get(const std::string& key) override;
91
+
92
+ int64_t add(const std::string& key, int64_t value) override;
93
+
94
+ bool deleteKey(const std::string& key) override;
95
+
96
+ bool check(const std::vector<std::string>& keys) override;
97
+
98
+ int64_t getNumKeys() override;
99
+
100
+ void wait(const std::vector<std::string>& keys) override;
101
+
102
+ void wait(
103
+ const std::vector<std::string>& keys,
104
+ const std::chrono::milliseconds& timeout) override;
105
+
106
+ void append(const std::string& key, const std::vector<uint8_t>& value)
107
+ override;
108
+
109
+ std::vector<std::vector<uint8_t>> multiGet(
110
+ const std::vector<std::string>& keys) override;
111
+
112
+ void multiSet(
113
+ const std::vector<std::string>& keys,
114
+ const std::vector<std::vector<uint8_t>>& values) override;
115
+
116
+ bool hasExtendedApi() const override;
117
+
118
+ // Waits for all workers to join.
119
+ void waitForWorkers();
120
+
121
+ // Returns the hostname used by the TCPStore.
122
+ const std::string& getHost() const noexcept {
123
+ return addr_.host;
124
+ }
125
+
126
+ // Returns the port used by the TCPStore.
127
+ std::uint16_t getPort() const noexcept {
128
+ return addr_.port;
129
+ }
130
+
131
+ std::unordered_map<std::string, std::unordered_map<std::string, double>>
132
+ collectClientCounters() const noexcept;
133
+
134
+ bool isLibUvBackend() const noexcept {
135
+ return usingLibUv_;
136
+ }
137
+
138
+ // note(xilunwu): this function is only for internal testing
139
+ void _splitSet(const std::string& key, const std::vector<uint8_t>& data);
140
+
141
+ private:
142
+ int64_t incrementValueBy(const std::string& key, int64_t delta);
143
+
144
+ void validate(void);
145
+
146
+ std::vector<uint8_t> doGet(const std::string& key);
147
+
148
+ void doWait(
149
+ c10::ArrayRef<std::string> keys,
150
+ std::chrono::milliseconds timeout);
151
+
152
+ detail::SocketAddress addr_;
153
+ std::shared_ptr<detail::TCPServer> server_;
154
+ std::unique_ptr<detail::TCPClient> client_;
155
+ c10::optional<std::size_t> numWorkers_;
156
+
157
+ const std::string initKey_ = "init/";
158
+ const std::string keyPrefix_ = "/";
159
+ std::mutex activeOpLock_;
160
+ std::unordered_map<std::string, detail::Counter> clientCounters_;
161
+ bool usingLibUv_ = false;
162
+ };
163
+
164
+ } // namespace c10d
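Putting TCPStoreOptions and the primary constructor together: one process hosts the server, and every process (including the host) connects to the same host:port. A construction sketch; makeStore, the hostname, and the worker count are placeholders, not values taken from PyTorch:

#include <cstddef>
#include <torch/csrc/distributed/c10d/TCPStore.hpp>

// Illustrative construction of a shared TCPStore for `worldSize` workers.
c10::intrusive_ptr<c10d::TCPStore> makeStore(int rank, int worldSize) {
  c10d::TCPStoreOptions opts;
  opts.port = c10d::TCPStoreOptions::kDefaultPort;
  opts.isServer = (rank == 0);                            // exactly one server
  opts.numWorkers = static_cast<std::size_t>(worldSize);  // used by waitForWorkers()
  opts.waitWorkers = true;                                // block until all workers join
  return c10::make_intrusive<c10d::TCPStore>("master.example.com", opts);
}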
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h ADDED
@@ -0,0 +1,723 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/ApproximateClock.h>
4
+ #include <c10/util/irange.h>
5
+ #include <torch/csrc/distributed/c10d/Store.hpp>
6
+ #include <torch/csrc/distributed/c10d/Types.hpp>
7
+ #include <torch/csrc/jit/serialization/pickler.h>
8
+ #include <torch/csrc/profiler/combined_traceback.h>
9
+
10
+ #include <sys/types.h>
11
+
12
+ #include <cstdlib>
13
+ #include <string>
14
+ #include <system_error>
15
+ #include <vector>
16
+ namespace c10d {
17
+
18
+ /* Trace Utils Related to TORCH_NCCL_DESYNC_DEBUG */
19
+
20
+ inline std::string getTraceStartKey(const std::string& pgName, int rank) {
21
+ return pgName + "_" + std::to_string(rank) + "_trace_start";
22
+ }
23
+
24
+ inline std::string getTraceEndKey(const std::string& pgName, int rank) {
25
+ return pgName + "_" + std::to_string(rank) + "_trace_end";
26
+ }
27
+
28
+ inline bool traceUpdate(
29
+ c10::intrusive_ptr<Store>& store,
30
+ const std::string& key,
31
+ uint64_t seq,
32
+ const std::string& col) {
33
+ std::vector<uint8_t> value(col.size() + sizeof(seq) + 1);
34
+ memcpy(value.data(), &seq, sizeof(seq));
35
+ memcpy(value.data() + sizeof(seq), col.data(), col.size());
36
+ try {
37
+ store->set(key, value);
38
+ return true;
39
+ } catch (...) {
40
+ LOG(ERROR) << "Store is down while updating #" << seq << " with key "
41
+ << key;
42
+ return false;
43
+ }
44
+ return true;
45
+ }
46
+
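For orientation, a call site for the helpers above might look like the following: each rank writes its latest collective under its per-rank start/end keys, and retrieveDesyncReport later reads those keys back. recordCollectiveStart is an illustrative wrapper, not a function from this header:

#include <cstdint>
#include <string>
#include <torch/csrc/distributed/c10d/TraceUtils.h>

// Illustrative call site: record that `rank` started collective #seq.
bool recordCollectiveStart(
    c10::intrusive_ptr<c10d::Store>& store,
    const std::string& pgName,
    int rank,
    uint64_t seq,
    const std::string& collectiveName) {
  return c10d::traceUpdate(
      store, c10d::getTraceStartKey(pgName, rank), seq, collectiveName);
}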
47
+ enum TraceDebugEvent {
48
+ kEventStart,
49
+ kEventEnd,
50
+ };
51
+ // <seq, <rank, <col, start/end>>>
52
+ using TraceMap =
53
+ std::map<uint64_t, std::map<int, std::pair<std::string, TraceDebugEvent>>>;
54
+
55
+ inline std::string ranksToString(const std::vector<int>& ranks) {
56
+ std::string str;
57
+ for (int rank : ranks) {
58
+ if (str.empty()) {
59
+ str = std::to_string(rank);
60
+ } else {
61
+ str += ", " + std::to_string(rank);
62
+ }
63
+ }
64
+ return str;
65
+ }
66
+
67
+ inline std::string ranksFromTrace(
68
+ const std::vector<std::pair<int, std::string>>& items) {
69
+ std::string ranks;
70
+ for (auto& p : items) {
71
+ if (ranks.empty()) {
72
+ ranks = std::to_string(p.first);
73
+ } else {
74
+ ranks += ", " + std::to_string(p.first);
75
+ }
76
+ }
77
+ return ranks;
78
+ }
79
+
80
+ inline std::string analyzeMissingRanks(const std::vector<int>& missingRanks) {
81
+ return c10::str(
82
+ "\n\t - To our best knowledge, ranks [",
83
+ ranksToString(missingRanks),
84
+ "] are the lagging ranks that caused this timeout. "
85
+ "They never joined any collectives");
86
+ }
87
+
88
+ inline std::string analyzeLaggingRanks(const TraceMap& traceMap) {
89
+ uint64_t lagSeq = traceMap.begin()->first;
90
+ std::vector<int> startRanks;
91
+ std::vector<int> endRanks;
92
+ for (auto& p : traceMap.begin()->second) {
93
+ if (p.second.second == kEventStart) {
94
+ startRanks.push_back(p.first);
95
+ } else {
96
+ endRanks.push_back(p.first);
97
+ }
98
+ }
99
+ std::string report =
100
+ "\n\t - To our best knowledge, the lagging/dead/mismatched ranks "
101
+ "that caused the desync are:";
102
+ if (startRanks.size()) {
103
+ report += c10::str(
104
+ "\n\t - [",
105
+ ranksToString(startRanks),
106
+ "] joined but didn't finish collective #",
107
+ lagSeq,
108
+ " (count from 1)");
109
+ }
110
+ if (endRanks.size()) {
111
+ report += c10::str(
112
+ "\n\t [",
113
+ ranksToString(endRanks),
114
+ "] finished collective #",
115
+ lagSeq,
116
+ ", but didn't join collective #",
117
+ lagSeq + 1,
118
+ " (count from 1)");
119
+ }
120
+ return report;
121
+ }
122
+
123
+ inline std::string dumpSnapshot(TraceMap& traceMap) {
124
+ std::string report = "\n\t - Snapshot of ranks' latest states:";
125
+ for (auto& tracePair : traceMap) {
126
+ uint64_t seq = tracePair.first;
127
+ std::map<int, std::pair<std::string, TraceDebugEvent>>& subMap =
128
+ tracePair.second;
129
+
130
+ std::unordered_map<std::string, std::vector<int>> collectivesStart;
131
+ std::unordered_map<std::string, std::vector<int>> collectivesEnd;
132
+ for (auto& p : subMap) {
133
+ int rank = p.first;
134
+ const std::string& col = p.second.first;
135
+ if (p.second.second == kEventStart) {
136
+ collectivesStart[col].push_back(rank);
137
+ } else {
138
+ collectivesEnd[col].push_back(rank);
139
+ }
140
+ }
141
+
142
+ if (collectivesStart.size()) {
143
+ report += c10::str("\n\t #", seq, " started ranks:");
144
+ for (auto& mapPair : collectivesStart) {
145
+ report += c10::str(
146
+ "\n\t [",
147
+ ranksToString(mapPair.second),
148
+ "] started ",
149
+ mapPair.first);
150
+ }
151
+ }
152
+ if (collectivesEnd.size()) {
153
+ report += c10::str("\n\t #", seq, " finished ranks:");
154
+ for (auto& mapPair : collectivesEnd) {
155
+ report += c10::str(
156
+ "\n\t [",
157
+ ranksToString(mapPair.second),
158
+ "] finished ",
159
+ mapPair.first);
160
+ }
161
+ }
162
+ }
163
+ return report;
164
+ }
165
+
166
+ inline bool parseTraceValue(
167
+ c10::intrusive_ptr<Store>& store,
168
+ const std::string& key,
169
+ uint64_t& seq,
170
+ std::string& col) {
171
+ try {
172
+ std::vector<uint8_t> traceValue = store->get(key);
173
+ memcpy(&seq, traceValue.data(), sizeof(seq));
174
+ std::string colName((char*)traceValue.data() + sizeof(seq));
175
+ col = colName;
176
+ return true;
177
+ } catch (...) {
178
+ LOG(ERROR) << "Store is down while getting key " << key;
179
+ return false;
180
+ }
181
+ return true;
182
+ }
183
+
184
+ inline std::string retrieveDesyncReport(
185
+ c10::intrusive_ptr<Store>& store,
186
+ const std::string& pgName,
187
+ int myRank,
188
+ int worldSize) {
189
+ std::string report;
190
+
191
+ uint64_t thisSeq;
192
+ std::string thisCol;
193
+
194
+ std::vector<int> missingRanks;
195
+ TraceMap traceMap;
196
+
197
+ for (const auto rank : c10::irange(worldSize)) {
198
+ // Build traceMapStart.
199
+ uint64_t seqStart;
200
+ {
201
+ std::string traceKeyStart = getTraceStartKey(pgName, rank);
202
+ if (!store->check({traceKeyStart})) {
203
+ missingRanks.push_back(rank);
204
+ continue;
205
+ }
206
+ std::string col;
207
+ if (!parseTraceValue(store, traceKeyStart, seqStart, col)) {
208
+ return report;
209
+ }
210
+ traceMap[seqStart].emplace(rank, std::make_pair(col, kEventStart));
211
+ if (rank == myRank) {
212
+ thisSeq = seqStart;
213
+ thisCol = std::move(col);
214
+ }
215
+ }
216
+
217
+ // Build traceMapEnd.
218
+ {
219
+ std::string traceKeyEnd = getTraceEndKey(pgName, rank);
220
+ if (!store->check({traceKeyEnd})) {
221
+ continue;
222
+ }
223
+ uint64_t seq;
224
+ std::string col;
225
+ if (!parseTraceValue(store, traceKeyEnd, seq, col)) {
226
+ return report;
227
+ }
228
+ if (seq == seqStart) {
229
+ traceMap[seq][rank].second = kEventEnd;
230
+ }
231
+ }
232
+ }
233
+
234
+ TORCH_INTERNAL_ASSERT(
235
+ !missingRanks.empty() || !traceMap.empty(),
236
+ "Trace shouldn't be empty while enabled GLOO_ASYNC_TIMEOUT_DEBUG");
237
+ TORCH_INTERNAL_ASSERT(
238
+ !thisCol.empty(),
239
+ "Timeout rank [",
240
+ myRank,
241
+ "] must have collective tracking item in c10::Store trace");
242
+ TORCH_INTERNAL_ASSERT(
243
+ traceMap[thisSeq][myRank].second == kEventStart,
244
+ "Timeout rank [",
245
+ myRank,
246
+ "] last trace item must be kEventStart. thisSeq = ",
247
+ thisSeq,
248
+ ", col = ",
249
+ thisCol);
250
+
251
+ report += c10::str(
252
+ "\n\t - [", myRank, "] Timeout at collective: ", thisCol, ", #", thisSeq);
253
+
254
+ if (!missingRanks.empty()) {
255
+ report += analyzeMissingRanks(missingRanks);
256
+ } else {
257
+ report += analyzeLaggingRanks(traceMap);
258
+ report += dumpSnapshot(traceMap);
259
+ }
260
+
261
+ return report;
262
+ }
263
+
264
+ /* Trace Utils Related to Flight Recorder */
265
+
266
+ /* Note: this is only used by PGNCCL (could be generalized in an ideal world but
267
+ * wasn't done that way, so isn't expected to be fully general at the moment) */
268
+
269
+ #ifdef USE_C10D_NCCL
270
+
271
+ /* Helper used by work::getDuration() and nccl flight recorder */
272
+ float getDurationFromEvent(
273
+ at::cuda::CUDAEvent& ncclStartEvent,
274
+ at::cuda::CUDAEvent& ncclEndEvent) {
275
+ TORCH_CHECK(
276
+ ncclEndEvent.query(),
277
+ "getDuration can only be called after work is succeeded.")
278
+ return ncclStartEvent.elapsed_time(ncclEndEvent);
279
+ }
280
+
281
+ DebugInfoWriter::~DebugInfoWriter() = default;
282
+
283
+ void DebugInfoWriter::write(const std::string& ncclTrace) {
284
+ // Open a file for writing. The ios::binary flag is used to write data as
285
+ // binary.
286
+ std::ofstream file(filename_, std::ios::binary);
287
+
288
+ // Check if the file was opened successfully.
289
+ if (!file.is_open()) {
290
+ LOG(ERROR) << "Error opening file for writing NCCLPG debug info: "
291
+ << filename_;
292
+ return;
293
+ }
294
+
295
+ file.write(ncclTrace.data(), ncclTrace.size());
296
+ LOG(INFO) << "Finished writing NCCLPG debug info to " << filename_;
297
+ }
298
+
299
+ DebugInfoWriter& DebugInfoWriter::getWriter(int rank) {
300
+ if (writer_ == nullptr) {
301
+ std::string fileNamePrefix = getCvarString(
302
+ {"TORCH_NCCL_DEBUG_INFO_TEMP_FILE"}, "/tmp/nccl_trace_rank_");
303
+ // Using std::unique_ptr here to auto-delete the writer object
304
+ // when the pointer itself is destroyed.
305
+ std::unique_ptr<DebugInfoWriter> writerPtr(
306
+ new DebugInfoWriter(fileNamePrefix, rank));
307
+ DebugInfoWriter::registerWriter(std::move(writerPtr));
308
+ }
309
+ return *writer_;
310
+ }
311
+
312
+ void DebugInfoWriter::registerWriter(std::unique_ptr<DebugInfoWriter> writer) {
313
+ TORCH_CHECK_WITH(
314
+ DistBackendError,
315
+ hasWriterRegistered_.load() == false,
316
+ "debugInfoWriter already registered");
317
+ hasWriterRegistered_.store(true);
318
+ writer_ = std::move(writer);
319
+ }
320
+
321
+ std::unique_ptr<DebugInfoWriter> DebugInfoWriter::writer_ = nullptr;
322
+ std::atomic<bool> DebugInfoWriter::hasWriterRegistered_(false);
323
+
324
+ inline std::string pickle_str(const c10::IValue& v) {
325
+ std::vector<char> result;
326
+ {
327
+ auto writer = [&](const char* data, size_t size) {
328
+ result.insert(result.end(), data, data + size);
329
+ };
330
+ torch::jit::Pickler pickler(
331
+ writer, nullptr, nullptr, nullptr, nullptr, false);
332
+ pickler.protocol();
333
+ pickler.pushIValue(v);
334
+ pickler.stop();
335
+ }
336
+ return std::string(result.begin(), result.end());
337
+ }
338
+
339
+ inline std::string get_python_cpp_trace() {
340
+ // usage:
341
+ // LOG(INFO) << "stacktrace: "
342
+ // << get_python_cpp_trace();
343
+ // warn: might be slow in getting cpp traces
344
+ // because of slow/broken addr2line
345
+ // in different system libs
346
+ std::shared_ptr<torch::CapturedTraceback> tb =
347
+ torch::CapturedTraceback::gather(
348
+ /*python=*/true, /*script=*/true, /*cpp=*/true);
349
+ torch::SymbolizedTracebacks s_tbs = torch::symbolize({tb.get()});
350
+ const auto& s_tb = s_tbs.tracebacks.at(0);
351
+ std::stringstream oss;
352
+ for (auto idx : c10::irange(s_tb.size())) {
353
+ auto frame_id = s_tb[idx];
354
+ const auto& frame = s_tbs.all_frames.at(frame_id);
355
+ oss << "#" << idx << " " << frame.funcname << " from " << frame.filename
356
+ << ":" << frame.lineno << std::endl;
357
+ }
358
+ return oss.str();
359
+ }
360
+
361
+ inline c10::Dict<c10::IValue, c10::IValue> new_dict() {
362
+ return c10::Dict<c10::IValue, c10::IValue>(
363
+ c10::AnyType::get(), c10::AnyType::get());
364
+ }
365
+
366
+ inline c10::List<c10::IValue> new_list() {
367
+ return c10::List<c10::IValue>(c10::AnyType::get());
368
+ }
369
+
370
+ struct NCCLTraceBuffer {
371
+ static NCCLTraceBuffer* get() {
372
+ // intentionally leak on exit
373
+ // because this will hold python state that may get destructed
374
+ static NCCLTraceBuffer* instance = new NCCLTraceBuffer();
375
+ return instance;
376
+ }
377
+ NCCLTraceBuffer() {
378
+ max_entries_ = getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0);
379
+ capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false);
380
+ enabled_ = max_entries_ > 0;
381
+ pg_id_to_ranks_ = {};
382
+ }
383
+ using Event = at::cuda::CUDAEvent;
384
+ struct Entry {
385
+ size_t id_; // incremented id in the trace buffer
386
+ // used to figure out where in the circular entries
387
+ // buffer this entry will be located to
388
+ // update state information
389
+ size_t pg_id_;
390
+
391
+ // Both seq_id_ and op_id_ are per_pg incrementing counters
392
+ // seq_id refers to actual kernel launches (e.g. 1 per coalesced group)
393
+ // op_id refers to logical operations (e.g. one per op inside coalesced
394
+ // group)
395
+ size_t seq_id_;
396
+ size_t op_id_;
397
+ std::string profiling_name_;
398
+
399
+ std::shared_ptr<torch::CapturedTraceback> traceback_;
400
+ // we borrow pointers to start_ and end_ so we can query the state
401
+ // on reporting. However, once the event is completed, the call
402
+ // to `complete` will clear these.
403
+ Event *start_, *end_;
404
+
405
+ // timestamp when the entry was created, likely close to the time the work
406
+ // was 'enqueued' - not necessarily started
407
+ c10::time_t time_created_;
408
+ c10::optional<float> duration_;
409
+
410
+ // timestamp when our CPU threads discovered that the kernel started.
411
+ // will always be _after_ it actually started, and can be very late
412
+ // if the watchdog thread got stuck on CUDA APIs.
413
+ c10::optional<c10::time_t> time_discovered_started_;
414
+
415
+ // timestamp when our CPU threads discovered that the kernel completed.
416
+ // will always be _after_ it actually completed, and can be the same time
417
+ // as the discovery of the start if the watchdog thread is stuck on CUDA
418
+ // APIs
419
+ c10::optional<c10::time_t> time_discovered_completed_;
420
+
421
+ // size information for input/output tensors
422
+ c10::SmallVector<int, 4> input_dims_;
423
+ c10::SmallVector<int, 4> output_dims_;
424
+ c10::SmallVector<int64_t, 8> sizes_; // flattened from inputs, outputs
425
+ bool retired_ = false; // is this work entry no longer in the workMetaList_?
426
+ // a retired but not completed event has timed out
427
+ };
428
+
429
+ bool enabled_ = false;
430
+ bool capture_cpp_stack_ = false;
431
+ std::mutex mutex_;
432
+ std::vector<Entry> entries_;
433
+ size_t max_entries_ = 0;
434
+ size_t next_ = 0;
435
+ size_t id_ = 0;
436
+ std::map<size_t, std::vector<uint64_t>> pg_id_to_ranks_;
437
+
438
+ c10::optional<size_t> record(
439
+ size_t pg_id,
440
+ size_t seq_id,
441
+ size_t op_id,
442
+ std::string profiling_name,
443
+ const std::vector<at::Tensor>& inputs,
444
+ const std::vector<at::Tensor>& outputs,
445
+ Event* start,
446
+ Event* end) {
447
+ if (!enabled_) {
448
+ return c10::nullopt;
449
+ }
450
+ auto traceback =
451
+ torch::CapturedTraceback::gather(true, true, capture_cpp_stack_);
452
+ std::lock_guard<std::mutex> guard(mutex_);
453
+
454
+ auto te = Entry{
455
+ id_,
456
+ pg_id,
457
+ seq_id,
458
+ op_id,
459
+ std::move(profiling_name),
460
+ std::move(traceback),
461
+ std::move(start),
462
+ std::move(end),
463
+ c10::getTime()};
464
+
465
+ for (const auto& input : inputs) {
466
+ c10::IntArrayRef sizes = input.sizes();
467
+ te.input_dims_.push_back(sizes.size());
468
+ te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end());
469
+ }
470
+
471
+ for (const auto& output : outputs) {
472
+ c10::IntArrayRef sizes = output.sizes();
473
+ te.output_dims_.push_back(sizes.size());
474
+ te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end());
475
+ }
476
+
477
+ if (entries_.size() < max_entries_) {
478
+ entries_.emplace_back(std::move(te));
479
+ } else {
480
+ entries_[next_++] = std::move(te);
481
+ if (next_ == max_entries_) {
482
+ next_ = 0;
483
+ }
484
+ }
485
+ return id_++;
486
+ }
487
+
488
+ void record_pg_ranks(size_t pg_id, std::vector<uint64_t> ranks) {
489
+ if (!enabled_) {
490
+ return;
491
+ }
492
+ std::lock_guard<std::mutex> guard(mutex_);
493
+ pg_id_to_ranks_[pg_id] = ranks;
494
+ }
495
+
496
+ void update_state(Entry& r) {
497
+ if (r.start_ != nullptr) {
498
+ bool started = r.start_->query();
499
+ if (started && !r.time_discovered_started_) {
500
+ r.time_discovered_started_ = c10::getTime();
501
+ }
502
+ }
503
+ if (r.end_ != nullptr) {
504
+ bool completed = r.end_->query();
505
+ if (completed && !r.time_discovered_completed_) {
506
+ r.time_discovered_completed_ = c10::getTime();
507
+ }
508
+ }
509
+ }
510
+
511
+ std::vector<Entry> dump_entries() {
512
+ std::lock_guard<std::mutex> guard(mutex_);
513
+ std::vector<Entry> result;
514
+ result.reserve(entries_.size());
515
+ result.insert(result.end(), entries_.begin() + next_, entries_.end());
516
+ result.insert(result.end(), entries_.begin(), entries_.begin() + next_);
517
+ // query any remaining events
518
+ for (auto& r : result) {
519
+ update_state(r);
520
+ r.start_ = r.end_ = nullptr;
521
+ }
522
+ return result;
523
+ }
524
+
525
+ /*
526
+ Mark an entry as completed and free its start/end events.
527
+
528
+ This is called by the watchdog thread, and is asynchronous from the
529
+ perspective of the main thread.
530
+
531
+ compute_duration defaults to true since retire_id is only called in the
532
+ watchdog thread, which is currently a place we call cuda APIs which may hang,
533
+ but care should be taken to avoid computing duration in any function that must
534
+ never hang. (timing must also be enabled for compute_duration - see
535
+ TORCH_NCCL_ENABLE_TIMING).
536
+ */
537
+ void retire_id(c10::optional<size_t> id, bool compute_duration = true) {
538
+ if (!enabled_ || !id) {
539
+ return;
540
+ }
541
+
542
+ bool can_compute_duration = false;
543
+ Event* startEvent = nullptr;
544
+ Event* endEvent = nullptr;
545
+ c10::optional<float> duration = c10::nullopt;
546
+
547
+ std::unique_lock<std::mutex> guard(mutex_);
548
+
549
+ Entry* entry = &entries_.at(*id % max_entries_);
550
+ if (entry->id_ == *id) {
551
+ update_state(*entry);
552
+
553
+ if (compute_duration) {
554
+ can_compute_duration = entry->time_discovered_completed_.has_value() &&
555
+ entry->start_ && entry->end_;
556
+ startEvent = entry->start_;
557
+ endEvent = entry->end_;
558
+ }
559
+ }
560
+
561
+ if (can_compute_duration) {
562
+ // Compute duration without holding the lock, because
563
+ // cudaEventDuration() can hang, and we need to acquire the lock before we
564
+ // can dump(), which we never want to block.
565
+ guard.unlock();
566
+ duration = getDurationFromEvent(*startEvent, *endEvent);
567
+ guard.lock();
568
+
569
+ // Refresh the entry pointer, see if the entry has been overwritten
570
+ entry = &entries_.at(*id % max_entries_);
571
+ if (entry->id_ != *id) {
572
+ LOG(INFO)
573
+ << "retire_id abandoned for id " << *id
574
+ << ", event was overwritten while waiting to compute duration.";
575
+ return;
576
+ }
577
+ if (duration.has_value()) {
578
+ entry->duration_ = duration.value();
579
+ }
580
+ }
581
+
582
+ entry->retired_ = true;
583
+ entry->start_ = entry->end_ = nullptr;
584
+ }
585
+
586
+ std::string dump(
587
+ const c10::optional<std::unordered_map<
588
+ std::string,
589
+ std::unordered_map<std::string, std::string>>>& ncclDumpMap) {
590
+ auto result = dump_entries();
591
+ auto entries = new_list();
592
+ c10::IValue entries_key = "entries";
593
+ c10::IValue nccl_comm_key = "nccl_comm_state";
594
+ c10::IValue version_key = "version";
595
+ // Update whenever changing contents or formatting of the dump
596
+ // (minor when adding fields, major when changing existing fields)
597
+ c10::IValue version_val = "1.4";
598
+ c10::IValue pg_config_key = "pg_config";
599
+ c10::IValue record_id_key = "record_id";
600
+ c10::IValue pg_id_key = "pg_id";
601
+ c10::IValue seq_id_key = "seq_id";
602
+ c10::IValue op_id_key = "op_id";
603
+ c10::IValue profiling_name_key = "profiling_name";
604
+ c10::IValue input_sizes_key = "input_sizes";
605
+ c10::IValue output_sizes_key = "output_sizes";
606
+ c10::IValue time_created_key = "time_created_ns";
607
+ c10::IValue duration_key = "duration_ms";
608
+
609
+ c10::IValue frames_key = "frames";
610
+ c10::IValue state_key = "state";
611
+ c10::IValue line_key = "line";
612
+ c10::IValue name_key = "name";
613
+ c10::IValue filename_key = "filename";
614
+ c10::IValue retired_key = "retired";
615
+ c10::IValue time_discovered_started_key = "time_discovered_started_ns";
616
+ c10::IValue time_discovered_completed_key = "time_discovered_completed_ns";
617
+
618
+ std::vector<torch::CapturedTraceback*> tracebacks;
619
+ for (auto& e : result) {
620
+ tracebacks.push_back(e.traceback_.get());
621
+ }
622
+ torch::SymbolizedTracebacks stracebacks = torch::symbolize(tracebacks);
623
+ std::vector<c10::IValue> all_frames;
624
+ for (const auto& f : stracebacks.all_frames) {
625
+ auto d = new_dict();
626
+ d.insert(name_key, f.funcname);
627
+ d.insert(filename_key, f.filename);
628
+ d.insert(line_key, int64_t(f.lineno));
629
+ all_frames.emplace_back(std::move(d));
630
+ }
631
+
632
+ for (auto i : c10::irange(result.size())) {
633
+ auto& e = result.at(i);
634
+ auto& tb = stracebacks.tracebacks.at(i);
635
+ auto dict = new_dict();
636
+ dict.insert(record_id_key, int64_t(e.id_));
637
+ dict.insert(pg_id_key, int64_t(e.pg_id_));
638
+ dict.insert(seq_id_key, int64_t(e.seq_id_));
639
+ dict.insert(op_id_key, int64_t(e.op_id_));
640
+ dict.insert(profiling_name_key, e.profiling_name_);
641
+ dict.insert(time_created_key, int64_t(e.time_created_));
642
+ if (e.duration_) {
643
+ dict.insert(duration_key, *e.duration_);
644
+ }
645
+
646
+ auto it = e.sizes_.begin();
647
+ auto read_sizes = [&](const c10::SmallVector<int, 4>& dims) {
648
+ auto sizes = new_list();
649
+ for (auto dim : dims) {
650
+ auto arg_sizes = new_list();
651
+ for (auto i : c10::irange(dim)) {
652
+ (void)i;
653
+ arg_sizes.push_back(*it++);
654
+ }
655
+ sizes.push_back(arg_sizes);
656
+ }
657
+ return sizes;
658
+ };
659
+
660
+ dict.insert(input_sizes_key, read_sizes(e.input_dims_));
661
+ dict.insert(output_sizes_key, read_sizes(e.output_dims_));
662
+ if (e.time_discovered_completed_.has_value()) {
663
+ dict.insert(state_key, "completed");
664
+ } else if (e.time_discovered_started_.has_value()) {
665
+ dict.insert(state_key, "started");
666
+ } else {
667
+ dict.insert(state_key, "scheduled");
668
+ }
669
+
670
+ dict.insert(
671
+ time_discovered_started_key,
672
+ e.time_discovered_started_.has_value()
673
+ ? int64_t(*e.time_discovered_started_)
674
+ : c10::IValue());
675
+ dict.insert(
676
+ time_discovered_completed_key,
677
+ e.time_discovered_completed_.has_value()
678
+ ? int64_t(*e.time_discovered_completed_)
679
+ : c10::IValue());
680
+ dict.insert(retired_key, e.retired_);
681
+
682
+ auto frames = new_list();
683
+ for (int64_t frame : tb) {
684
+ frames.push_back(all_frames.at(frame));
685
+ }
686
+ dict.insert(frames_key, frames);
687
+ entries.push_back(dict);
688
+ }
689
+ auto pg_config = new_dict();
690
+ for (const auto& [pg_id, ranks] : pg_id_to_ranks_) {
691
+ auto pg_ranks = new_list();
692
+ for (const auto& rank : ranks) {
693
+ pg_ranks.push_back(static_cast<int>(rank));
694
+ }
695
+ pg_config.insert(static_cast<int>(pg_id), pg_ranks);
696
+ }
697
+
698
+ // convert ncclDumpMap into a dictionary
699
+ auto per_comm_dict = new_dict();
700
+ if (ncclDumpMap.has_value()) {
701
+ for (const auto& [ncclId, ncclDump] : ncclDumpMap.value()) {
702
+ auto inner_dict = new_dict();
703
+ for (const auto& [key, value] : ncclDump) {
704
+ inner_dict.insert(key, value);
705
+ }
706
+ per_comm_dict.insert(ncclId, inner_dict);
707
+ }
708
+ }
709
+
710
+ auto dict = new_dict();
711
+ dict.insert(entries_key, entries);
712
+ dict.insert(version_key, version_val);
713
+ if (per_comm_dict.size() > 0) {
714
+ dict.insert(nccl_comm_key, per_comm_dict);
715
+ }
716
+ dict.insert(pg_config_key, pg_config);
717
+
718
+ return pickle_str(dict);
719
+ }
720
+ };
721
+
722
+ #endif
723
+ } // namespace c10d
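// The flight-recorder class above keeps a fixed-size ring buffer: record()
// appends until max_entries_ is reached, then overwrites the slot at next_,
// and retire_id() re-checks entry->id_ because the slot may have been reused
// while the lock was dropped. Below is a minimal, self-contained sketch of
// just that bookkeeping (illustrative names only, no torch/CUDA types).
#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

struct RecorderSketch {
  struct Entry {
    size_t id;
    bool retired = false;
  };
  std::vector<Entry> entries;
  size_t max_entries = 4;
  size_t next = 0; // slot to overwrite once the buffer is full
  size_t id = 0;   // monotonically increasing record id

  std::optional<size_t> record() {
    Entry e{id};
    if (entries.size() < max_entries) {
      entries.push_back(e);
    } else {
      entries[next++] = e;
      if (next == max_entries) {
        next = 0;
      }
    }
    return id++;
  }

  void retire(std::optional<size_t> rid) {
    if (!rid) {
      return;
    }
    Entry& entry = entries.at(*rid % max_entries);
    if (entry.id != *rid) {
      return; // slot was overwritten by a newer record; nothing to retire
    }
    entry.retired = true;
  }
};

int main() {
  RecorderSketch r;
  std::vector<std::optional<size_t>> ids;
  for (int i = 0; i < 6; ++i) {
    ids.push_back(r.record()); // records 4 and 5 overwrite slots 0 and 1
  }
  r.retire(ids[0]); // no-op: record 0 was overwritten by record 4
  r.retire(ids[5]); // marks the entry in slot 1 as retired
  std::cout << "buffer size: " << r.entries.size() << "\n"; // prints 4
}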
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_UCC
4
+
5
+ #include <torch/csrc/distributed/c10d/UCCUtils.hpp>
6
+
7
+ namespace c10d {
8
+
9
+ #define RECORD_COMMS_TRACE( \
10
+ _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \
11
+ do { \
12
+ if (torch_ucc_config.enable_comms_logger) { \
13
+ _comms_tracer->recordComms( \
14
+ opTypeToString(_opType), \
15
+ (uintptr_t)_work.get(), \
16
+ _rank, \
17
+ _comm_size, \
18
+ _inTensors, \
19
+ _outTensors); \
20
+ } \
21
+ } while (0)
22
+
23
+ // interfaces to collect communication traces
24
+ class TORCH_API CommTraceLogger : public torch::CustomClassHolder {
25
+ private:
26
+ std::vector<std::string> comms_trace_;
27
+ std::vector<std::string> curBlocks_; /* unused */
28
+ std::vector<int64_t> curOutSplitSizes_;
29
+ std::vector<int64_t> curInSplitSizes_;
30
+ int curRoot_ = -1;
31
+ unsigned long seqnum = 0;
32
+
33
+ public:
34
+ void setCurBlock(const std::string& name); /* unused */
35
+ void popBlock(); /* unused */
36
+ // record root info if applicable, e.g., broadcast, gather, scatter
37
+ void recordOptionalInfo(int root = -1);
38
+ // record input/output splits of Alltoallv
39
+ void recordOptionalInfo(
40
+ const std::vector<int64_t>& outputSplitSizes = {},
41
+ const std::vector<int64_t>& inputSplitSizes = {});
42
+ // record essential comms information
43
+ void recordComms(
44
+ const std::string& collName,
45
+ const uintptr_t workReq = 0,
46
+ const int rank = -1,
47
+ const int world_size = -1,
48
+ const std::vector<at::Tensor>& inputTensors = {},
49
+ const std::vector<at::Tensor>& outputTensor = {});
50
+ // return collected comms traces
51
+ std::vector<std::string>& getCommsTrace() {
52
+ return comms_trace_;
53
+ }
54
+ };
55
+
56
+ } // namespace c10d
57
+
58
+ #endif // USE_C10D_UCC
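// RECORD_COMMS_TRACE above wraps its body in do { ... } while (0) so that the
// multi-statement macro expands to a single statement and composes safely with
// unbraced if/else at call sites. A self-contained illustration of the pattern
// (LOG_TWO_THINGS is a made-up macro, not part of the UCC code):
#include <iostream>

#define LOG_TWO_THINGS(a, b)  \
  do {                        \
    std::cout << (a) << "\n"; \
    std::cout << (b) << "\n"; \
  } while (0)

int main() {
  bool verbose = false;
  if (verbose)
    LOG_TWO_THINGS("first", "second"); // one statement; no dangling-else surprises
  else
    std::cout << "quiet\n";
}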
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp ADDED
@@ -0,0 +1,187 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_UCC
4
+
5
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
6
+ #include <torch/csrc/distributed/c10d/Store.hpp>
7
+ #include <ucc/api/ucc.h>
8
+
9
+ namespace c10d {
10
+
11
+ // Macro to generate the error message on a non-successful UCC return value.
12
+ #define TORCH_UCC_GET_ERROR_MSG(_err, _error_msg, _result) \
13
+ do { \
14
+ _err = c10::str( \
15
+ "[", \
16
+ std::string(__FILE__), \
17
+ ":", \
18
+ std::to_string(__LINE__), \
19
+ "] ", \
20
+ logger->getLogPrefix(), \
21
+ _error_msg, \
22
+ ", error code ", \
23
+ _result, \
24
+ ": ", \
25
+ ucc_status_string(_result), \
26
+ ", system error code ", \
27
+ errno); \
28
+ } while (0)
29
+
30
+ // Macro to throw on a non-successful UCC return value.
31
+ #define TORCH_UCC_CHECK(_cmd, _error_msg) \
32
+ do { \
33
+ ucc_status_t result = _cmd; \
34
+ if (result != UCC_OK) { \
35
+ std::string err; \
36
+ TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \
37
+ TORCH_CHECK(false, err); \
38
+ } \
39
+ } while (0)
40
+
41
+ // Macro to throw on a non-successful UCC return value and free its request.
42
+ #define TORCH_UCC_CHECK_REQUEST(_request, _cmd, _error_msg) \
43
+ do { \
44
+ ucc_status_t result = _cmd; \
45
+ if (result != UCC_OK) { \
46
+ std::string err; \
47
+ TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \
48
+ if (_request != nullptr) { \
49
+ ucc_collective_finalize(_request); \
50
+ } \
51
+ TORCH_CHECK(false, err); \
52
+ } \
53
+ } while (0)
54
+
55
+ // Macros to print logs with unified format
56
+ #define TORCH_UCC_LOG_ERROR(_phase, _msg) \
57
+ LOG(ERROR) << logger->getLogPrefix(_phase) << "[ERROR] " << _msg;
58
+ #define TORCH_UCC_LOG_INFO(_phase, _msg) \
59
+ LOG(INFO) << logger->getLogPrefix(_phase) << "[INFO] " << _msg;
60
+ #define TORCH_UCC_LOG_DEBUG(_phase, _msg) \
61
+ VLOG(1) << logger->getLogPrefix(_phase) << "[DEBUG] " << _msg;
62
+
63
+ enum torch_ucc_phase_t {
64
+ TORCH_UCC_UNKNOWN = -1,
65
+ TORCH_UCC_INIT,
66
+ TORCH_UCC_HEALTH_CHECK,
67
+ TORCH_UCC_READY,
68
+ TORCH_UCC_COLL_POST,
69
+ TORCH_UCC_COLL_PROGRESS,
70
+ TORCH_UCC_FINALIZE,
71
+ };
72
+
73
+ const std::map<torch_ucc_phase_t, std::string> ucc_phase_map = {
74
+ {TORCH_UCC_UNKNOWN, "UNKNOWN"},
75
+ {TORCH_UCC_INIT, "INIT"},
76
+ {TORCH_UCC_HEALTH_CHECK, "HEALTH_CHECK"},
77
+ {TORCH_UCC_READY, "READY"},
78
+ {TORCH_UCC_COLL_POST, "COLL_POST"},
79
+ {TORCH_UCC_COLL_PROGRESS, "COLL_PROGRESS"},
80
+ {TORCH_UCC_FINALIZE, "FINALIZE"},
81
+ };
82
+
83
+ class CommTraceLogger;
84
+
85
+ class TORCH_API ProcessGroupUCCLogger : public torch::CustomClassHolder {
86
+ public:
87
+ ProcessGroupUCCLogger();
88
+ ProcessGroupUCCLogger(std::string log_prefix, torch_ucc_phase_t phase);
89
+
90
+ std::string getLogPrefix(torch_ucc_phase_t phase = TORCH_UCC_UNKNOWN);
91
+ void setLogPrefix(std::string log_prefix);
92
+ inline void setPhase(torch_ucc_phase_t phase) {
93
+ local_phase = phase;
94
+ }
95
+
96
+ void initCommsTracer();
97
+ void flushComms(int rank, int world_size);
98
+ std::shared_ptr<CommTraceLogger> trace_generator = nullptr;
99
+
100
+ protected:
101
+ std::string log_prefix;
102
+ torch_ucc_phase_t local_phase = TORCH_UCC_UNKNOWN;
103
+ bool initialized_CommTraceLogger = false;
104
+ };
105
+
106
+ struct torch_ucc_oob_coll_info_t {
107
+ c10::intrusive_ptr<Store> store;
108
+ uint32_t comm_id;
109
+ int rank;
110
+ int size;
111
+ void* rbuf;
112
+ size_t msglen;
113
+ std::string getKey(std::string key) {
114
+ return std::to_string(comm_id) + key;
115
+ }
116
+ };
117
+
118
+ class CommBase {
119
+ public:
120
+ CommBase(const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger_)
121
+ : logger(logger_) {}
122
+ virtual void progress() = 0;
123
+ virtual void free_request(ucc_coll_req_h request) = 0;
124
+ virtual ~CommBase() {}
125
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
126
+ };
127
+ class CommUCC : public CommBase {
128
+ public:
129
+ ucc_lib_h lib{nullptr};
130
+ ucc_context_h context{nullptr};
131
+
132
+ public:
133
+ void progress() override;
134
+ CommUCC(
135
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
136
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger);
137
+ void free_request(ucc_coll_req_h request) override;
138
+ ~CommUCC();
139
+ };
140
+
141
+ ucc_status_t oob_allgather(
142
+ void* sbuf,
143
+ void* rbuf,
144
+ size_t msglen,
145
+ void* coll_info,
146
+ void** req);
147
+
148
+ ucc_status_t oob_allgather_test(void* req);
149
+
150
+ ucc_status_t oob_allgather_free(void* req);
151
+
152
+ // trim: remove spaces before and after the string view
153
+ // implementation borrowed from https://stackoverflow.com/a/17976541
154
+ inline c10::string_view trim(c10::string_view s) {
155
+ auto wsfront = std::find_if_not(
156
+ s.begin(), s.end(), [](int c) { return std::isspace(c); });
157
+ auto wsback = std::find_if_not(s.rbegin(), s.rend(), [](int c) {
158
+ return std::isspace(c);
159
+ }).base();
160
+ return (
161
+ wsback <= wsfront ? "" : s.substr(wsfront - s.begin(), wsback - wsfront));
162
+ }
163
+
164
+ inline std::string tolower(c10::string_view s) {
165
+ std::string result;
166
+ result.reserve(s.size());
167
+ for (auto c : s) {
168
+ result.push_back(std::tolower(c));
169
+ }
170
+ return result;
171
+ }
172
+
173
+ inline std::vector<std::string> parse_list(std::string list) {
174
+ std::vector<std::string> result;
175
+ list = tolower(trim(list));
176
+ while (!list.empty()) {
177
+ const auto end_pos = list.find_first_of(',');
178
+ const auto token = trim(list.substr(0, end_pos));
179
+ result.push_back(std::string(token));
180
+ list = (end_pos != c10::string_view::npos) ? list.substr(end_pos + 1) : "";
181
+ }
182
+ return result;
183
+ }
184
+
185
+ } // namespace c10d
186
+
187
+ #endif // USE_C10D_UCC
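// The trim()/tolower()/parse_list() helpers above normalize comma-separated
// UCC configuration strings. A standalone sketch of the same behavior using
// std::string_view instead of c10::string_view, so it compiles without torch:
// parse_list_sketch("UCP, Shm , CUDA") yields {"ucp", "shm", "cuda"}.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>
#include <string_view>
#include <vector>

static std::string_view trim_sv(std::string_view s) {
  auto front = std::find_if_not(
      s.begin(), s.end(), [](unsigned char c) { return std::isspace(c); });
  auto back = std::find_if_not(s.rbegin(), s.rend(), [](unsigned char c) {
                return std::isspace(c);
              }).base();
  return back <= front ? std::string_view{}
                       : s.substr(front - s.begin(), back - front);
}

static std::string tolower_copy(std::string_view s) {
  std::string out;
  out.reserve(s.size());
  for (unsigned char c : s) {
    out.push_back(static_cast<char>(std::tolower(c)));
  }
  return out;
}

static std::vector<std::string> parse_list_sketch(std::string list) {
  std::vector<std::string> result;
  list = tolower_copy(trim_sv(list));
  while (!list.empty()) {
    const auto end_pos = list.find_first_of(',');
    result.emplace_back(trim_sv(list.substr(0, end_pos)));
    list = (end_pos != std::string::npos) ? list.substr(end_pos + 1) : "";
  }
  return result;
}

int main() {
  for (const auto& tl : parse_list_sketch("UCP, Shm , CUDA")) {
    std::cout << tl << "\n"; // ucp / shm / cuda
  }
}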
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
+
+ namespace c10d {
+ namespace tcputil {
+
+ #define CONNECT_SOCKET_OFFSET 2
+
+ inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) {
+   return ::poll(fds, nfds, timeout);
+ }
+
+ inline void addPollfd(
+     std::vector<struct pollfd>& fds,
+     int socket,
+     short events) {
+   fds.push_back({.fd = socket, .events = events});
+ }
+
+ inline struct ::pollfd getPollfd(int socket, short events) {
+   struct ::pollfd res = {.fd = socket, .events = events};
+   return res;
+ }
+
+ } // namespace tcputil
+ } // namespace c10d
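// Hedged usage sketch for the POSIX helpers above: register one
// already-connected socket `fd` (assumed to exist) for readability and wait up
// to one second. This mirrors how tcputil call sites typically drive poll().
#include <torch/csrc/distributed/c10d/UnixSockUtils.hpp>
#include <vector>

void wait_readable(int fd) {
  std::vector<struct pollfd> fds;
  c10d::tcputil::addPollfd(fds, fd, POLLIN);
  int ready = c10d::tcputil::poll(fds.data(), fds.size(), /*timeout=*/1000);
  if (ready > 0 && (fds[0].revents & POLLIN)) {
    // fd has data available to read
  }
}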
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp ADDED
@@ -0,0 +1,731 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/accumulate.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/distributed/c10d/Types.hpp>
8
+
9
+ #ifdef _WIN32
10
+ #include <winsock2.h>
11
+ #include <ws2tcpip.h>
12
+ typedef SSIZE_T ssize_t;
13
+ #pragma comment(lib, "Ws2_32.lib")
14
+ #else
15
+ #include <fcntl.h>
16
+ #include <netdb.h>
17
+ #include <sys/poll.h>
18
+ #include <sys/socket.h>
19
+ #include <unistd.h>
20
+ #endif
21
+
22
+ #include <sys/types.h>
23
+
24
+ #include <chrono>
25
+ #include <cstdint>
26
+ #include <cstdlib>
27
+ #include <functional>
28
+ #include <limits>
29
+ #include <string>
30
+ #include <system_error>
31
+ #include <tuple>
32
+ #include <vector>
33
+
34
+ namespace c10d {
35
+
36
+ TORCH_API size_t getTensorsNumel(const std::vector<at::Tensor>& tensors);
37
+
38
+ // Retrieve tensor shapes from a given tensor.
39
+ TORCH_API std::vector<at::Tensor> getTensorShapes(
40
+ const std::vector<at::Tensor>& tensors);
41
+
42
+ // Use -2 to represent unset state of env vars
43
+ #define C10D_ENV_NOT_SET -2
44
+
45
+ // Turns at::IntArrayRef into "(1, 2, 3, 4)".
46
+ inline std::string toString(at::IntArrayRef l) {
47
+ std::stringstream ss;
48
+ ss << "(";
49
+ for (const auto i : c10::irange(l.size())) {
50
+ if (i > 0) {
51
+ ss << ", ";
52
+ }
53
+ ss << l[i];
54
+ }
55
+ ss << ")";
56
+ return ss.str();
57
+ }
58
+
59
+ inline std::string toString(const c10::Layout& layout) {
60
+ std::stringstream ss;
61
+ ss << layout;
62
+ return ss.str();
63
+ }
64
+
65
+ inline void assertSameType(
66
+ const at::DeprecatedTypeProperties& type,
67
+ const std::vector<at::Tensor>& tensors) {
68
+ for (const auto i : c10::irange(tensors.size())) {
69
+ if (!tensors[i].options().type_equal(type.options())) {
70
+ const std::string expected = type.toString();
71
+ const std::string actual = tensors[i].toString();
72
+ throw std::invalid_argument(
73
+ "mixed types (" + expected + " and " + actual + ")");
74
+ }
75
+ }
76
+ }
77
+
78
+ inline std::vector<std::string> split(
79
+ char separator,
80
+ const std::string& string) {
81
+ std::vector<std::string> pieces;
82
+ std::stringstream ss(string);
83
+ std::string item;
84
+ while (std::getline(ss, item, separator)) {
85
+ pieces.push_back(std::move(item));
86
+ }
87
+ return pieces;
88
+ }
89
+
90
+ inline std::string getCvarString(
91
+ const std::vector<std::string>& env,
92
+ const char* def) {
93
+ const char* ret = def;
94
+
95
+ if (env.empty()) {
96
+ TORCH_CHECK(false, "No environment variables passed");
97
+ return ret;
98
+ }
99
+
100
+ /* parse environment variable in reverse order, so the early
101
+ * versions of a variable get higher priority than the later
102
+ * versions of the same variable */
103
+ for (int i = env.size() - 1; i >= 0; i--) {
104
+ const char* val = std::getenv(env[i].c_str());
105
+ if (val == nullptr) {
106
+ continue;
107
+ } else if (i) {
108
+ TORCH_WARN(
109
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
110
+ " instead");
111
+ }
112
+
113
+ ret = val;
114
+ }
115
+
116
+ return ret;
117
+ }
118
+
119
+ inline int getCvarInt(const std::vector<std::string>& env, int def) {
120
+ int ret = def;
121
+
122
+ if (env.empty()) {
123
+ TORCH_CHECK(false, "No environment variables passed");
124
+ return ret;
125
+ }
126
+
127
+ /* parse environment variable in reverse order, so the early
128
+ * versions of a variable get higher priority than the later
129
+ * versions of the same variable */
130
+ for (int i = env.size() - 1; i >= 0; i--) {
131
+ char* val = std::getenv(env[i].c_str());
132
+ if (val == nullptr) {
133
+ continue;
134
+ } else if (i) {
135
+ TORCH_WARN(
136
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
137
+ " instead");
138
+ }
139
+
140
+ try {
141
+ ret = std::stoi(val);
142
+ } catch (std::exception& e) {
143
+ TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
144
+ }
145
+ }
146
+
147
+ return ret;
148
+ }
149
+
150
+ inline bool getCvarBool(const std::vector<std::string>& env, bool def) {
151
+ bool ret = def;
152
+
153
+ if (env.empty()) {
154
+ TORCH_CHECK(false, "No environment variables passed");
155
+ return ret;
156
+ }
157
+
158
+ /* parse environment variable in reverse order, so the early
159
+ * versions of a variable get higher priority than the later
160
+ * versions of the same variable */
161
+ for (int i = env.size() - 1; i >= 0; i--) {
162
+ char* val_ = std::getenv(env[i].c_str());
163
+ if (val_ == nullptr) {
164
+ continue;
165
+ } else if (i) {
166
+ TORCH_WARN(
167
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
168
+ " instead");
169
+ }
170
+
171
+ std::string val = std::string(val_);
172
+ for (auto& x : val) {
173
+ x = std::tolower(x);
174
+ }
175
+
176
+ if (val == "y" || val == "yes" || val == "1" || val == "t" ||
177
+ val == "true") {
178
+ ret = true;
179
+ } else if (
180
+ val == "n" || val == "no" || val == "0" || val == "f" ||
181
+ val == "false") {
182
+ ret = false;
183
+ } else {
184
+ TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
185
+ return ret;
186
+ }
187
+ }
188
+
189
+ return ret;
190
+ }
191
+
192
+ inline void assertSameSizes(
193
+ const at::IntArrayRef& sizes,
194
+ const std::vector<at::Tensor>& tensors) {
195
+ for (const auto i : c10::irange(tensors.size())) {
196
+ if (!tensors[i].sizes().equals(sizes)) {
197
+ const auto expected = toString(sizes);
198
+ const auto actual = toString(tensors[i].sizes());
199
+ throw std::invalid_argument(
200
+ "mixed sizes (" + expected + " and " + actual + ")");
201
+ }
202
+ }
203
+ }
204
+
205
+ inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
206
+ // Ensure we have at least one tensor
207
+ if (tensors.empty()) {
208
+ throw std::invalid_argument("argument is empty");
209
+ }
210
+
211
+ // Ensure all tensors have identical type and shape
212
+ auto options = tensors[0].options();
213
+ auto sizes = tensors[0].sizes();
214
+ for (const auto i : c10::irange(1, tensors.size())) {
215
+ if (!tensors[i].options().type_equal(options)) {
216
+ const auto expected = toString(options);
217
+ const auto actual = toString(tensors[i].options());
218
+ throw std::invalid_argument(
219
+ "argument contains mixed types (" + expected + " and " + actual +
220
+ ")");
221
+ }
222
+ if (!tensors[i].sizes().equals(sizes)) {
223
+ const auto expected = toString(sizes);
224
+ const auto actual = toString(tensors[i].sizes());
225
+ throw std::invalid_argument(
226
+ "argument contains mixed sizes (" + expected + " and " + actual +
227
+ ")");
228
+ }
229
+ }
230
+ }
231
+
232
+ inline void assertTypeMatch(
233
+ std::function<void(const std::string&)> fn,
234
+ const at::DeprecatedTypeProperties& type,
235
+ const at::ArrayRef<at::Tensor> tensors,
236
+ size_t index) {
237
+ if (!tensors[index].options().type_equal(type.options())) {
238
+ fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
239
+ type.toString() + ", got " + tensors[index].toString() + ")");
240
+ }
241
+ }
242
+
243
+ inline void assertTypeMatch(
244
+ std::function<void(const std::string&)> fn,
245
+ const at::TensorOptions& options,
246
+ const at::ArrayRef<at::Tensor> tensors,
247
+ size_t index) {
248
+ if (!tensors[index].options().type_equal(options)) {
249
+ fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
250
+ toString(options) + ", got " + toString(tensors[index].options()) + ")");
251
+ }
252
+ }
253
+
254
+ inline void assertSizesMatch(
255
+ std::function<void(const std::string&)> fn,
256
+ const at::IntArrayRef& sizes,
257
+ const at::ArrayRef<at::Tensor> tensors,
258
+ size_t index) {
259
+ if (tensors[index].sizes() != sizes) {
260
+ fn("invalid tensor size at index " + std::to_string(index) + " (expected " +
261
+ toString(sizes) + ", got " + toString(tensors[index].sizes()) + ")");
262
+ }
263
+ }
264
+
265
+ inline void assertLayoutMatch(
266
+ std::function<void(const std::string&)> fn,
267
+ const c10::Layout& expected,
268
+ const at::ArrayRef<at::Tensor> tensors,
269
+ size_t index) {
270
+ const auto& actual = tensors[index].layout();
271
+ if (actual != expected) {
272
+ fn("invalid tensor layout at index " + std::to_string(index) +
273
+ " (expected " + toString(expected) + ", got " + toString(actual) + ")");
274
+ }
275
+ }
276
+
277
+ inline void assertLayoutMatch(
278
+ std::function<void(const std::string&)> fn,
279
+ const at::ArrayRef<at::Tensor> tensors) {
280
+ const auto& layout = tensors[0].layout();
281
+ for (const auto i : c10::irange(1, tensors.size())) {
282
+ assertLayoutMatch(fn, layout, tensors, i);
283
+ }
284
+ }
285
+
286
+ inline void assertNonEmpty(
287
+ std::function<void(const std::string&)> fn,
288
+ const at::ArrayRef<at::Tensor> tensors) {
289
+ if (tensors.empty()) {
290
+ fn("requires non-empty tensor list");
291
+ }
292
+ }
293
+
294
+ inline void assertSingleElement(
295
+ std::function<void(const std::string&)> fn,
296
+ const at::ArrayRef<at::Tensor> tensors) {
297
+ if (tensors.size() != 1) {
298
+ fn("requires a single-element tensor list");
299
+ }
300
+ }
301
+
302
+ inline void assertSingleElementInput(
303
+ std::function<void(const std::string&)> fn,
304
+ const at::ArrayRef<at::Tensor> tensors) {
305
+ if (tensors.size() != 1) {
306
+ fn("requires a single-element input tensor list");
307
+ }
308
+ }
309
+
310
+ inline void assertSingleElementOutput(
311
+ std::function<void(const std::string&)> fn,
312
+ const at::ArrayRef<at::Tensor> tensors) {
313
+ if (tensors.size() != 1) {
314
+ fn("requires a single-element output tensor list");
315
+ }
316
+ }
317
+
318
+ inline void assertRootRank(
319
+ std::function<void(const std::string&)> fn,
320
+ int rank,
321
+ int size) {
322
+ if (rank < 0 || rank >= size) {
323
+ fn("invalid root rank: " + std::to_string(rank));
324
+ }
325
+ }
326
+
327
+ inline void assertRootTensor(
328
+ std::function<void(const std::string&)> fn,
329
+ int rank,
330
+ int size) {
331
+ if (rank < 0 || rank >= size) {
332
+ fn("invalid root tensor: " + std::to_string(rank));
333
+ }
334
+ }
335
+
336
+ inline void assertDense(
337
+ std::function<void(const std::string&)> fn,
338
+ const at::ArrayRef<at::Tensor> tensors) {
339
+ const auto& layout = tensors[0].layout();
340
+ if (layout != at::kStrided) {
341
+ fn("only supports dense tensors");
342
+ }
343
+ }
344
+
345
+ inline void assertCPU(
346
+ std::function<void(const std::string&)> fn,
347
+ const at::ArrayRef<at::Tensor> tensors) {
348
+ const auto& device = tensors[0].device();
349
+ if (device.type() != at::kCPU) {
350
+ fn("only supports CPU tensors");
351
+ }
352
+ }
353
+
354
+ inline void assertSameDevice(
355
+ std::function<void(const std::string&)> fn,
356
+ const at::ArrayRef<at::Tensor> tensors) {
357
+ if (tensors.size() < 2) {
358
+ return;
359
+ }
360
+ const auto& device = tensors[0].device();
361
+ for (const auto i : c10::irange(1, tensors.size())) {
362
+ if (tensors[i].device() != device) {
363
+ fn("tensors should be on the same device");
364
+ }
365
+ }
366
+ }
367
+
368
+ inline void assertTypeAndSizesMatch(
369
+ std::function<void(const std::string&)> fn,
370
+ const at::ArrayRef<at::Tensor> tensors,
371
+ const at::DeprecatedTypeProperties& type,
372
+ const at::IntArrayRef& sizes) {
373
+ for (const auto i : c10::irange(tensors.size())) {
374
+ assertTypeMatch(fn, type, tensors, i);
375
+ assertSizesMatch(fn, sizes, tensors, i);
376
+ }
377
+ }
378
+
379
+ inline void assertTypeAndSizesMatch(
380
+ std::function<void(const std::string&)> fn,
381
+ const at::ArrayRef<at::Tensor> tensors,
382
+ const at::TensorOptions& options,
383
+ const at::IntArrayRef& sizes) {
384
+ for (const auto i : c10::irange(tensors.size())) {
385
+ assertTypeMatch(fn, options, tensors, i);
386
+ assertSizesMatch(fn, sizes, tensors, i);
387
+ }
388
+ }
389
+
390
+ inline void assertTypeAndSizesMatch(
391
+ std::function<void(const std::string&)> fn,
392
+ const at::ArrayRef<at::Tensor> tensors) {
393
+ const auto& options = tensors[0].options();
394
+ const auto sizes = tensors[0].sizes();
395
+ assertTypeAndSizesMatch(fn, tensors.slice(1), options, sizes);
396
+ }
397
+
398
+ // Copied from ATen/core/functional.h.
399
+ template <typename F, typename T>
400
+ inline auto fmap(T& inputs, const F& fn)
401
+ -> std::vector<decltype(fn(*inputs.begin()))> {
402
+ std::vector<decltype(fn(*inputs.begin()))> r;
403
+ r.reserve(inputs.size());
404
+ for (auto& input : inputs) {
405
+ r.push_back(fn(input));
406
+ }
407
+ return r;
408
+ }
409
+
410
+ // Copied from torch/csrc/utils/tensor_flatten.h.
411
+ inline at::Tensor flattenDenseTensors(at::TensorList tensors) {
412
+ static const auto flatten = [](const at::Tensor& t) {
413
+ return t.contiguous().view({-1});
414
+ };
415
+ if (tensors.size() == 1) {
416
+ return flatten(tensors[0]);
417
+ }
418
+ return at::cat(::c10d::fmap(tensors, flatten));
419
+ }
420
+
421
+ inline at::Tensor newLikeFlat(
422
+ std::vector<std::vector<at::Tensor>>& tensors,
423
+ size_t deviceIdx) {
424
+ if (tensors.empty() || tensors[0].empty()) {
425
+ TORCH_CHECK(false, "Received an empty list");
426
+ }
427
+ if (deviceIdx >= tensors.size()) {
428
+ TORCH_CHECK(false, "Invalid device index");
429
+ }
430
+ auto& t = tensors[deviceIdx][0];
431
+ auto device = t.device();
432
+ for (const auto i : c10::irange(1, tensors[deviceIdx].size())) {
433
+ if (tensors[deviceIdx][i].device() != device) {
434
+ TORCH_CHECK(false, "Expecting all tensors on the same device");
435
+ }
436
+ }
437
+ at::DeviceGuard gpuGuard(device);
438
+ std::vector<int64_t> sizes{static_cast<int64_t>(tensors[deviceIdx].size())};
439
+ std::vector<int64_t> strides{static_cast<int64_t>(t.numel())};
440
+ sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
441
+ strides.insert(strides.end(), t.strides().begin(), t.strides().end());
442
+ return at::empty_strided(
443
+ sizes, strides, t.options().memory_format(c10::nullopt));
444
+ }
445
+
446
+ inline at::Tensor newLikeFlat(std::vector<at::Tensor>& tensors) {
447
+ if (tensors.empty()) {
448
+ TORCH_CHECK(false, "Received an empty list");
449
+ }
450
+ auto& t = tensors[0];
451
+ at::DeviceGuard gpuGuard(t.device());
452
+ std::vector<int64_t> sizes{static_cast<int64_t>(tensors.size())};
453
+ sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
454
+ return at::empty(sizes, t.options());
455
+ }
456
+
457
+ inline std::vector<std::vector<int64_t>> getSizes(
458
+ const std::vector<at::Tensor>& tensors) {
459
+ std::vector<std::vector<int64_t>> sizes(tensors.size());
460
+ for (const auto i : c10::irange(tensors.size())) {
461
+ sizes[i] = tensors[i].sizes().vec();
462
+ }
463
+ return sizes;
464
+ }
465
+
466
+ inline std::vector<int> getDevices(const std::vector<at::Tensor>& tensors) {
467
+ std::vector<int> devices(tensors.size(), -1);
468
+ if (tensors[0].device().is_cuda()) {
469
+ for (const auto i : c10::irange(tensors.size())) {
470
+ devices[i] = tensors[i].storage().device().index();
471
+ }
472
+ }
473
+ return devices;
474
+ }
475
+
476
+ template <typename T>
477
+ inline T* getDataPointer(const at::Tensor& tensor) {
478
+ // This method is only used in ProcessGroupGloo for now. Call sites must make
479
+ // sure that the input tensor is contiguous. It is OK if the tensor does not
480
+ // start from the beginning of the storage. For example, it could come from
481
+ // chunk(..., dim=0)[1]. Hence, we need to use data_ptr() instead of
482
+ // tensor.storage().data()
483
+ // NB: not using tensor.data<T>() because tensor is not aware of gloo::TYPE
484
+ return static_cast<T*>(tensor.data_ptr());
485
+ }
486
+
487
+ template <typename T>
488
+ std::vector<T*> getDataPointers(const std::vector<at::Tensor>& tensors) {
489
+ std::vector<T*> ptrs(tensors.size());
490
+ for (const auto i : c10::irange(tensors.size())) {
491
+ ptrs[i] = getDataPointer<T>(tensors[i]);
492
+ }
493
+ return ptrs;
494
+ }
495
+
496
+ // For alltoall split size sanity check
497
+ inline void checkSplitSizes(
498
+ const std::vector<int64_t>& split_sizes,
499
+ const at::Tensor& tensor,
500
+ int group_size) {
501
+ if (split_sizes.empty()) {
502
+ TORCH_CHECK(
503
+ tensor.size(0) % group_size == 0,
504
+ "Tensor's dim 0 does not divide equally across group size");
505
+ } else {
506
+ TORCH_CHECK(
507
+ split_sizes.size() == static_cast<size_t>(group_size),
508
+ "Number of tensor splits not equal to group size");
509
+ const auto sum = c10::sum_integers(split_sizes);
510
+ TORCH_CHECK(
511
+ sum == tensor.size(0), "Split sizes doesn't match total dim 0 size");
512
+ }
513
+ }
514
+
515
+ // Compute alltoall lengths and offsets, handling multi-dimension tensors
516
+ template <typename T>
517
+ size_t computeLengthsAndOffsets(
518
+ const std::vector<int64_t>& split_sizes,
519
+ const at::Tensor& tensor,
520
+ std::vector<T>* lengths,
521
+ std::vector<T>* offsets) {
522
+ size_t group_size = lengths->size();
523
+ bool equal_splits = false;
524
+ size_t dim0_size = tensor.size(0);
525
+ size_t row_size = (dim0_size ? tensor.numel() / dim0_size : 1);
526
+ size_t split_size = 0;
527
+ size_t offset = 0;
528
+
529
+ if (split_sizes.empty()) {
530
+ equal_splits = true;
531
+ split_size = tensor.size(0) / group_size;
532
+ }
533
+ for (const auto i : c10::irange(group_size)) {
534
+ size_t length = row_size * (equal_splits ? split_size : split_sizes[i]);
535
+ (*lengths)[i] = length;
536
+ (*offsets)[i] = offset;
537
+ // TODO: see if we should add overflow protection for offset
538
+ offset += length;
539
+ }
540
+ return offset;
541
+ }
542
+
543
+ template <typename T>
544
+ size_t computeLengthsAndOffsets(
545
+ const std::vector<at::Tensor>& tensors,
546
+ std::vector<T>* lengths,
547
+ std::vector<T>* offsets) {
548
+ size_t group_size = lengths->size();
549
+ size_t offset = 0;
550
+ for (const auto i : c10::irange(group_size)) {
551
+ size_t length = tensors[i].numel();
552
+ (*lengths)[i] = length;
553
+ (*offsets)[i] = offset;
554
+ offset += length;
555
+ }
556
+ return offset;
557
+ }
558
+
559
+ using RankType = uint32_t;
560
+ using SizeType = uint64_t;
561
+
562
+ // `errno` is only meaningful when a call fails. E.g., a successful `fork()` sets
563
+ // `errno` to `EINVAL` in child process on some macos
564
+ // (https://stackoverflow.com/a/20295079), and thus `errno` should really only
565
+ // be inspected if an error occurred.
566
+ //
567
+ // `success_cond` is an expression used to check if an error has happened. So for
568
+ // `fork()`, we can use `SYSCHECK(pid = fork(), pid != -1)`. The function output
569
+ // is stored in variable `__output` and may be used in `success_cond`.
570
+ #ifdef _WIN32
571
+ #define SYSCHECK(expr, success_cond) \
572
+ while (true) { \
573
+ auto __output = (expr); \
574
+ auto errno_local = WSAGetLastError(); \
575
+ (void)__output; \
576
+ if (!(success_cond)) { \
577
+ if (errno == EINTR) { \
578
+ continue; \
579
+ } else if ( \
580
+ errno_local == WSAETIMEDOUT || errno_local == WSAEWOULDBLOCK) { \
581
+ C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \
582
+ } else { \
583
+ C10_THROW_ERROR(DistNetworkError, std::strerror(errno_local)); \
584
+ } \
585
+ } else { \
586
+ break; \
587
+ } \
588
+ }
589
+ #else
590
+ #define SYSCHECK(expr, success_cond) \
591
+ while (true) { \
592
+ auto __output = (expr); \
593
+ (void)__output; \
594
+ if (!(success_cond)) { \
595
+ if (errno == EINTR) { \
596
+ continue; \
597
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) { \
598
+ C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \
599
+ } else { \
600
+ C10_THROW_ERROR(DistNetworkError, std::strerror(errno)); \
601
+ } \
602
+ } else { \
603
+ break; \
604
+ } \
605
+ }
606
+ #endif
607
+
608
+ // Most functions indicate error by returning `-1`. This is a helper macro for
609
+ // this common case with `SYSCHECK`.
610
+ // Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1
611
+ #define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1)
612
+
613
+ namespace tcputil {
614
+
615
+ // Send and receive
616
+ template <typename T>
617
+ void sendBytes(
618
+ int socket,
619
+ const T* buffer,
620
+ size_t length,
621
+ bool moreData = false) {
622
+ size_t bytesToSend = sizeof(T) * length;
623
+ if (bytesToSend == 0) {
624
+ return;
625
+ }
626
+
627
+ auto bytes = reinterpret_cast<const uint8_t*>(buffer);
628
+ uint8_t* currentBytes = const_cast<uint8_t*>(bytes);
629
+
630
+ int flags = 0;
631
+
632
+ #ifdef MSG_MORE
633
+ if (moreData) { // there is more data to send
634
+ flags |= MSG_MORE;
635
+ }
636
+ #endif
637
+
638
+ // Ignore SIGPIPE as the send() return value is always checked for error
639
+ #ifdef MSG_NOSIGNAL
640
+ flags |= MSG_NOSIGNAL;
641
+ #endif
642
+
643
+ while (bytesToSend > 0) {
644
+ ssize_t bytesSent;
645
+ SYSCHECK_ERR_RETURN_NEG1(
646
+ bytesSent =
647
+ ::send(socket, (const char*)currentBytes, bytesToSend, flags))
648
+ if (bytesSent == 0) {
649
+ C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
650
+ }
651
+
652
+ bytesToSend -= bytesSent;
653
+ currentBytes += bytesSent;
654
+ }
655
+ }
656
+
657
+ template <typename T>
658
+ void recvBytes(int socket, T* buffer, size_t length) {
659
+ size_t bytesToReceive = sizeof(T) * length;
660
+ if (bytesToReceive == 0) {
661
+ return;
662
+ }
663
+
664
+ auto bytes = reinterpret_cast<uint8_t*>(buffer);
665
+ uint8_t* currentBytes = bytes;
666
+
667
+ while (bytesToReceive > 0) {
668
+ ssize_t bytesReceived;
669
+ SYSCHECK_ERR_RETURN_NEG1(
670
+ bytesReceived = recv(socket, (char*)currentBytes, bytesToReceive, 0))
671
+ if (bytesReceived == 0) {
672
+ C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
673
+ }
674
+
675
+ bytesToReceive -= bytesReceived;
676
+ currentBytes += bytesReceived;
677
+ }
678
+ }
679
+
680
+ // send a vector's length and data
681
+ template <typename T>
682
+ void sendVector(int socket, const std::vector<T>& vec, bool moreData = false) {
683
+ SizeType size = vec.size();
684
+ sendBytes<SizeType>(socket, &size, 1, true);
685
+ sendBytes<T>(socket, vec.data(), size, moreData);
686
+ }
687
+
688
+ // receive a vector as sent in sendVector
689
+ template <typename T>
690
+ std::vector<T> recvVector(int socket) {
691
+ SizeType valueSize;
692
+ recvBytes<SizeType>(socket, &valueSize, 1);
693
+ std::vector<T> value(valueSize);
694
+ recvBytes<T>(socket, value.data(), value.size());
695
+ return value;
696
+ }
697
+
698
+ // this is only for convenience when sending rvalues
699
+ template <typename T>
700
+ void sendValue(int socket, const T& value, bool moreData = false) {
701
+ sendBytes<T>(socket, &value, 1, moreData);
702
+ }
703
+
704
+ template <typename T>
705
+ T recvValue(int socket) {
706
+ T value;
707
+ recvBytes<T>(socket, &value, 1);
708
+ return value;
709
+ }
710
+
711
+ // send a string's length and data
712
+ inline void sendString(
713
+ int socket,
714
+ const std::string& str,
715
+ bool moreData = false) {
716
+ SizeType size = str.size();
717
+ sendBytes<SizeType>(socket, &size, 1, true);
718
+ sendBytes<char>(socket, str.data(), size, moreData);
719
+ }
720
+
721
+ // receive a string as sent in sendString
722
+ inline std::string recvString(int socket) {
723
+ SizeType valueSize;
724
+ recvBytes<SizeType>(socket, &valueSize, 1);
725
+ std::vector<char> value(valueSize);
726
+ recvBytes<char>(socket, value.data(), value.size());
727
+ return std::string(value.data(), value.size());
728
+ }
729
+
730
+ } // namespace tcputil
731
+ } // namespace c10d
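// Hedged sketch of how checkSplitSizes() and computeLengthsAndOffsets() above
// are typically used to prepare an alltoall call; the tensor shape and group
// size are illustrative.
#include <torch/csrc/distributed/c10d/Utils.hpp>
#include <vector>

void example_lengths_and_offsets() {
  const int group_size = 4;
  at::Tensor t = at::zeros({8, 3}); // 8 rows of 3 elements
  std::vector<int64_t> split_sizes; // empty -> split dim 0 equally (2 rows each)
  c10d::checkSplitSizes(split_sizes, t, group_size);

  std::vector<int> lengths(group_size);
  std::vector<int> offsets(group_size);
  size_t total =
      c10d::computeLengthsAndOffsets(split_sizes, t, &lengths, &offsets);
  // lengths == {6, 6, 6, 6}, offsets == {0, 6, 12, 18}, total == t.numel() == 24
  (void)total;
}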
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
+
+ namespace c10d {
+ namespace tcputil {
+
+ #define CONNECT_SOCKET_OFFSET 1
+
+ inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) {
+   return WSAPoll(fdArray, fds, timeout);
+ }
+
+ inline void addPollfd(
+     std::vector<struct pollfd>& fds,
+     int socket,
+     short events) {
+   fds.push_back({(SOCKET)socket, events});
+ }
+
+ inline struct ::pollfd getPollfd(int socket, short events) {
+   struct ::pollfd res = {(SOCKET)socket, events};
+   return res;
+ }
+
+ } // namespace tcputil
+ } // namespace c10d
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp ADDED
@@ -0,0 +1,164 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <stdexcept>
5
+ #include <vector>
6
+
7
+ constexpr auto kNoTimeout = std::chrono::milliseconds(0);
8
+
9
+ namespace c10d {
10
+
11
+ constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY";
12
+
13
+ enum class OpType : std::uint8_t {
14
+ BROADCAST = 0,
15
+ ALLREDUCE = 1,
16
+ ALLREDUCE_COALESCED = 2,
17
+ REDUCE = 3,
18
+ ALLGATHER = 4,
19
+ _ALLGATHER_BASE = 5,
20
+ ALLGATHER_COALESCED = 6,
21
+ GATHER = 7,
22
+ SCATTER = 8,
23
+ REDUCE_SCATTER = 9,
24
+ ALLTOALL_BASE = 10,
25
+ ALLTOALL = 11,
26
+ SEND = 12,
27
+ RECV = 13,
28
+ RECVANYSOURCE = 14,
29
+ BARRIER = 15,
30
+ _REDUCE_SCATTER_BASE = 16,
31
+ COALESCED = 17,
32
+ _ALLREDUCE_SPARSE = 18,
33
+ UNKNOWN = 100,
34
+ };
35
+
36
+ // Converts OpType to human readable string.
37
+ TORCH_API std::string opTypeToString(OpType opType);
38
+
39
+ // Whether or not an OP is a p2p op (SEND, RECV, RECVANYSOURCE)
40
+ TORCH_API bool isP2POp(OpType opType, bool batchP2P = false);
41
+
42
+ // Please do not use Work API, it is going away, to be
43
+ // replaced by ivalue::Future.
44
+ // Python binding for this class might change, please do not assume
45
+ // this will be bound using pybind.
46
+ class TORCH_API Work : public torch::CustomClassHolder {
47
+ public:
48
+ Work(
49
+ int rank = -1,
50
+ OpType opType = OpType::UNKNOWN,
51
+ const char* profilingTitle = nullptr,
52
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
53
+ c10::nullopt);
54
+
55
+ ~Work() override;
56
+
57
+ // Checks if request has completed. Non-blocking operation.
58
+ virtual bool isCompleted();
59
+
60
+ // Returns true if the work completed successfully.
61
+ // If false, the exception function can be called to get details.
62
+ virtual bool isSuccess() const;
63
+
64
+ // Returns exception if isSuccess() returned false.
65
+ virtual std::exception_ptr exception() const;
66
+
67
+ // Returns the source rank if this object represents a recv-from-any.
68
+ virtual int sourceRank() const;
69
+
70
+ // Returns result tensors, if applicable.
71
+ // If the work is not supposed to have a result, we return an empty list.
72
+ virtual std::vector<at::Tensor> result();
73
+
74
+ // Ensures that operations on the output tensors that are invoked
75
+ // after this function returns are correctly sequenced after the
76
+ // asynchronous completion of this work.
77
+ //
78
+ // For CUDA tensors, it inserts stream synchronization such that
79
+ // the streams of the caller wait for completion of the
80
+ // asynchronous operations on the destination tensors.
81
+ //
82
+ // For CPU tensors, it is currently a nop.
83
+ //
84
+ // This function should only be used if the caller polls for
85
+ // completion through the `isCompleted` function, that call has returned
86
+ // true, and the `isSuccess` function also has returned true.
87
+ //
88
+ virtual void synchronize();
89
+
90
+ // Waits until request completes. Blocking operation.
91
+ // Throws if the work completed with an exception.
92
+ // Returns false if the work is aborted.
93
+ // Otherwise, it always returns true, indicating the work is completed.
94
+ //
95
+ // Functionally equivalent to:
96
+ //
97
+ // while (!isCompleted()) { /* nop */ }
98
+ // auto success = isSuccess();
99
+ // if (!success) { std::rethrow_exception(exception()); }
100
+ // return success;
101
+ //
102
+ virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout);
103
+
104
+ virtual void abort();
105
+
106
+ // Returns a Future object that will be associated with the completion of
107
+ // work. Only NCCL backend is currently supported.
108
+ virtual c10::intrusive_ptr<c10::ivalue::Future> getFuture();
109
+
110
+ virtual float getDuration() const;
111
+
112
+ virtual uint64_t getSequencenumber() const;
113
+
114
+ OpType retrieveOpType() const;
115
+
116
+ static c10::intrusive_ptr<Work> create_from_future(
117
+ const c10::intrusive_ptr<c10::ivalue::Future>&);
118
+
119
+ protected:
120
+ // Completes the work object and optionally sets the exception in a
121
+ // thread-safe manner. Notifies all waiting condition variables as well.
122
+ void finish(std::exception_ptr exception = nullptr);
123
+
124
+ // Similar to finish, but throws an exception if one is already set or
125
+ // provided by the user.
126
+ void finishAndThrow(std::exception_ptr exception);
127
+
128
+ mutable std::mutex mutex_;
129
+ std::condition_variable cv_;
130
+ bool completed_ = false;
131
+ std::exception_ptr exception_;
132
+
133
+ // Current rank of the node.
134
+ const int rank_;
135
+
136
+ // Operation type that this work object refers to.
137
+ OpType opType_;
138
+
139
+ // When profiling, the callback to record end of operation event. This
140
+ // callback needs to be called when collective operation is complete.
141
+ std::function<void()> recordFunctionEndCallback_;
142
+ };
143
+
144
+ struct TORCH_API WorkInfo {
145
+ WorkInfo(
146
+ const OpType& opType,
147
+ const uint64_t seq,
148
+ const std::chrono::time_point<std::chrono::system_clock>& timeStarted,
149
+ const std::chrono::time_point<std::chrono::system_clock>& timeFinished,
150
+ const std::chrono::duration<float>& activeDuration)
151
+ : opType(opType),
152
+ seq(seq),
153
+ timeStarted(timeStarted),
154
+ timeFinished(timeFinished),
155
+ activeDuration(activeDuration) {}
156
+
157
+ OpType opType;
158
+ uint64_t seq;
159
+ std::chrono::time_point<std::chrono::system_clock> timeStarted;
160
+ std::chrono::time_point<std::chrono::system_clock> timeFinished;
161
+ std::chrono::duration<float> activeDuration;
162
+ };
163
+
164
+ } // namespace c10d
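// Hedged sketch of the polling pattern described in the wait() comment above.
// `work` is assumed to come from some collective issued on a ProcessGroup;
// getFuture()/synchronize() behavior is backend-dependent.
#include <exception>
#include <torch/csrc/distributed/c10d/Work.hpp>

void poll_until_done(const c10::intrusive_ptr<c10d::Work>& work) {
  while (!work->isCompleted()) {
    // optionally yield or do other useful work instead of spinning
  }
  if (!work->isSuccess()) {
    std::rethrow_exception(work->exception());
  }
  // Order later reads of the output tensors after the async collective.
  work->synchronize();
}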
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/python_headers.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace c10d {
+
+ PyMethodDef* python_functions();
+
+ } // namespace c10d
+ } // namespace distributed
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp ADDED
@@ -0,0 +1,140 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
7
+ #include <utility>
8
+
9
+ namespace c10d {
10
+
11
+ // Broadcast many tensors to all processes in the process group.
12
+ TORCH_API void broadcast_coalesced(
13
+ const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
14
+ at::TensorList tensors,
15
+ size_t buffer_size,
16
+ int rank = 0);
17
+
18
+ // This class passes bucket contents tensor to DDP communication hook.
19
+ class TORCH_API GradBucket {
20
+ public:
21
+ explicit GradBucket(
22
+ size_t index,
23
+ size_t bucket_count,
24
+ at::Tensor tensor,
25
+ std::vector<size_t> offsets,
26
+ std::vector<size_t> lengths,
27
+ std::vector<c10::IntArrayRef> sizes_vec,
28
+ std::vector<at::Tensor> parameters,
29
+ c10::optional<at::Tensor> sparse_grad_indices)
30
+ : index_(index),
31
+ bucket_count_(bucket_count),
32
+ buffer_(std::move(tensor)),
33
+ offsets_(std::move(offsets)),
34
+ lengths_(std::move(lengths)),
35
+ sizes_vec_(std::move(sizes_vec)),
36
+ parameters_(std::move(parameters)),
37
+ sparse_grad_indices_(std::move(sparse_grad_indices)) {}
38
+
39
+ // Returns the index of the bucket, which is unique across all the buckets.
40
+ size_t getIndex() const {
41
+ return index_;
42
+ }
43
+
44
+ const at::Tensor& getBuffer() const {
45
+ return buffer_;
46
+ }
47
+
48
+ // Returns a mutable buffer compared with the above method.
49
+ at::Tensor& getBufferRef() {
50
+ return buffer_;
51
+ }
52
+
53
+ // Replaces the bucket's flattened buffer tensor.
54
+ void setBuffer(at::Tensor& buffer) {
55
+ buffer_ = buffer;
56
+ }
57
+
58
+ // Each tensor in the list returned by getGradients() corresponds to a
59
+ // parameter.
60
+ std::vector<at::Tensor> getGradients() const;
61
+
62
+ // Returns model parameters belonging to this bucket. They are returned in the
63
+ // same order as gradient tensors via getGradients(). For example,
64
+ // getParameters[i] will have its gradient stored in
65
+ // getGradients[i]
66
+ const std::vector<at::Tensor> getParameters() const {
67
+ return parameters_;
68
+ }
69
+
70
+ // Returns whether this bucket is the last bucket to allreduce in an iteration.
71
+ bool isLast() const {
72
+ return index_ == bucket_count_ - 1;
73
+ }
74
+
75
+ c10::optional<at::Tensor>& getSparseGradIndices() {
76
+ return sparse_grad_indices_;
77
+ }
78
+
79
+ private:
80
+ size_t index_;
81
+ size_t bucket_count_;
82
+ at::Tensor buffer_;
83
+
84
+ // Per-variable info in buffer_.
85
+ std::vector<size_t> offsets_;
86
+ std::vector<size_t> lengths_;
87
+ std::vector<c10::IntArrayRef> sizes_vec_;
88
+
89
+ // Model parameters for this bucket.
90
+ const std::vector<at::Tensor> parameters_;
91
+
92
+ // Predefined sparse indices for this bucket (only used for sparse tensors).
93
+ // The gradients will be updated to have indices with these tensor values
94
+ c10::optional<at::Tensor> sparse_grad_indices_;
95
+ };
96
+
97
+ // Base class of both `PythonCommHook` and `CppCommHook`.
98
+ // Requires implementing 1) `runHook` method that communicates gradients
99
+ // asynchronously, and 2) `parseHookResult` method that converts the hook
100
+ // result into a tensor.
101
+ class TORCH_API CommHookInterface {
102
+ public:
103
+ virtual ~CommHookInterface() = default;
104
+
105
+ // Passes the input grad bucket to the registered communication hook.
106
+ // Once the tensors in the bucket are ready, kicks off the hook asynchronously
107
+ // and returns a future that holds the communication results.
108
+ virtual c10::intrusive_ptr<c10::ivalue::Future> runHook(
109
+ GradBucket& bucket) = 0;
110
+
111
+ // Returns the resulting tensor once the communication hook result is
112
+ // ready. The resulting tensor will then be copied to the grads of
113
+ // individual parameters.
114
+ virtual at::Tensor parseHookResult(const c10::IValue& result) = 0;
115
+ };
116
+
117
+ namespace detail {
118
+ // This helper function is called both by CppCommHookInterface below and inside
119
+ // reducer.
120
+ TORCH_API at::Tensor parseCppCommHookResult(const c10::IValue& result);
121
+ } // namespace detail
122
+
123
+ // This CppCommHook interface only requires implementing runHook method that
124
+ // potentially uses a state.
125
+ template <typename T>
126
+ class CppCommHookInterface : public CommHookInterface {
127
+ public:
128
+ explicit CppCommHookInterface(T state) : state_(std::move(state)) {}
129
+
130
+ ~CppCommHookInterface() override = default;
131
+
132
+ at::Tensor parseHookResult(const c10::IValue& result) override {
133
+ return detail::parseCppCommHookResult(result);
134
+ }
135
+
136
+ protected:
137
+ T state_;
138
+ };
139
+
140
+ } // namespace c10d
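// Hedged sketch of a custom DDP communication hook built on the
// CppCommHookInterface above: allreduce the bucket's flattened buffer and hand
// back the future of that collective. The pre-division by world size is an
// assumption about what a typical averaging hook does, not a copy of a
// built-in hook; AverageAllReduceHook is an illustrative name.
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
#include <torch/csrc/distributed/c10d/comm.hpp>

namespace example {

class AverageAllReduceHook : public c10d::CppCommHookInterface<
                                 c10::intrusive_ptr<c10d::ProcessGroup>> {
 public:
  explicit AverageAllReduceHook(c10::intrusive_ptr<c10d::ProcessGroup> pg)
      : c10d::CppCommHookInterface<c10::intrusive_ptr<c10d::ProcessGroup>>(
            std::move(pg)) {}

  c10::intrusive_ptr<c10::ivalue::Future> runHook(
      c10d::GradBucket& bucket) override {
    std::vector<at::Tensor> tensors = {bucket.getBufferRef()};
    tensors[0] /= state_->getSize(); // pre-divide so SUM yields the mean
    return state_->allreduce(tensors)->getFuture();
  }
};

} // namespace example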
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h ADDED
@@ -0,0 +1,23 @@
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+
+ #include <c10/macros/Macros.h>
+
+ namespace c10d {
+
+ enum class DebugLevel { Off = 0, Info = 1, Detail = 2 };
+
+ TORCH_API void setDebugLevel(DebugLevel level);
+
+ // Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG`
+ // environment variable.
+ TORCH_API void setDebugLevelFromEnvironment();
+
+ TORCH_API DebugLevel debug_level() noexcept;
+
+ } // namespace c10d
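// Hedged usage sketch for the debug-level API above: initialize once from the
// TORCH_DISTRIBUTED_DEBUG environment variable, then gate verbose logging.
#include <iostream>
#include <torch/csrc/distributed/c10d/debug.h>

void log_if_detailed() {
  c10d::setDebugLevelFromEnvironment();
  if (c10d::debug_level() >= c10d::DebugLevel::Detail) {
    std::cout << "c10d detailed debugging enabled\n";
  }
}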
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+ #include <torch/csrc/distributed/c10d/comm.hpp>
5
+
6
+ namespace c10d {
7
+
8
+ enum class BuiltinCommHookType {
9
+ ALLREDUCE = 1,
10
+ FP16_COMPRESS = 2,
11
+ };
12
+
13
+ class AllReduceCommHook
14
+ : public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
15
+ public:
16
+ explicit AllReduceCommHook(const c10::intrusive_ptr<ProcessGroup>& state)
17
+ : CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
18
+
19
+ ~AllReduceCommHook() override = default;
20
+
21
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
22
+ };
23
+
24
+ class FP16CompressCommHook
25
+ : public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
26
+ public:
27
+ explicit FP16CompressCommHook(const c10::intrusive_ptr<ProcessGroup>& state)
28
+ : CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
29
+
30
+ ~FP16CompressCommHook() override = default;
31
+
32
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
33
+ };
34
+
35
+ // Almost the same as AllReduceCommHook, but without division inside the hook.
36
+ // This enables the optimization of fusing copy and division and saves one scan
37
+ // over all the input parameters, when no communication hook is provided by the
38
+ // user. Only used internally and not released as a public built-in
39
+ // communication hook.
40
+ class _AllReduceBySumCommHook
41
+ : public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
42
+ public:
43
+ explicit _AllReduceBySumCommHook(
44
+ const c10::intrusive_ptr<ProcessGroup>& state)
45
+ : CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
46
+
47
+ ~_AllReduceBySumCommHook() override = default;
48
+
49
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
50
+ };
51
+
52
+ } // namespace c10d
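These built-in hooks are normally installed on the Reducer rather than run directly. A sketch, assuming `reducer` is an existing c10d::Reducer and `pg` an initialized c10::intrusive_ptr<c10d::ProcessGroup>; the registration methods appear in reducer.hpp later in this diff:

    // Install the FP16 compression hook explicitly, with the process group as
    // its state...
    reducer.register_comm_hook(
        std::make_unique<c10d::FP16CompressCommHook>(pg));

    // ...or, equivalently, select a built-in hook by its enum tag instead
    // (only one of the two registration calls may be made).
    // reducer.register_builtin_comm_hook(
    //     c10d::BuiltinCommHookType::FP16_COMPRESS);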
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h ADDED
@@ -0,0 +1,56 @@
+ // Copyright (c) Facebook, Inc. and its affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+
+ #include <cstring>
+ #include <system_error>
+
+ #include <fmt/format.h>
+
+ namespace fmt {
+
+ template <>
+ struct formatter<std::error_category> {
+ constexpr decltype(auto) parse(format_parse_context& ctx) const {
+ return ctx.begin();
+ }
+
+ template <typename FormatContext>
+ decltype(auto) format(const std::error_category& cat, FormatContext& ctx)
+ const {
+ if (std::strcmp(cat.name(), "generic") == 0) {
+ return fmt::format_to(ctx.out(), "errno");
+ } else {
+ return fmt::format_to(ctx.out(), "{} error", cat.name());
+ }
+ }
+ };
+
+ template <>
+ struct formatter<std::error_code> {
+ constexpr decltype(auto) parse(format_parse_context& ctx) const {
+ return ctx.begin();
+ }
+
+ template <typename FormatContext>
+ decltype(auto) format(const std::error_code& err, FormatContext& ctx) const {
+ return fmt::format_to(
+ ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message());
+ }
+ };
+
+ } // namespace fmt
+
+ namespace c10d {
+ namespace detail {
+
+ inline std::error_code lastError() noexcept {
+ return std::error_code{errno, std::generic_category()};
+ }
+
+ } // namespace detail
+ } // namespace c10d
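With these formatter specializations in scope, a std::error_code renders with its category, value, and message. A sketch using detail::lastError() after a failing system call; the file path and helper name are placeholders:

    #include <fcntl.h>
    #include <unistd.h>

    #include <fmt/format.h>
    #include <torch/csrc/distributed/c10d/error.h>

    std::string describeOpenFailure(const char* path) {
      int fd = ::open(path, O_RDONLY);
      if (fd < 0) {
        // Renders e.g. "open failed (errno: 2 - No such file or directory)".
        return fmt::format("open failed {}", c10d::detail::lastError());
      }
      ::close(fd);
      return "ok";
    }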
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h ADDED
@@ -0,0 +1,33 @@
 
+ // Copyright (c) Facebook, Inc. and its affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+
+ #include <stdexcept>
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+
+ // Utility macro similar to C10_THROW_ERROR; the major difference is that this
+ // macro handles exception types defined in the c10d namespace, whereas
+ // C10_THROW_ERROR requires an exception to be defined in the c10 namespace.
+ #define C10D_THROW_ERROR(err_type, msg) \
+ throw ::c10d::err_type( \
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
+
+ namespace c10d {
+
+ using c10::DistNetworkError;
+
+ class TORCH_API SocketError : public DistNetworkError {
+ using DistNetworkError::DistNetworkError;
+ };
+
+ class TORCH_API TimeoutError : public DistNetworkError {
+ using DistNetworkError::DistNetworkError;
+ };
+
+ } // namespace c10d
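A sketch of how the macro pairs with the exception types above; the timeout condition and function name are placeholders:

    #include <torch/csrc/distributed/c10d/exception.h>

    void waitForPeer(bool peerResponded) {
      if (!peerResponded) {
        // Expands to `throw ::c10d::TimeoutError(...)` with the current
        // function, file, and line captured as the error's source location.
        C10D_THROW_ERROR(TimeoutError, "peer did not respond in time");
      }
    }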
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp ADDED
@@ -0,0 +1,104 @@
1
+ #include <c10/util/Logging.h>
2
+ #include <torch/csrc/distributed/c10d/reducer.hpp>
3
+
4
+ #include <mutex>
5
+
6
+ namespace c10d {
7
+
8
+ class TORCH_API Logger {
9
+ public:
10
+ explicit Logger(std::shared_ptr<c10d::Reducer> reducer);
11
+ // Set logging data that can be got during DistributedDataParallel
12
+ // construction time.
13
+ void set_construction_data_and_log(
14
+ const std::string& module_name,
15
+ const std::vector<int>& device_ids,
16
+ int output_device,
17
+ bool broadcast_buffers,
18
+ bool has_sync_bn,
19
+ bool static_graph);
20
+
21
+ void set_static_graph();
22
+
23
+ // An interface for users to get DDPLoggingData and log them
24
+ // in the applications. Explanation of logging fields are in
25
+ // "struct DDPLoggingData" of "torch/c10/util/Logging.h".
26
+ at::DDPLoggingData get_ddp_logging_data();
27
+
28
+ // Stream insertion operator for logging data to stream under
29
+ // TORCH_DISTRIBUTED_DEBUG.
30
+ friend std::ostream& operator<<(std::ostream& output, const Logger& logger);
31
+
32
+ ~Logger() noexcept(false) {
33
+ // Log if DDP graph is static in Logger dtor instead of Reducer dtor since
34
+ // Logger is deleted before Reducer.
35
+ log_if_graph_static(reducer_->ddp_graph_static());
36
+ }
37
+
38
+ // Set environment variables.
39
+ void set_env_variables();
40
+ // Set parameters stats.
41
+ void set_parameter_stats();
42
+ // Get size of each bucket (Bytes).
43
+ std::vector<int64_t> get_bucket_sizes();
44
+ // Get variable indices for each bucket.
45
+ std::vector<std::vector<size_t>> get_per_bucket_variable_indices();
46
+ // Set comm. hook, if used
47
+ void set_comm_hook(const std::string& hook);
48
+ // Set running with uneven input detection (model.join() context manager)
49
+ void set_uneven_input_join();
50
+
51
+ // Reset performance stats at current iteration
52
+ void reset_performance_stats();
53
+
54
+ // Calculate avg stats using cpu timer and gpu timer
55
+ // that has been recorded in reducer.
56
+ void calculate_avg_time(
57
+ int64_t& avg_time,
58
+ int64_t& time_duration,
59
+ Timer& timer,
60
+ Timer::Event start_event,
61
+ Timer::Event end_event);
62
+
63
+ // Set the absolute time of the event that has been recorded in reducer.
64
+ void set_event_time(int64_t& event_time, Timer& timer, Timer::Event event);
65
+ // Set stats that can be collected only during
66
+ // training loop. It is called at the beginning of forward call
67
+ // to record the run time stats of sampled iterations that previously ran.
68
+ // GPU performance stats are collected only for single process
69
+ // single device program and single device module right now.
70
+ // TODO to support single process multiple devices and multi device modules,
71
+ // events need to be created and recorded on multiple devices.
72
+ void set_runtime_stats_and_log();
73
+
74
+ // Called when DDP/reducer is failing with an error. The
75
+ // logging data structure will have two fields filled: "has_error" indicating
76
+ // that this iteration encountered an error and other fields are not valid,
77
+ // and "error", a string which contains the error message that DDP failed
78
+ // with.
79
+ template <typename... Args>
80
+ void set_error_and_log(const std::string& ddp_error, const Args&... args) {
81
+ ddp_logging_data_->ints_map["has_error"] = 1;
82
+ auto err = c10::str(ddp_error, args...);
83
+ ddp_logging_data_->strs_map["error"] = err;
84
+ // Report the iteration we are erroring at so user knows how many examples
85
+ // successfully processed before this error was hit.
86
+ ddp_logging_data_->ints_map["iteration"] = reducer_->num_iterations_;
87
+ at::LogPyTorchDDPUsage(*ddp_logging_data_);
88
+ }
89
+
90
+ // When running without static graph, called when reducer is destroyed to log
91
+ // if graph was actually static and is a candidate for static graph
92
+ // optimization.
93
+ void log_if_graph_static(bool is_static);
94
+
95
+ private:
96
+ // ddp_logging_data_ is used to hold all the ddp related logging
97
+ // data fields.
98
+ std::unique_ptr<at::DDPLoggingData> ddp_logging_data_;
99
+ std::shared_ptr<c10d::Reducer> reducer_;
100
+ // track the number of iterations when runtime stats are collected so far.
101
+ long num_iterations_stats_recorded_ = 0;
102
+ };
103
+
104
+ } // namespace c10d
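A rough sketch of the expected call pattern around DDP construction, assuming `reducer` is a std::shared_ptr<c10d::Reducer> built elsewhere; the module name and device list are placeholders:

    auto logger = std::make_shared<c10d::Logger>(reducer);

    // Record construction-time fields once, right after DDP wraps the module.
    logger->set_construction_data_and_log(
        /*module_name=*/"MyModel",
        /*device_ids=*/{0},
        /*output_device=*/0,
        /*broadcast_buffers=*/true,
        /*has_sync_bn=*/false,
        /*static_graph=*/false);

    // Later, the accumulated fields can be pulled from the application side.
    at::DDPLoggingData data = logger->get_ddp_logging_data();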
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h ADDED
@@ -0,0 +1,51 @@
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+
+ #include <string>
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Logging.h>
+ #include <fmt/format.h>
+
+ namespace c10d {
+ namespace detail {
+
+ enum class LogLevel { Trace, Debug, Info, Warning, Error };
+
+ TORCH_API bool isLogLevelEnabled(LogLevel level) noexcept;
+
+ template <typename... T>
+ std::string formatLogMessage(fmt::string_view fmt, T&&... args) {
+ return fmt::vformat(fmt, fmt::make_format_args(args...));
+ }
+
+ } // namespace detail
+ } // namespace c10d
+
+ #define C10D_ERROR(...) \
+ LOG_IF( \
+ ERROR, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Error)) \
+ << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
+
+ #define C10D_WARNING(...) \
+ LOG_IF( \
+ WARNING, \
+ c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Warning)) \
+ << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
+
+ #define C10D_INFO(...) \
+ LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Info)) \
+ << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
+
+ #define C10D_DEBUG(...) \
+ LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Debug)) \
+ << "[c10d - debug] " << c10d::detail::formatLogMessage(__VA_ARGS__)
+
+ #define C10D_TRACE(...) \
+ LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Trace)) \
+ << "[c10d - trace] " << c10d::detail::formatLogMessage(__VA_ARGS__)
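The macros accept fmt-style format strings and arguments; a small sketch with placeholder values:

    #include <string>

    #include <torch/csrc/distributed/c10d/logging.h>

    void logRetry(int attempt, const std::string& host) {
      // Each line is emitted only if its level is enabled; the "[c10d]" prefix
      // comes from the macro itself.
      C10D_WARNING("Retrying connection to {} (attempt {})", host, attempt);
      C10D_TRACE("retry scheduled for host {}", host);
    }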
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp ADDED
@@ -0,0 +1,589 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <atomic>
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <tuple>
8
+ #include <unordered_map>
9
+ #include <vector>
10
+
11
+ #include <ATen/core/ivalue_inl.h>
12
+ #include <c10/macros/Macros.h>
13
+ #include <c10/util/intrusive_ptr.h>
14
+ #include <torch/csrc/autograd/function.h>
15
+ #include <torch/csrc/autograd/profiler.h>
16
+ #include <torch/csrc/autograd/variable.h>
17
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
18
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
19
+ #include <torch/csrc/distributed/c10d/comm.hpp>
20
+ #include <torch/csrc/distributed/c10d/debug.h>
21
+ #include <torch/csrc/distributed/c10d/default_comm_hooks.hpp>
22
+ #include <torch/csrc/distributed/c10d/reducer_timer.hpp>
23
+ #ifndef _WIN32
24
+ #include <torch/csrc/distributed/autograd/context/context.h>
25
+ #endif
26
+
27
+ namespace c10d {
28
+
29
+ constexpr int kDefaultFirstBucketBytes = int(1024 * 1024);
30
+ constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024);
31
+ // Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations.
32
+ constexpr int kDDPRuntimeLoggingSampleRate = 100;
33
+
34
+ // Forward declaration
35
+ class Logger;
36
+
37
+ // Local accumulator type for a single bucket.
38
+ struct BucketAccumulator {
39
+ std::vector<size_t> indices;
40
+ size_t size = 0;
41
+ size_t size_limit = 0;
42
+ };
43
+
44
+ class TORCH_API Reducer {
45
+ public:
46
+ // The constructor takes a list of variables (i.e. parameters) for this
47
+ // process's single model replica (as DDP assumes single-process
48
+ // single-device). The bucket assignment for this reducer, `bucket_indices`,
49
+ // is specified as a list of buckets, each of which is specified as a list of
50
+ // indices into the bucket's `variables` list.
51
+ explicit Reducer(
52
+ std::vector<at::Tensor> params,
53
+ std::vector<std::vector<size_t>> bucket_indices,
54
+ std::vector<size_t> per_bucket_size_limits,
55
+ c10::intrusive_ptr<c10d::ProcessGroup> process_group,
56
+ std::vector<bool> expect_sparse_gradients,
57
+ int64_t bucket_bytes_cap,
58
+ bool find_unused_parameters,
59
+ bool gradient_as_bucket_view,
60
+ std::unordered_map<size_t, std::string> param_names,
61
+ int64_t first_bucket_bytes_cap);
62
+
63
+ ~Reducer() noexcept(false);
64
+
65
+ // To (re-)initialize bucket assignment, pass a list of buckets, each of
66
+ // which is specified by a list of indices in the bucket's `variables` list.
67
+ // This function performs validation that the variables within a bucket
68
+ // all live on the same device and have the same dimensionality.
69
+ void initialize_buckets(std::vector<std::vector<size_t>> bucket_indices);
70
+
71
+ void autograd_hook(size_t index);
72
+
73
+ // This function is called when the forward function has produced an output,
74
+ // and the user wishes to reduce gradients in the backwards pass.
75
+ // If they don't, and wish to accumulate gradients before reducing them,
76
+ // a call to this function can simply be omitted.
77
+ void prepare_for_backward(const std::vector<at::Tensor>& outputs);
78
+
79
+ // Called at the beginning of forward() inside DistributedDataParallel,
80
+ // right now it captures the starting time of forward in each iteration.
81
+ void prepare_for_forward();
82
+
83
+ // Returns the relative time in nanoseconds when gradients were ready,
84
+ // with respect to the time `prepare_for_backward` was called. The
85
+ // vector is for parameters for a single model replica.
86
+ std::vector<int64_t> get_backward_stats() const {
87
+ return backward_stats_;
88
+ }
89
+
90
+ // Registers a hook to the reducer. The hook is `CommHookInterface`
91
+ // type to allow both Python and CPP hooks. This function can only
92
+ // be called once before calling backward.
93
+ // Cannot combine with the call of `register_builtin_comm_hook`.
94
+ void register_comm_hook(std::unique_ptr<CommHookInterface> iface);
95
+
96
+ // Registers a built-in C++ comm hook to the reducer. This function can only
97
+ // be called once before calling backward.
98
+ // Cannot combine with the call of `register_comm_hook`.
99
+ void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type);
100
+
101
+ // Informs reducer that optimizer is running in backward, so gradients
102
+ // don't need to be copied from buckets as the optimizer would've already
103
+ // been applied.
104
+ void set_optimizer_in_backward() {
105
+ optim_in_backward_ = true;
106
+ };
107
+
108
+ // Runs allreduce or installed communication hook given GradBucket instance.
109
+ c10::intrusive_ptr<c10::ivalue::Future> run_comm_hook(
110
+ GradBucket& grad_bucket);
111
+
112
+ // Runs default allreduce hook.
113
+ c10::intrusive_ptr<c10::ivalue::Future> run_allreduce_hook(
114
+ GradBucket& grad_bucket);
115
+
116
+ // Returns gradient buckets in sequential order of buckets_. This is the order
117
+ // in which buckets are reduced across processes. If return_zero_tensors=true,
118
+ // will return zero tensors of the same shape instead of the true tensors.
119
+ std::vector<c10d::GradBucket> get_grad_buckets(
120
+ bool return_zero_tensors = true) const;
121
+
122
+ // Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_
123
+ // according to when tensors received grads in the backward pass.
124
+ // TODO this function makes broadcast communication call and
125
+ // could be overlapped with next forward() call, thus
126
+ // it could be async. Will make it async when rebuilding buckets for
127
+ // find_unused_parameters = true case, as we could rebuild buckets more than
128
+ // once for find_unused_parameters = true case, where subgraphs are trained
129
+ // and parameter indices order may change more frequently.
130
+ // For find_unused_parameters = false case, buckets are only rebuilt once,
131
+ // the performance cost is negligible. Returns true if the buckets were
132
+ // rebuilt.
133
+ bool rebuild_buckets();
134
+
135
+ void setSparseMetadata(std::map<std::string, at::Tensor>& metadata);
136
+
137
+ // Install futures that should be awaited at end of backwards. Currently these
138
+ // are only used by user-defined custom buffer reduction hooks, but can be
139
+ // generalized to any user-originating futures that need to be awaited.
140
+ void install_futures(c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs);
141
+
142
+ // Returns true if we should rebuild buckets, else false. We only rebuild
143
+ // buckets once after the first iteration and never rebuild them if
144
+ // find_unused_parameters_.
145
+ inline bool should_rebuild_buckets() const {
146
+ return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_;
147
+ }
148
+
149
+ // Pushes all parameters to be rebuilt.
150
+ void push_rebuilt_params_for_all_indices();
151
+
152
+ // Creates and sets ForwardPassWorkHandle given a Work and the
153
+ // corresponding tensor being reduced.
154
+ void set_forward_pass_work_handle(
155
+ c10::intrusive_ptr<c10d::Work> forwardPassWorkHandle,
156
+ bool useStaticWorldSize);
157
+
158
+ // Retrieve on-device tensors used to track locally unused parameters. It is
159
+ // a tensor where index i = 1 if the Variable with that index has been used.
160
+ at::Tensor get_local_used_map_on_device() const;
161
+
162
+ // An function for users to set sample_rate of collecting
163
+ // runtime stats. The time stats will be recorded for the
164
+ // first 10 iterations, after 10 iterations time stats will be
165
+ // recorded once every "sample_rate" training iterations.
166
+ void set_ddp_runtime_logging_sample_rate(int sample_rate);
167
+
168
+ // Specify the training graph is static.
169
+ void set_static_graph();
170
+
171
+ // Delay all reduce to be after all gradients' calculation is complete.
172
+ void delay_all_reduce();
173
+
174
+ void set_mixed_precision_param_dtype(c10::ScalarType dtype);
175
+
176
+ // Weak reference to associated DDP logger. The reference is weak to avoid
177
+ // refcycle between reducer and logger.
178
+ void set_logger(std::weak_ptr<c10d::Logger> logger);
179
+
180
+ // When graph is not explicitly set by user as static and has unused
181
+ // parameters, this will return whether the graph has been static until the
182
+ // current iteration, which means unused params set has not changed.
183
+ bool ddp_graph_static();
184
+
185
+ // Removes autograd hooks registered by the Reducer on the model parameters.
186
+ void remove_autograd_hooks();
187
+
188
+ // Checks whether or not the reducer has finalized the current backward
189
+ // iteration.
190
+ void check_finalized();
191
+
192
+ // Updates the underlying process group used by DDP with the new process
193
+ // group.
194
+ void update_process_group(
195
+ c10::intrusive_ptr<c10d::ProcessGroup> new_process_group);
196
+
197
+ // Resets reducer state.
198
+ void reset_state();
199
+
200
+ protected:
201
+ // Forward declaration.
202
+ struct Bucket;
203
+
204
+ void push_rebuilt_params(const size_t& index);
205
+
206
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
207
+ mutable std::mutex mutex_;
208
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
209
+ const std::vector<at::Tensor> params_;
210
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
211
+ c10::intrusive_ptr<::c10d::ProcessGroup> process_group_;
212
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
213
+ std::vector<bool> expect_sparse_gradients_;
214
+
215
+ std::vector<std::shared_ptr<torch::autograd::Node>>
216
+ grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
217
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
218
+ std::unordered_map<torch::autograd::Node*, size_t> gradAccToVariableMap_;
219
+ std::vector<std::pair<uintptr_t, std::shared_ptr<torch::autograd::Node>>>
220
+ hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
221
+
222
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
223
+ bool expect_autograd_hooks_;
224
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
225
+ bool require_finalize_;
226
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
227
+ size_t next_bucket_;
228
+
229
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
230
+ bool has_marked_unused_parameters_;
231
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
232
+ const bool find_unused_parameters_;
233
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
234
+ const bool gradient_as_bucket_view_;
235
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
236
+ std::vector<size_t> unused_parameters_;
237
+ // Previous iteration's unused params, used for checking if unused parameters
238
+ // change between iterations. Only filled during the first backwards call.
239
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
240
+ std::vector<size_t> prev_iteration_unused_parameters_;
241
+ // Whether graph is static or not. When user does not explicitly set static
242
+ // graph, the only possible dynamism is set of unused parameters changing
243
+ // between iterations which is tracked by this flag.
244
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
245
+ bool ddp_graph_static_{true};
246
+ // Locally used parameter maps indicating if parameters are used locally
247
+ // during the current iteration or no_sync session if no_sync is on.
248
+ // Each map is a one-dim int32 tensor of number of parameters. These tensors
249
+ // are marked in autograd_hook to indicate the corresponding param has been
250
+ // used, and get allreduced in the end of backward step of current iteration
251
+ // or no_sync session for figuring out the globally unused parameters.
252
+ //
253
+ // local_used_map_: CPU tensor for bookkeeping locally used params
254
+ // local_used_map_dev_: dev tensor for reducing globally unused params
255
+ at::Tensor local_used_map_;
256
+ at::Tensor local_used_map_dev_;
257
+ // Indicate that reduction is done and D2H copy is done as well.
258
+ bool local_used_map_reduced_;
259
+
260
+ // Weak pointer to associated DDP logger.
261
+ std::weak_ptr<c10d::Logger> logger_;
262
+ // List of futures installed by Reducer::install_futures that should be
263
+ // awaited at the end of backwards pass.
264
+ c10::optional<c10::List<c10::intrusive_ptr<c10::ivalue::Future>>>
265
+ installed_futures_{c10::nullopt};
266
+ // Mixed precision parameter dtype for bucket type checking.
267
+ c10::optional<c10::ScalarType> mixed_precision_param_dtype_{c10::nullopt};
268
+
269
+ // Work handle for allreduce on local_used_map_
270
+ c10::intrusive_ptr<c10d::Work> local_used_work_;
271
+
272
+ void mark_variable_ready_dense(size_t variable_index);
273
+
274
+ void mark_variable_ready_sparse(size_t variable_index);
275
+
276
+ void mark_variable_ready(size_t variable_index);
277
+
278
+ void mark_bucket_ready(size_t bucket_index);
279
+
280
+ void finalize_bucket_dense(Bucket& bucket);
281
+
282
+ void finalize_backward();
283
+
284
+ // Returns list of model parameters corresponding to the given bucket.
285
+ // bucket_index is a key to cache after buckets are rebuilt, after which this
286
+ // mapping never changes.
287
+ std::vector<at::Tensor> get_variables_for_bucket(
288
+ size_t bucket_index,
289
+ const Bucket& bucket) const;
290
+
291
+ // Asserts that the reduction for the previous iteration has finished before
292
+ // rebuilding buckets or kicking off the next one.
293
+ void ensure_prior_reduction_finished();
294
+
295
+ // Broadcast rebuilt buckets from rank 0 to other ranks before initializing
296
+ // the buckets
297
+ void sync_bucket_indices(std::vector<std::vector<size_t>>& bucket_indices);
298
+
299
+ // We'd like to use DistAutogradContext::GradCallback here but dist autograd
300
+ // doesn't exist under Windows. So we just directly use the concrete type but
301
+ // to preserve and enforce our original intent we do a static assert when dist
302
+ // autograd is available.
303
+ using GradCallback = std::function<bool(at::Tensor&)>;
304
+ #ifndef _WIN32
305
+ static_assert(
306
+ std::is_same<
307
+ GradCallback,
308
+ torch::distributed::autograd::DistAutogradContext::GradCallback>::
309
+ value,
310
+ "");
311
+ #endif
312
+ void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);
313
+
314
+ // This function is called inside `initialize_buckets()`. It initializes both
315
+ // `bucket_views_in` and `bucket_views_out` with views for each variable's
316
+ // gradient into the bucket's flattened `gradients` tensor. Views serve as
317
+ // entry points to `copy_()` each grad's data in/out of the flattened
318
+ // `gradients` tensor.
319
+ void initialize_bucket_views(Bucket& bucket);
320
+
321
+ // This function is called inside `finalize_backward`, it happens only if
322
+ // DDP communication hook was registered to recreate just bucket_views_out
323
+ // with the result of `future_work`.
324
+ void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor);
325
+
326
+ // If gradient_as_bucket_view_ is false, after allreduce buckets,
327
+ // copy bucket results back to grads.
328
+ void copy_bucket_to_grad(
329
+ at::Tensor& variable,
330
+ Reducer::Bucket& bucket,
331
+ size_t intra_bucket_index,
332
+ bool global_unused);
333
+ // Check layout of grad and bucket_view before copying the grad to bucket.
334
+ void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view);
335
+
336
+ // A bucket contains [1..N] gradients to be reduced, where the gradients
337
+ // have the same dtype and device.
338
+ // Coalescing gradients together before reducing can result in lower overhead
339
+ // and/or faster time to completion. Coalescing requires the constituent
340
+ // gradients to have the same dtype and device, and the resulting flattened
341
+ // tensor uses that common dtype and device. The flattened tensor is filled
342
+ // as the corresponding gradients are computed (triggered by autograd hooks),
343
+ // and the buckets are reduced in a predetermined order consistent across
344
+ // processes.
345
+ struct Bucket {
346
+ // Gradients of the bucket flattened into a 1-dimensional tensor
347
+ at::Tensor gradients;
348
+
349
+ // Views into the `gradients` tensor for each individual gradient
350
+ // Each view is created with layout (size and stride) matching the
351
+ // gradient's expected layout (see the "Gradient Layout Contract" in
352
+ // torch/csrc/autograd/functions/accumulate_grad.h).
353
+ // `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])`
354
+ // provide convenient ways to copy gradient data in/out of `gradients`,
355
+ // respectively.
356
+ // We keep both `bucket_views_in` and `bucket_views_out` because
357
+ // registering a DDP communication hook may re-initialize
358
+ // `bucket_views_out` with the value of the hook's `future_work` but we
359
+ // still need separate views into the bucket's original flattened gradient
360
+ // to copy in gradient data.
361
+ std::vector<at::Tensor> bucket_views_in;
362
+ std::vector<at::Tensor> bucket_views_out;
363
+
364
+ // Variables whose gradients are held in this bucket
365
+ // We use refcounted tensors here so that we can easily unflatten the
366
+ // bucket's flattened `gradients` tensor into the participating variables
367
+ // after reduction has completed.
368
+ std::vector<at::Tensor> variables;
369
+
370
+ // Per-variable offset/length into the flattened `gradients` tensor and
371
+ // the corresponding `GradBucket` instance for communication hooks
372
+ std::vector<size_t> offsets;
373
+ std::vector<size_t> lengths;
374
+
375
+ // Per-variable sizes slicing into the bucket's `gradients` tensor
376
+ std::vector<c10::IntArrayRef> sizes_vec;
377
+
378
+ // Number of gradients left to be computed before the bucket is ready to
379
+ // be reduced
380
+ size_t pending;
381
+
382
+ // Global indices of participating variables in the bucket
383
+ std::vector<size_t> variable_indices;
384
+
385
+ // Future work handle for DDP communication hook
386
+ // If no hook is registered, a temporary vanilla allreduce hook is used.
387
+ c10::intrusive_ptr<at::ivalue::Future> future_work;
388
+
389
+ // If this bucket should expect a single sparse gradient
390
+ // If `true`, then this implies that `bucket.variables.size() == 1`.
391
+ bool expect_sparse_gradient = false;
392
+
393
+ // Sparse indices tensor
394
+ c10::optional<at::Tensor> sparse_tensor_indices = c10::nullopt;
395
+
396
+ // TODO(@pietern)
397
+ // Memory copies from gradient tensors into the bucket are potentially
398
+ // done on different CUDA streams. We record an event for every copy
399
+ // so that we can synchronize with them prior to kicking off the reduction.
400
+ // std::vector<at::cuda::CUDAEvent> events;
401
+ };
402
+
403
+ std::vector<Bucket> buckets_;
404
+
405
+ // A variable locator locates a particular variable in the reducer's buckets
406
+ struct VariableLocator {
407
+ // Index of the bucket containing the variable in the `buckets_` vector
408
+ size_t bucket_index;
409
+ // Index of the variable in the bucket, which may be used consistently
410
+ // across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`,
411
+ // `lengths`, `sizes_vec`, and `variable_indices` in `Bucket`
412
+ size_t intra_bucket_index;
413
+
414
+ VariableLocator() = default;
415
+
416
+ VariableLocator(size_t bucket_index_, size_t intra_bucket_index_)
417
+ : bucket_index(bucket_index_),
418
+ intra_bucket_index(intra_bucket_index_) {}
419
+ };
420
+
421
+ // Map the index of a variable to its location in the bucket structure.
422
+ std::vector<VariableLocator> variable_locators_;
423
+
424
+ // track the number of iterations to synchronize grads in training so far.
425
+ long num_iterations_;
426
+ // track distinct iteration of backward call. This is distinct from
427
+ // num_iterations_, for example in the case of multiple forward before
428
+ // backward.
429
+ long num_bwd_calls_;
430
+ // whether the first autograd hook for a distinct backward pass has been
431
+ // called.
432
+ bool first_autograd_hook_called_;
433
+ // track the number of buckets that have been ready for
434
+ // communication calls like allReduce or communication hooks.
435
+ int num_buckets_ready_;
436
+
437
+ // Timing information.
438
+ int64_t backward_compute_start_time_ = -1;
439
+ std::unique_ptr<Timer> timer_;
440
+
441
+ // We collect the relative timestamp of every gradient being ready
442
+ // when executing autograd. This can be used to derive a timeline of
443
+ // the point in time buckets were ready, or ideal bucket assignment/ordering.
444
+ std::vector<int64_t> backward_stats_;
445
+
446
+ bool should_collect_runtime_stats();
447
+ void record_forward_compute_start_time();
448
+ void record_backward_compute_start_time();
449
+ void record_backward_compute_end_time();
450
+ void record_backward_comm_start_time();
451
+ void record_backward_comm_end_time();
452
+
453
+ int get_ddp_runtime_logging_sample_rate();
454
+ int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate;
455
+
456
+ bool is_multi_device_module_ = false;
457
+
458
+ // Following variables are to help build dynamic bucket order
459
+ bool has_rebuilt_bucket_;
460
+ std::vector<at::Tensor> rebuilt_params_;
461
+ std::vector<int64_t> rebuilt_param_indices_;
462
+ const int64_t bucket_bytes_cap_;
463
+
464
+ #ifndef _WIN32
465
+ struct RpcContext {
466
+ using ContextPtr = torch::distributed::autograd::ContextPtr;
467
+ // The shared_ptr is to hold the context instance.
468
+ ContextPtr context_ptr_holder;
469
+ std::atomic<ContextPtr::element_type*> context_ptr{nullptr};
470
+
471
+ void set(ContextPtr&& new_context_ptr);
472
+ };
473
+ RpcContext rpc_context_;
474
+ #endif
475
+
476
+ // A struct containing work handle and tensor for allreduce scheduled in
477
+ // forward pass, if applicable.
478
+ struct ForwardPassAllreduceWork {
479
+ c10::intrusive_ptr<c10d::Work> workHandle;
480
+ at::Tensor resultTensor;
481
+ // whether we should divide by the initial world_size or the no. of
482
+ // remaining DDP ranks.
483
+ bool useStaticWorldSize;
484
+ };
485
+
486
+ // Handle for the currently scheduled allreduce in the forward pass, if
487
+ // applicable.
488
+ ForwardPassAllreduceWork forwardPassWorkHandle_;
489
+
490
+ // Division factor for reduction of gradients.
491
+ // Equal to the process group size, with an exception of handling uneven
492
+ // input.
493
+ int div_factor_;
494
+
495
+ bool static_graph_;
496
+
497
+ // Key: size_t (index), Value: the number of times that a variable's
498
+ // autograd_hook() should be triggered before marking this variable's grad as
499
+ // ready for communication. Map will not change after 1st iteration.
500
+ std::unordered_map<size_t, int> numGradHooksTriggeredMap_;
501
+ // Key: size_t (index), Value: the number of times that a variable's
502
+ // autograd_hook() are left to be triggered before marking this variable's
503
+ // grad as ready for communication. Map will change after 1st iteration to
504
+ // track a grad is ready for communication or not.
505
+ std::unordered_map<size_t, int> numGradHooksTriggeredMapPerIteration_;
506
+
507
+ private:
508
+ // reset counting for buckets before backward starts
509
+ void reset_bucket_counting();
510
+ // search unused parameters beore backward starts
511
+ void search_unused_parameters(
512
+ const std::vector<torch::autograd::Variable>& outputs);
513
+ void set_divide_factor();
514
+ // kick off all reduce for the ready bucket
515
+ void all_reduce_bucket(Bucket& bucket);
516
+ // kick off all reduce to local used map, it can help find global unused
517
+ // parameters
518
+ void all_reduce_local_used_map();
519
+ // initialize locally used parameter maps
520
+ void initialize_local_used_map();
521
+ // get current cuda stream
522
+ const c10::Stream get_current_stream();
523
+ bool dynamic_graph_find_unused();
524
+ bool static_graph_first_iteration();
525
+ bool static_graph_after_first_iteration();
526
+
527
+ // comm_hook_ is used to access the DDP communication hook if registered.
528
+ std::unique_ptr<CommHookInterface> comm_hook_;
529
+
530
+ // Sparse metadata contains the indices that will be used
531
+ // when calling into sparse allreduce.
532
+ // This is only used in the sparse allreduce collective calls
533
+ std::unique_ptr<std::map<std::string, at::Tensor>> sparse_metadata_;
534
+
535
+ // Debug level setting. It is parsed once when Reducer is constructed, and
536
+ // remains the same across a single invocation of DDP training.
537
+ DebugLevel ddp_debug_level_;
538
+ // Mapping of variable index to fully qualified name of model to notify users
539
+ // about errors when certain parameters do not get gradient.
540
+ std::unordered_map<size_t, std::string> param_names_;
541
+ // Variable indices stored sequentially in order of when the gradient is ready
542
+ // for the current backwards pass.
543
+ std::vector<int> grad_ready_order_indices_;
544
+ // Bytes capacity of first bucket, can be configured by user
545
+ int64_t first_bucket_bytes_cap_;
546
+ // Per iteration set of parameter indices that have been marked ready.
547
+ std::unordered_set<size_t> perIterationReadyParams_;
548
+ // Retrieves parameter names that have not been marked as ready as part of
549
+ // previous iteration.
550
+ std::vector<std::string> getUnmarkedParamsForIteration();
551
+ // Retrieves parameter indices that have not been marked as ready as part of
552
+ // previous iteration.
553
+ std::vector<size_t> getUnmarkedParamIndicesForIteration();
554
+ // Raises appropriate error if mark_variable_ready is called on the same
555
+ // variable twice, which is unexpected.
556
+ void checkAndRaiseMarkedTwiceError(size_t curVariableIndex);
557
+ // Retrieves parameter corresponding to the given VariableIndex.
558
+ at::Tensor& get_param_from_index(size_t index);
559
+
560
+ // Cached bucket index to model parameter mapping. Populated after buckets
561
+ // are rebuilt after which this mapping is static.
562
+ mutable std::unordered_map<size_t, std::vector<at::Tensor>>
563
+ cached_variables_for_bucket_;
564
+
565
+ bool optim_in_backward_{false};
566
+ friend class Logger;
567
+ };
568
+
569
+ // This is equivalent to take_tensors but returns indices into the
570
+ // tensor list argument for bucket assignment. Also, it is aware
571
+ // of device placement and will not allow buckets to span devices.
572
+ // The index of tensors[i] assigned to bucket is tensor_indices[i],
573
+ // when tensor_indices is empty, the index of tensors[i] assigned to
574
+ // bucket is i.
575
+ TORCH_API std::tuple<std::vector<std::vector<size_t>>, std::vector<size_t>>
576
+ compute_bucket_assignment_by_size(
577
+ const std::vector<at::Tensor>& tensors,
578
+ const std::vector<size_t>& bucket_size,
579
+ const std::vector<bool>& expect_sparse_gradient = {},
580
+ const std::vector<int64_t>& tensor_indices = {},
581
+ const c10::optional<std::weak_ptr<c10d::Logger>>& logger = {});
582
+
583
+ // Verify models across all processes are the same as model on rank 0 with
584
+ // respect to no. of params and matching dtype/size/layout.
585
+ TORCH_API void verify_params_across_processes(
586
+ const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
587
+ const std::vector<at::Tensor>& params,
588
+ const c10::optional<std::weak_ptr<c10d::Logger>>& logger);
589
+ } // namespace c10d
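A sketch of driving the bucket-assignment helper declared above, assuming `params` is a std::vector<at::Tensor> of model parameters (typically in reverse registration order); the size limits reuse the constants from this header:

    std::vector<size_t> bucket_size_limits = {
        static_cast<size_t>(c10d::kDefaultFirstBucketBytes),
        static_cast<size_t>(c10d::kDefaultBucketBytesCap)};

    // bucket_indices[i] lists the indices into `params` placed in bucket i;
    // per_bucket_sizes[i] is the size limit that was applied to that bucket.
    auto [bucket_indices, per_bucket_sizes] =
        c10d::compute_bucket_assignment_by_size(params, bucket_size_limits);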
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp ADDED
@@ -0,0 +1,81 @@
+ #pragma once
+ #include <c10/util/ApproximateClock.h>
+ #include <torch/csrc/autograd/profiler.h>
+
+ namespace c10d {
+ constexpr int kUnsetTime = -1;
+
+ inline int64_t current_time_in_nanos() {
+ return c10::getTime();
+ }
+
+ class TORCH_API Timer {
+ private:
+ // The timestamp of forward call start time in each iteration.
+ int64_t forward_start_time = kUnsetTime;
+ // The timestamp of backward computation start and end time in each
+ // iteration.
+ int64_t backward_compute_start_time = kUnsetTime;
+ int64_t backward_compute_end_time = kUnsetTime;
+ // The timestamp of first communication call start time in each iteration.
+ int64_t backward_comm_start_time = kUnsetTime;
+ // The timestamp of last communication call end time in each iteration.
+ int64_t backward_comm_end_time = kUnsetTime;
+
+ public:
+ enum class Event {
+ kForwardStart,
+ kBackwardComputeStart,
+ kBackwardComputeEnd,
+ kBackwardCommStart,
+ kBackwardCommEnd,
+ };
+
+ // Record the current event, i.e., mark it as having occurred now. Default
+ // CPU implementation.
+ virtual void record(Event event) {
+ getTimeRef(event) = current_time_in_nanos();
+ }
+
+ // Return the difference between when two events occurred, in nanoseconds.
+ // Or nullopt if one of them hasn't been recorded.
+ virtual c10::optional<int64_t> measureDifference(Event start, Event end) = 0;
+
+ virtual ~Timer() = default;
+
+ // Return host-side timestamp, or nullopt if it has not yet been recorded.
+ c10::optional<int64_t> getTimestamp(Event event) {
+ auto time = getTimeRef(event);
+ if (time == kUnsetTime) {
+ return c10::nullopt;
+ } else {
+ return time;
+ }
+ }
+
+ // Return host-side time member variable corresponding to the given event.
+ int64_t& getTimeRef(Event event) {
+ switch (event) {
+ case Event::kForwardStart:
+ return forward_start_time;
+ case Event::kBackwardComputeStart:
+ return backward_compute_start_time;
+ case Event::kBackwardComputeEnd:
+ return backward_compute_end_time;
+ case Event::kBackwardCommStart:
+ return backward_comm_start_time;
+ case Event::kBackwardCommEnd:
+ return backward_comm_end_time;
+ default:
+ TORCH_INTERNAL_ASSERT(false);
+ }
+ }
+ };
+
+ TORCH_DECLARE_TYPED_REGISTRY(
+ TimerRegistry,
+ c10::DeviceType,
+ Timer,
+ std::unique_ptr,
+ c10::Device);
+ } // namespace c10d
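A minimal host-side Timer subclass built on getTimeRef; the class name is illustrative, and the registration shown in the trailing comment is a guess at how the typed registry above would be used:

    class CpuTimer : public c10d::Timer {
     public:
      explicit CpuTimer(c10::Device /* device */) {}

      c10::optional<int64_t> measureDifference(Event start, Event end) override {
        const int64_t start_time = getTimeRef(start);
        const int64_t end_time = getTimeRef(end);
        // Events may be left unset (e.g. no communication happened in this
        // iteration), so report "no measurement" rather than a bogus delta.
        if (start_time == c10d::kUnsetTime || end_time == c10d::kUnsetTime) {
          return c10::nullopt;
        }
        return end_time - start_time;
      }
    };

    // Presumably registered for CPU devices along the lines of:
    // C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer);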
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/irange.h>
+ #include <vector>
+
+ namespace c10d {
+ const int kUnsetSeqNum = 0;
+
+ namespace {
+ constexpr int kByteOffset = 8;
+ }
+
+ // Converts from int to char vec to write in store
+ template <typename T>
+ inline std::vector<T> toVec(uint64_t num, int numBytes) {
+ std::vector<T> values;
+ // Read off bytes from right to left, pushing them into
+ // char array.
+ for (const auto i : c10::irange(numBytes)) {
+ uint8_t x = (num >> (kByteOffset * i)) & 0xff;
+ values.push_back(static_cast<T>(x));
+ }
+ return values;
+ }
+
+ // Converts from char vec (such as from store read) to int.
+ template <typename T>
+ inline uint64_t fromVec(const std::vector<T>& values) {
+ uint64_t num = 0;
+ // Set each byte at the correct location on num
+ for (const auto i : c10::irange(values.size())) {
+ uint8_t x = static_cast<uint8_t>(values[i]);
+ num |= (static_cast<int64_t>(x) << (kByteOffset * i));
+ }
+ return num;
+ }
+
+ class TORCH_API SequenceNum {
+ public:
+ SequenceNum();
+ explicit SequenceNum(const uint64_t num);
+ // Retrieve num_. Will throw if not set.
+ uint64_t get() const;
+ // Increment num_. Will throw if not set.
+ void increment();
+ // Increment num_ and return the old value. Will throw if not set.
+ uint64_t getAndIncrement();
+ // Sets num_
+ void set(const uint64_t num);
+ // Returns true if this SequenceNum is properly initialized with a value, else
+ // false.
+ bool isSet() const;
+
+ SequenceNum& operator=(const SequenceNum& other);
+
+ SequenceNum(const SequenceNum& other);
+
+ private:
+ c10::optional<uint64_t> num_;
+ mutable std::mutex lock_;
+ };
+
+ } // namespace c10d
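The two byte-packing helpers are inverses for widths up to eight bytes; a quick round-trip sketch of writing a sequence number through a Store:

    // Serialize a 64-bit sequence number into 8 bytes (least-significant byte
    // first) for a Store write, then reconstruct it on the reader side.
    uint64_t seq = 0x0102030405060708ULL;
    std::vector<uint8_t> bytes = c10d::toVec<uint8_t>(seq, 8);
    uint64_t decoded = c10d::fromVec<uint8_t>(bytes);
    // decoded == seq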
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h ADDED
@@ -0,0 +1,93 @@
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+
+ #include <chrono>
+ #include <cstdint>
+ #include <memory>
+ #include <string>
+
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Exception.h>
+ #include <torch/csrc/distributed/c10d/exception.h>
+
+ namespace c10d {
+ namespace detail {
+
+ class SocketOptions {
+ public:
+ SocketOptions& prefer_ipv6(bool value) noexcept {
+ prefer_ipv6_ = value;
+
+ return *this;
+ }
+
+ bool prefer_ipv6() const noexcept {
+ return prefer_ipv6_;
+ }
+
+ SocketOptions& connect_timeout(std::chrono::seconds value) noexcept {
+ connect_timeout_ = value;
+
+ return *this;
+ }
+
+ std::chrono::seconds connect_timeout() const noexcept {
+ return connect_timeout_;
+ }
+
+ private:
+ bool prefer_ipv6_ = true;
+ std::chrono::seconds connect_timeout_{30};
+ };
+
+ class SocketImpl;
+
+ class Socket {
+ public:
+ // This function initializes the underlying socket library and must be called
+ // before any other socket function.
+ static void initialize();
+
+ static Socket listen(std::uint16_t port, const SocketOptions& opts = {});
+
+ static Socket listenFromFd(int fd, std::uint16_t expected_port);
+
+ static Socket connect(
+ const std::string& host,
+ std::uint16_t port,
+ const SocketOptions& opts = {});
+
+ Socket() noexcept = default;
+
+ Socket(const Socket& other) = delete;
+
+ Socket& operator=(const Socket& other) = delete;
+
+ Socket(Socket&& other) noexcept;
+
+ Socket& operator=(Socket&& other) noexcept;
+
+ ~Socket();
+
+ Socket accept() const;
+
+ int handle() const noexcept;
+
+ std::uint16_t port() const;
+
+ bool waitForInput(std::chrono::milliseconds timeout);
+
+ private:
+ explicit Socket(std::unique_ptr<SocketImpl>&& impl) noexcept;
+
+ std::unique_ptr<SocketImpl> impl_;
+ };
+
+ } // namespace detail
+
+ } // namespace c10d
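A client-side sketch of this internal helper, assuming a rendezvous endpoint at a placeholder host and port (the class lives in c10d::detail and is not a public API):

    c10d::detail::Socket::initialize();

    auto opts = c10d::detail::SocketOptions{}
                    .prefer_ipv6(true)
                    .connect_timeout(std::chrono::seconds(60));

    c10d::detail::Socket client =
        c10d::detail::Socket::connect("127.0.0.1", 29500, opts);

    // Block for up to one second waiting for readable data on the socket.
    bool ready = client.waitForInput(std::chrono::milliseconds(1000));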
venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/c10d/PrefixStore.hpp>
+ #include <torch/csrc/distributed/rpc/utils.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace rpc {
+
+ // All RPC peers should call into this function at the same time. Each peer
+ // provides its own id and name, and this function uses the given Store to
+ // gather the global name-to-id mapping on all peers.
+ TORCH_API std::unordered_map<std::string, worker_id_t> collectNames(
+ ::c10d::PrefixStore store,
+ const worker_id_t selfId,
+ const std::string& selfName,
+ const int worldSize);
+
+ // Ranks in dynamic RPC groups will initially call into this to establish the
+ // name-to-id mapping for the current peers in the group. The current rank will
+ // put its own worker info in the store and discover all the ranks that came
+ // before it. NOTE: This needs to be called with the Dynamic RPC group
+ // membership management token held.
+ TORCH_API std::unordered_map<std::string, worker_id_t> collectCurrentNames(
+ ::c10d::PrefixStore store,
+ const worker_id_t selfId,
+ const std::string& selfName);
+
+ // Removes a name from the Store, used in dynamic RPC groups.
+ // NOTE: This needs to be called with the Dynamic RPC group
+ // membership management token held.
+ TORCH_API void removeCurrentName(
+ ::c10d::PrefixStore store,
+ const worker_id_t selfId,
+ const std::string& selfName);
+
+ // This performs a synchronization of all call counts by using store.
+ // All RPC peers wait for others to join to exit at the same time.
+ TORCH_API int syncCallCount(
+ ::c10d::PrefixStore store,
+ const int worldSize,
+ int activeCalls = 0);
+
+ } // namespace rpc
+ } // namespace distributed
+ } // namespace torch
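A sketch of gathering the worker name/id map at agent construction, assuming `store` is a ::c10d::PrefixStore shared by all peers; the rank, name, and world size are placeholders:

    const torch::distributed::rpc::worker_id_t selfId = 0;
    const std::string selfName = "worker0";
    const int worldSize = 2;

    // Blocks until all worldSize peers have published their names, then
    // returns the global name -> worker_id mapping.
    std::unordered_map<std::string, torch::distributed::rpc::worker_id_t>
        nameToId = torch::distributed::rpc::collectNames(
            store, selfId, selfName, worldSize);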