applied-ai-018 committed on
Commit c3cc40c · verified · 1 Parent(s): ea5af34

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h +142 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h +18 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h +19 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h +10 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h +52 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h +10 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h +26 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h +218 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h +7 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h +13 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h +167 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h +174 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h +49 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h +37 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h +25 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h +29 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h +23 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h +42 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h +24 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h +98 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h +62 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h +59 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h +39 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h +21 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp +383 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp +63 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp +32 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp +14 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp +61 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp +64 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +918 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp +113 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp +353 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp +140 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp +73 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp +161 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp +77 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h +543 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp +58 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp +27 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp +729 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp +161 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h +13 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp +140 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h +23 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp +52 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp +104 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h +51 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp +589 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp +65 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h ADDED
@@ -0,0 +1,142 @@
+ #pragma once
+
+ #include <c10/core/Allocator.h>
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
+ #include <c10/cuda/CUDAMacros.h>
+ #include <c10/cuda/CUDAStream.h>
+
+ #include <c10/cuda/CUDACachingAllocator.h>
+
+ #include <array>
+ #include <mutex>
+
+ namespace torch::cuda::CUDAPluggableAllocator {
+
+ #if defined(TORCH_HIP_VERSION)
+ using streamType = c10::hip::HIPStream;
+ #else
+ using streamType = c10::cuda::CUDAStream;
+ #endif
+
+ std::shared_ptr<c10::cuda::CUDACachingAllocator::CUDAAllocator>
+ getCurrentAllocator();
+ std::shared_ptr<c10::cuda::CUDACachingAllocator::CUDAAllocator>
+ createCustomAllocator(
+ std::function<void*(size_t, int, cudaStream_t)> alloc_fn,
+ std::function<void(void*, size_t, int, cudaStream_t)> free_fn);
+ void changeCurrentAllocator(
+ const std::shared_ptr<c10::cuda::CUDACachingAllocator::CUDAAllocator>&
+ allocator);
+
+ struct _AllocationMetadata {
+ _AllocationMetadata();
+ _AllocationMetadata(size_t size, int device_idx, cudaStream_t stream);
+ size_t size;
+ int device_idx;
+ cudaStream_t stream;
+ };
+
+ struct CUDAPluggableAllocator
+ : public c10::cuda::CUDACachingAllocator::CUDAAllocator {
+ CUDAPluggableAllocator(
+ std::function<void*(size_t, int, cudaStream_t)> alloc_fn,
+ std::function<void(void*, size_t, int, cudaStream_t)> free_fn);
+
+ CUDAPluggableAllocator(CUDAPluggableAllocator& other);
+
+ void set_init_fn(std::function<void(int)> init_fn);
+
+ void set_reset_fn(std::function<void()> reset_fn);
+
+ void set_memory_fraction_fn(
+ std::function<void(double, int)> memory_fraction_fn);
+
+ void set_base_alloc_fn(std::function<void*(void*, size_t*)> base_alloc_fn);
+
+ void set_record_stream_fn(
+ std::function<void(void* ptr, cudaStream_t stream)> record_stream_fn);
+
+ void set_begin_allocate_stream_to_pool(
+ std::function<void(int, cudaStream_t, c10::cuda::MempoolId_t)>
+ capture_begin_fn);
+
+ void set_end_allocate_stream_to_pool_fn(
+ std::function<void(int, cudaStream_t)> capture_about_to_end_fn);
+
+ void set_release_pool(
+ std::function<void(int, c10::cuda::MempoolId_t)> capture_destroy_fn);
+
+ void* malloc(size_t size, int device, cudaStream_t stream);
+
+ c10::DataPtr allocate(size_t size) const override;
+ c10::DeleterFnPtr raw_deleter() const override;
+
+ void* raw_alloc(size_t nbytes) override;
+ void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) override;
+ void raw_delete(void* ptr) override;
+ void init(int device_count) override;
+ bool initialized() override;
+ void setMemoryFraction(double fraction, int device) override;
+ void emptyCache() override;
+ void cacheInfo(int dev_id, size_t* largestBlock) override;
+ void* getBaseAllocation(void* ptr, size_t* size) override;
+
+ void recordStream(const c10::DataPtr&, streamType stream) override;
+
+ c10::cuda::CUDACachingAllocator::DeviceStats getDeviceStats(
+ int device) override;
+ void resetAccumulatedStats(int device) override;
+ void resetPeakStats(int device) override;
+ c10::cuda::CUDACachingAllocator::SnapshotInfo snapshot() override;
+ void beginAllocateStreamToPool(
+ int device,
+ cudaStream_t stream,
+ c10::cuda::MempoolId_t mempool_id) override;
+ void endAllocateStreamToPool(int device, cudaStream_t stream) override;
+ void releasePool(int device, c10::cuda::MempoolId_t mempool_id) override;
+ std::shared_ptr<void> getIpcDevPtr(std::string handle) override;
+ void recordHistory(
+ bool enabled,
+ c10::cuda::CUDACachingAllocator::CreateContextFn context_recorder,
+ size_t alloc_trace_max_entries,
+ c10::cuda::CUDACachingAllocator::RecordContext when) override;
+ void attachOutOfMemoryObserver(
+ c10::cuda::CUDACachingAllocator::OutOfMemoryObserver observer) override;
+ void attachAllocatorTraceTracker(
+ c10::cuda::CUDACachingAllocator::AllocatorTraceTracker tracker) override;
+ std::shared_ptr<c10::cuda::CUDACachingAllocator::AllocatorState>
+ getCheckpointState(int device, at::cuda::MempoolId_t id) override;
+ c10::cuda::CUDACachingAllocator::CheckpointDelta setCheckpointPoolState(
+ int device,
+ std::shared_ptr<c10::cuda::CUDACachingAllocator::AllocatorState> pps)
+ override;
+ void enablePeerAccess(int dev, int dev_to_access) override;
+ cudaError_t memcpyAsync(
+ void* dst,
+ int dstDevice,
+ const void* src,
+ int srcDevice,
+ size_t count,
+ cudaStream_t stream,
+ bool p2p_enabled) override;
+ std::string name() override;
+
+ protected:
+ std::function<void*(size_t, int, cudaStream_t)> alloc_fn_;
+ std::function<void(void*, size_t, int, cudaStream_t)> free_fn_;
+ std::function<void(int)> init_fn_;
+ std::function<void()> reset_fn_;
+ std::function<void(double, int)> memory_fraction_fn_;
+ std::function<void*(void*, size_t*)> base_alloc_fn_;
+ std::function<void(void* ptr, cudaStream_t stream)> record_stream_fn_;
+ std::function<void(int, cudaStream_t, c10::cuda::MempoolId_t)>
+ begin_allocate_stream_to_pool_fn_;
+ std::function<void(int, cudaStream_t)> end_allocate_stream_to_pool_fn_;
+ std::function<void(int, c10::cuda::MempoolId_t)> relase_pool_fn_;
+ std::mutex allocator_mutex_;
+ // We do the bookkeeping here in order to simplify custom allocators
+ std::unordered_map<void*, _AllocationMetadata> allocation_metadata_;
+
+ bool initialized_ = false;
+ };
+ } // namespace torch::cuda::CUDAPluggableAllocator
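
A minimal usage sketch for the hooks declared in this header, assuming a CUDA toolkit is on the include path; my_alloc, my_free, and install_custom_allocator are illustrative names, not part of the header:

#include <cuda_runtime_api.h>
#include <torch/csrc/cuda/CUDAPluggableAllocator.h>

// Raw allocation callback matching std::function<void*(size_t, int, cudaStream_t)>.
void* my_alloc(size_t size, int device, cudaStream_t stream) {
  void* ptr = nullptr;
  cudaSetDevice(device);   // allocate on the device the caching allocator asked for
  cudaMalloc(&ptr, size);  // plain cudaMalloc; a real allocator might pool or reuse blocks
  return ptr;
}

// Raw free callback matching std::function<void(void*, size_t, int, cudaStream_t)>.
void my_free(void* ptr, size_t size, int device, cudaStream_t stream) {
  cudaFree(ptr);
}

void install_custom_allocator() {
  auto allocator = torch::cuda::CUDAPluggableAllocator::createCustomAllocator(
      my_alloc, my_free);
  // Route subsequent CUDA caching-allocator traffic through the custom hooks.
  torch::cuda::CUDAPluggableAllocator::changeCurrentAllocator(allocator);
}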
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h ADDED
@@ -0,0 +1,18 @@
+ #ifndef THCP_EVENT_INC
+ #define THCP_EVENT_INC
+
+ #include <ATen/cuda/CUDAEvent.h>
+ #include <torch/csrc/python_headers.h>
+
+ struct THCPEvent {
+ PyObject_HEAD at::cuda::CUDAEvent cuda_event;
+ };
+ extern PyObject* THCPEventClass;
+
+ void THCPEvent_init(PyObject* module);
+
+ inline bool THCPEvent_Check(PyObject* obj) {
+ return THCPEventClass && PyObject_IsInstance(obj, THCPEventClass);
+ }
+
+ #endif // THCP_EVENT_INC
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h ADDED
@@ -0,0 +1,19 @@
+ #ifndef THCP_STREAM_INC
+ #define THCP_STREAM_INC
+
+ #include <c10/cuda/CUDAStream.h>
+ #include <torch/csrc/Stream.h>
+ #include <torch/csrc/python_headers.h>
+
+ struct THCPStream : THPStream {
+ at::cuda::CUDAStream cuda_stream;
+ };
+ extern PyObject* THCPStreamClass;
+
+ void THCPStream_init(PyObject* module);
+
+ inline bool THCPStream_Check(PyObject* obj) {
+ return THCPStreamClass && PyObject_IsInstance(obj, THCPStreamClass);
+ }
+
+ #endif // THCP_STREAM_INC
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h ADDED
@@ -0,0 +1,10 @@
+ #ifndef THCP_H
+ #define THCP_H
+
+ #include <torch/csrc/THP.h>
+ #include <torch/csrc/cuda/Event.h>
+ #include <torch/csrc/cuda/Module.h>
+ #include <torch/csrc/cuda/Stream.h>
+ #include <torch/csrc/python_headers.h>
+
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h ADDED
@@ -0,0 +1,52 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/cuda/ATenCUDAGeneral.h>
+ #include <ATen/cuda/CUDAContext.h>
+ #include <c10/util/Optional.h>
+ #include <torch/csrc/Export.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch::cuda {
+
+ using tensor_list2d = std::vector<std::vector<at::Tensor>>;
+
+ TORCH_CUDA_CU_API std::vector<at::Tensor>& broadcast_out(
+ const at::Tensor& tensor,
+ std::vector<at::Tensor>& out_tensors);
+ TORCH_CUDA_CU_API std::vector<at::Tensor> broadcast(
+ const at::Tensor& tensor,
+ at::IntArrayRef devices);
+ TORCH_CUDA_CU_API tensor_list2d broadcast_coalesced(
+ at::TensorList tensors,
+ at::IntArrayRef devices,
+ size_t buffer_size);
+
+ TORCH_CUDA_CU_API std::vector<at::Tensor>& scatter_out(
+ const at::Tensor& tensor,
+ std::vector<at::Tensor>& out_tensors,
+ int64_t dim = 0,
+ const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
+ streams = c10::nullopt);
+
+ TORCH_CUDA_CU_API std::vector<at::Tensor> scatter(
+ const at::Tensor& tensor,
+ at::IntArrayRef devices,
+ const c10::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
+ int64_t dim = 0,
+ const c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
+ streams = c10::nullopt);
+
+ TORCH_CUDA_CU_API at::Tensor& gather_out(
+ at::TensorList tensors,
+ at::Tensor& out_tensor,
+ int64_t dim);
+
+ TORCH_CUDA_CU_API at::Tensor gather(
+ at::TensorList tensors,
+ int64_t dim,
+ c10::optional<int32_t> destination_index);
+
+ } // namespace torch::cuda
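
A hedged sketch of how the helpers declared in this header compose, assuming two visible CUDA devices; demo_comm is an illustrative name:

#include <ATen/ATen.h>
#include <torch/csrc/cuda/comm.h>

void demo_comm() {
  at::Tensor t = at::arange(8, at::device(at::kCUDA).dtype(at::kFloat));

  // Replicate the same tensor onto devices 0 and 1.
  std::vector<at::Tensor> replicas = torch::cuda::broadcast(t, {0, 1});

  // Split the tensor into per-device chunks along dim 0 (default chunk sizes).
  std::vector<at::Tensor> chunks = torch::cuda::scatter(t, {0, 1});

  // Reassemble the chunks, concatenating along dim 0 onto device 0.
  at::Tensor gathered =
      torch::cuda::gather(chunks, /*dim=*/0, /*destination_index=*/0);
}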
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h ADDED
@@ -0,0 +1,10 @@
+ #pragma once
+
+ #include <bitset>
+
+ namespace torch {
+
+ static constexpr size_t MAX_CUDA_DEVICES = 64;
+ using device_set = std::bitset<MAX_CUDA_DEVICES>;
+
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ #include <c10/util/Optional.h>
+ #include <torch/csrc/Export.h>
+ #include <string>
+
+ namespace torch::cuda {
+
+ // C++-only versions of these; for Python use, see the versions defined in
+ // cuda/Module.cpp, which also record Python state.
+ TORCH_CUDA_CU_API void _record_memory_history(
+ bool enabled,
+ bool record_context = true,
+ int64_t trace_alloc_max_entries = 1,
+ bool trace_alloc_record_context = false,
+ bool record_cpp_context = false);
+
+ TORCH_CUDA_CU_API void _record_memory_history(
+ c10::optional<std::string> enabled = "all",
+ c10::optional<std::string> context = "all",
+ std::string stacks = "all",
+ size_t max_entries = UINT64_MAX);
+
+ TORCH_CUDA_CU_API std::string _memory_snapshot_pickled();
+
+ } // namespace torch::cuda
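
A small sketch of the C++-only entry points declared in this header; dump_memory_snapshot and the workload placeholder are illustrative, and the call resolves to the string-based overload above:

#include <torch/csrc/cuda/memory_snapshot.h>
#include <fstream>
#include <string>

void dump_memory_snapshot(const std::string& path) {
  // Start recording allocator events, keeping stack context for each entry.
  torch::cuda::_record_memory_history("all", "all", "all", /*max_entries=*/100000);

  // ... run the CUDA workload whose allocations should be captured ...

  // Serialize the snapshot in the same pickled format the Python tooling reads.
  std::ofstream out(path, std::ios::binary);
  out << torch::cuda::_memory_snapshot_pickled();
}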
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h ADDED
@@ -0,0 +1,218 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/cuda/CUDAContext.h>
+ #include <c10/util/Optional.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ // NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for
+ // HIP 3.1+
+ #if defined(__CUDA_BF16_TYPES_EXIST__)
+ #define HAS_NCCL_BF16_DATATYPE \
+ ((NCCL_MAJOR > 2) || (NCCL_MAJOR == 2) && (NCCL_MINOR >= 10))
+ #elif defined(USE_ROCM) && (TORCH_HIP_VERSION >= 301)
+ #define HAS_NCCL_BF16_DATATYPE 1
+ #else
+ #define HAS_NCCL_BF16_DATATYPE 0
+ #endif
+
+ namespace torch::cuda::nccl {
+
+ /* The following are copied from <nccl.h> and redefined in torch::cuda::nccl
+ * namespace */
+ /* pytorch should only use the following definition within pytorch scope */
+
+ /* Opaque handle to a communicator (ncclComm*); this will be reinterpreted as
+ * ncclComm in nccl.cpp */
+ typedef void* ncclComm_t;
+
+ /** redefine nccl unique ID in torch scope. this should be identical to native
+ * nccl impl. */
+ #define NCCL_UNIQUE_ID_BYTES 128
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ typedef struct {
+ char internal[NCCL_UNIQUE_ID_BYTES];
+ } ncclUniqueId;
+
+ /* Error type */
+ enum class ncclResult {
+ Success = 0,
+ UnhandledCudaError = 1,
+ SystemError = 2,
+ InternalError = 3,
+ InvalidArgument = 4,
+ InvalidUsage = 5,
+ NumResults = 6,
+ InProgress = 7
+ };
+
+ /* Reduction operation selector */
+ enum class ncclRedOp { Sum = 0, Prod = 1, Max = 2, Min = 3, NumOps = 4 };
+
+ /* Data types */
+ enum class ncclDataType {
+ Int8 = 0,
+ Char = 0,
+ Uint8 = 1,
+ Int32 = 2,
+ Int = 2,
+ Uint32 = 3,
+ Int64 = 4,
+ Uint64 = 5,
+ Float16 = 6,
+ Half = 6,
+ Float32 = 7,
+ Float = 7,
+ Float64 = 8,
+ Double = 8,
+ Bfloat16 = 9,
+ NumTypes = 10
+ };
+
+ // RAII helper class to manage NCCL group API and CUDA free mutex.
+ // The destructor is allowed to throw since this helper class only
+ // manages group and lock lifetimes.
+ struct AutoNcclGroup {
+ AutoNcclGroup();
+ AutoNcclGroup(std::vector<ncclComm_t>& comms, bool comm_nonblocking);
+ ~AutoNcclGroup() noexcept(false);
+ std::vector<ncclComm_t> comms_;
+ bool comm_nonblocking_;
+ };
+
+ // NOTE: this is exposed only so that python_nccl.cpp can use some of these
+ // helpers. Don't use them outside of these files.
+ namespace detail {
+
+ TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status);
+
+ static inline void NCCL_CHECK(ncclResult status) {
+ if (status != ncclResult::Success) {
+ throw_nccl_error(status);
+ }
+ }
+
+ TORCH_CUDA_CPP_API at::ArrayRef<ncclComm_t> get_communicators(
+ at::TensorList inputs);
+ TORCH_CUDA_CPP_API void check_inputs(
+ at::TensorList inputs,
+ at::TensorList outputs,
+ int input_multiplier,
+ int output_multiplier);
+ TORCH_CUDA_CPP_API void check_inputs(
+ at::TensorList inputs,
+ const at::Tensor& output,
+ int root,
+ int input_multiplier,
+ int output_multiplier);
+
+ } // namespace detail
+
+ using comm_list = std::vector<ncclComm_t>;
+ using stream_list = std::vector<c10::optional<at::cuda::CUDAStream>>;
+
+ TORCH_CUDA_CPP_API std::uint64_t version();
+ TORCH_CUDA_CPP_API const char* version_suffix();
+
+ bool is_available(at::TensorList tensors);
+
+ TORCH_CUDA_CPP_API void get_unique_id(ncclUniqueId& id);
+ TORCH_CUDA_CPP_API ncclComm_t
+ comm_init_rank(int nranks, const ncclUniqueId& comm_id, int rank);
+ TORCH_CUDA_CPP_API void comm_destroy(ncclComm_t comm);
+
+ TORCH_CUDA_CPP_API void broadcast(
+ at::TensorList tensors,
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+ size_t get_max_count();
+
+ TORCH_CUDA_CPP_API void reduce(
+ const std::vector<at::Tensor>& inputs,
+ at::Tensor& output,
+ int32_t root = 0,
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+ TORCH_CUDA_CPP_API void reduce(
+ std::vector<at::Tensor>& inputs,
+ int32_t root = 0,
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+ TORCH_CUDA_CPP_API void all_reduce(
+ const std::vector<at::Tensor>& inputs,
+ std::vector<at::Tensor>& outputs,
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+ TORCH_CUDA_CPP_API void reduce_scatter(
+ const std::vector<at::Tensor>& inputs,
+ std::vector<at::Tensor>& outputs,
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+ TORCH_CUDA_CPP_API void scatter(
+ const std::vector<at::Tensor>& inputs,
+ at::Tensor& outputs,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream,
+ int32_t root = 0);
+
+ TORCH_CUDA_CPP_API void all_gather(
+ const std::vector<at::Tensor>& inputs,
+ std::vector<at::Tensor>& outputs,
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+ TORCH_CUDA_CPP_API void gather(
+ const at::Tensor& inputs,
+ std::vector<at::Tensor>& outputs,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream,
+ int32_t root = 0);
+
+ TORCH_CUDA_CPP_API void all2all_single_equal_split(
+ at::Tensor& input,
+ at::Tensor& output,
+ int size,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream);
+
+ TORCH_CUDA_CPP_API void all2all_single_unequal_split(
+ void* sendbuff,
+ const size_t* sendcounts,
+ const size_t* senddispls,
+ void* recvbuff,
+ const size_t* recvcounts,
+ const size_t* recvdispls,
+ size_t size,
+ c10::ScalarType type,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream);
+
+ TORCH_CUDA_CPP_API void all2all(
+ std::vector<at::Tensor>& outputTensors,
+ std::vector<at::Tensor>& inputTensors,
+ ncclComm_t _comm,
+ at::cuda::CUDAStream& stream);
+
+ TORCH_CUDA_CPP_API void send(
+ const at::Tensor& input,
+ ncclComm_t comm,
+ at::cuda::CUDAStream stream,
+ int dst);
+
+ TORCH_CUDA_CPP_API void recv(
+ at::Tensor& output,
+ ncclComm_t comm,
+ at::cuda::CUDAStream stream,
+ int src);
+ } // namespace torch::cuda::nccl
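
A hedged sketch of the wrapper API declared in this header: an in-place sum all-reduce over one tensor per device, leaving streams and communicators at their defaults; allreduce_sum is an illustrative name:

#include <ATen/ATen.h>
#include <torch/csrc/cuda/nccl.h>

void allreduce_sum(std::vector<at::Tensor>& per_device_tensors) {
  // Reusing the inputs as outputs makes the reduction happen in place;
  // each tensor is expected to live on a distinct CUDA device.
  torch::cuda::nccl::all_reduce(
      per_device_tensors,
      per_device_tensors,
      static_cast<int32_t>(torch::cuda::nccl::ncclRedOp::Sum));
}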
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h ADDED
@@ -0,0 +1,7 @@
+ #pragma once
+
+ namespace torch::cuda::python {
+
+ void initCommMethods(PyObject* module);
+
+ } // namespace torch::cuda::python
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <torch/csrc/python_headers.h>
+
+ PyObject* THCPModule_nccl_version(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_version_suffix(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_unique_id(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_init_rank(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_all_reduce(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_broadcast(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_all_gather(PyObject* self, PyObject* args);
+ PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h ADDED
@@ -0,0 +1,167 @@
+ #pragma once
+
+ #include <mutex>
+ #include <unordered_map>
+
+ #include <torch/csrc/distributed/autograd/context/context.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Singleton class per worker which is responsible for storing the distributed
+ // autograd context for each autograd pass and also cleans up data for an
+ // autograd pass once it's done.
+ //
+ // Each autograd pass is assigned a unique autograd_context_id and all data for
+ // that pass (DistAutogradContext) is stored in this container indexed by the
+ // autograd_context_id. The autograd_context_id itself is a 64 bit globally
+ // unique id. The first 16 bits are the worker_id and the next 48 bits are an
+ // auto-incrementing id for each worker.
+ //
+ // This container is also responsible for maintaining a globally unique message
+ // id, which is used to associate send/recv autograd function pairs. The format
+ // is similar to the autograd_context_id where we have a 64 bit integer with
+ // the first 16 bits being the worker id and the next 48 bits auto-incrementing.
+ class TORCH_API DistAutogradContainer {
+ public:
+ explicit DistAutogradContainer(uint32_t num_shards);
+
+ // One time initialization of the container.
+ static DistAutogradContainer& init(int64_t worker_id);
+
+ // Retrieve the singleton instance of the container, ensures we have
+ // initialized the container.
+ static DistAutogradContainer& getInstance();
+
+ // Create a new context for a distributed autograd pass.
+ const ContextPtr newContext();
+
+ // Clean up resources for a given context_id once the autograd pass is done.
+ // Sends RPC to other workers this worker knows about, telling them to clean
+ // up their context as well. Throws an exception if the context_id does not
+ // exist.
+ void releaseContext(int64_t context_id);
+
+ // Releases an autograd context if it is present on this node. Also sends RPC
+ // to other workers this worker knows about, telling them to clean up their
+ // context. Does nothing if it is not present.
+ void releaseContextIfPresent(int64_t context_id);
+
+ // Checks if the passed in context_id is valid.
+ void isValidContext(int64_t context_id);
+
+ // Retrieve the autograd context for a given context_id.
+ ContextPtr retrieveContext(int64_t context_id);
+
+ // Retrieves the currently active autograd context for the current thread.
+ ContextPtr currentContext();
+
+ // Checks whether or not the current thread has a valid autograd context.
+ bool hasValidContext() const;
+
+ // Generate a new autograd_message_id for send/recv autograd functions.
+ int64_t newAutogradMessageId();
+
+ // Creates a new autograd context with the provided context_id. If a context
+ // already exists with the provided context_id, we just return it.
+ // This does not set the current context for the current thread.
+ ContextPtr getOrCreateContext(int64_t context_id);
+
+ // Retrieves the maximum possible autograd_context_id/autograd_message_id that
+ // can be generated by this worker.
+ int64_t getMaxId();
+
+ // Retrieves the worker ID for this node
+ rpc::worker_id_t getWorkerId() const;
+
+ // Can set current context id if there is no valid context yet
+ static void setCurrentContextId(int64_t contextId);
+
+ // Forcibly sets the thread local current context id. Should only be used in
+ // cases where you know what you're doing and need to override the thread
+ // local. Otherwise, use setCurrentContextId instead.
+ static void forceCurrentContextId(int64_t contextId);
+
+ // Clear current context id
+ void clearCurrentContext();
+
+ // Returns the number of autograd contexts in the container.
+ size_t numAutogradContexts() const;
+
+ // Returns the current thread local context id for this thread.
+ static int64_t currentContextId();
+
+ DistAutogradContainer(const DistAutogradContainer&) = delete;
+ DistAutogradContainer& operator=(const DistAutogradContainer&) = delete;
+ DistAutogradContainer(DistAutogradContainer&&) = delete;
+ DistAutogradContainer& operator=(DistAutogradContainer&&) = delete;
+
+ private:
+ // Number of shards for the map storing autograd contexts. We'd like this
+ // to be a power of 2, and we don't expect a value much higher than the
+ // number of cores to provide much benefit.
+ static constexpr uint32_t kNumDefaultShards = 128;
+
+ // Use cache line size for alignment.
+ static constexpr int kCacheLineSize = 64;
+
+ // Structure holding one shard of the sharded autograd context map with its
+ // associated lock. Align to cache line size to avoid contention between
+ // adjacent entries.
+ struct alignas(kCacheLineSize) ContextsShard {
+ // Lock for this shard.
+ mutable std::mutex lock;
+
+ // Map storing autograd contexts for this shard.
+ std::unordered_map<int64_t, ContextPtr> contexts;
+ };
+
+ DistAutogradContainer() = delete;
+ ~DistAutogradContainer() = default;
+
+ static DistAutogradContainer& getInstanceInternal();
+
+ // Retrieve the shard for given context_id.
+ ContextsShard& getShard(int64_t context_id);
+
+ // Sends an RPC to the workers that have a context corresponding to passed in
+ // context_id. This function should be called with the lock.
+ void sendReleaseContextRpc(
+ const std::unordered_set<rpc::worker_id_t>& workerIds,
+ int64_t context_id);
+
+ // Erase context_id from the autograd context map, and reset the thread local
+ // current context id if it corresponds to the passed in context id. This
+ // function should be called with the lock.
+ void eraseContextIdAndReset(ContextsShard& shard, int64_t context_id);
+
+ // Compute the number of shards for the autograd_contexts_ map.
+ static uint32_t computeNumShards();
+
+ // Auto incrementing context id used to identify unique autograd passes.
+ // Initialized with the first 16 bits being the worker_id.
+ std::atomic<int64_t> next_context_id_;
+
+ // Unique id to identify a worker in the distributed setting.
+ int16_t worker_id_;
+
+ // Whether or not the container has been initialized appropriately.
+ bool initialized_;
+
+ // Sharded autograd context map.
+ std::vector<ContextsShard> autograd_contexts_;
+
+ // Number of shards for the sharded autograd_contexts_ map.
+ uint32_t num_shards_;
+
+ // Autograd message id to identify unique send/recv autograd function pairs.
+ std::atomic<int64_t> next_autograd_message_id_;
+
+ // Maximum allowed value for autograd_context_id or autograd_message_id.
+ int64_t max_id_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
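
An illustrative sketch (not the actual implementation) of the 64-bit id layout the comments in this header describe: the top 16 bits hold the worker id and the low 48 bits hold the per-worker auto-incrementing counter; the helper names are hypothetical:

#include <cstdint>

constexpr int kAutoIncrementBits = 48;
constexpr int64_t kAutoIncrementMask = (1LL << kAutoIncrementBits) - 1;

// First context/message id a worker hands out: worker id in the top 16 bits,
// counter portion starting at zero.
int64_t first_id_for(int16_t worker_id) {
  return static_cast<int64_t>(worker_id) << kAutoIncrementBits;
}

// Largest id the same worker can generate before its 48-bit counter overflows,
// i.e. the bound the container tracks as max_id_.
int64_t max_id_for(int16_t worker_id) {
  return (static_cast<int64_t>(worker_id) << kAutoIncrementBits) | kAutoIncrementMask;
}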
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h ADDED
@@ -0,0 +1,174 @@
+ #pragma once
+
+ #include <cstdint>
+ #include <functional>
+
+ #include <ATen/core/Dict.h>
+ #include <torch/csrc/autograd/engine.h>
+ #include <torch/csrc/distributed/autograd/functions/recvrpc_backward.h>
+ #include <torch/csrc/distributed/autograd/functions/sendrpc_backward.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ class RecvRpcBackward;
+
+ // DistAutogradContext which stores information for a single distributed
+ // autograd pass on a worker.
+ class TORCH_API DistAutogradContext {
+ public:
+ using GradCallback = std::function<bool(torch::Tensor&)>;
+
+ explicit DistAutogradContext(int64_t contextId);
+
+ // Retrieves the autograd context id for this context.
+ int64_t contextId() const;
+
+ // Records a 'send' autograd function for this context with the provided
+ // message id.
+ void addSendFunction(
+ const std::shared_ptr<SendRpcBackward>& func,
+ int64_t autograd_message_id);
+
+ // Records a 'recv' autograd function for this context with the provided
+ // message id.
+ void addRecvFunction(
+ std::shared_ptr<RecvRpcBackward>& func,
+ int64_t autograd_message_id);
+
+ // Given an autograd_message_id, retrieve the appropriate send function.
+ std::shared_ptr<SendRpcBackward> retrieveSendFunction(
+ int64_t autograd_message_id);
+
+ // Return all send functions for this context.
+ std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>> sendFunctions()
+ const;
+
+ // Return all recv functions for this context.
+ std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>> recvFunctions()
+ const;
+
+ // Adds a future message recording an outstanding RPC.
+ void addOutstandingRpc(const c10::intrusive_ptr<rpc::JitFuture>& jitFuture);
+
+ // Returns all gradients.
+ const c10::Dict<torch::Tensor, torch::Tensor> getGradients() const;
+
+ // This function gives a mutable grad reference to the callback.
+ // If the callback returns true, it means the grad in the context
+ // needs to be updated.
+ void runGradCallbackForVariable(
+ const torch::autograd::Variable& variable,
+ GradCallback&& cb);
+
+ DistAutogradContext(const DistAutogradContext&) = delete;
+ DistAutogradContext& operator=(const DistAutogradContext&) = delete;
+ DistAutogradContext(DistAutogradContext&&) = delete;
+ DistAutogradContext& operator=(DistAutogradContext&&) = delete;
+
+ // records the workerID of a node that we sent an RPC to.
+ // workerIDs are added here when we attach a send function to this autograd
+ // context
+ void addKnownWorkerId(const rpc::worker_id_t workerId);
+
+ // Retrieves a set containing the known workerIds for this context
+ // These are the different workers that this context has sent RPCs to.
+ std::unordered_set<rpc::worker_id_t> getKnownWorkerIds() const;
+
+ private:
+ friend class BackwardPassCleanupGuard;
+ friend class DistEngine;
+ friend class RecvRpcBackward;
+ friend class DistAccumulateGradCaptureHook;
+
+ // Record that we would like to accumulate the provided gradient on the given
+ // variable.
+ void accumulateGrad(
+ const torch::autograd::Variable& variable,
+ const torch::Tensor& grad,
+ size_t num_expected_refs);
+
+ // Retrieve the GraphTask.
+ std::shared_ptr<torch::autograd::GraphTask> retrieveGraphTask();
+
+ // Set the appropriate graph task for the backward pass. Can be called only
+ // once.
+ void setGraphTask(std::shared_ptr<torch::autograd::GraphTask> graphTask);
+
+ // Resets the graph task to ensure we can run another distributed backward
+ // pass for the same autograd context.
+ void resetGraphTask();
+
+ // Waits for all outstanding RPCs for this context to finish and clears all
+ // outstanding rpcs held in this context. This should be called only once.
+ c10::intrusive_ptr<c10::ivalue::Future> clearAndWaitForOutstandingRpcsAsync();
+
+ void clearOutstandingRpcs();
+
+ // Record an event to mark the completion of gradient computation. These
+ // events will later help to properly synchronize gradient consumption
+ // in getGradients(). We need these events because backward and
+ // optimizer.step are separate RPC calls, and will occur on different CUDA
+ // streams. Without synchronization, it is possible that gradients are
+ // consumed before they are ready.
+ void recordGradEvent(c10::Device device);
+
+ const int64_t contextId_;
+
+ // Set containing known worker IDs, used in cleaning up autograd context.
+ // Whenever a sendRpcBackward is attached to the autograd graph for this
+ // context, the destination is added here.
+ std::unordered_set<rpc::worker_id_t> knownWorkerIds_;
+
+ // Map from autograd_message_id to appropriate 'send' autograd function.
+ std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>>
+ sendAutogradFunctions_;
+
+ // Map from autograd_message_id to appropriate 'recv' autograd function.
+ std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>>
+ recvAutogradFunctions_;
+
+ // Gradients accumulated in this context so far. The key is the variable on
+ // which the gradient needs to be accumulated and the value is the gradient
+ // that needs to be accumulated on that variable.
+ c10::Dict<torch::Tensor, torch::Tensor> accumulatedGrads_;
+
+ // See comments for recordGradEvent(c10::Device device);
+ std::unordered_map<c10::Device, c10::Event> gradReadyEvents_;
+ const c10::impl::VirtualGuardImpl impl_;
+
+ // The autograd GraphTask for the backward pass on this node for this context.
+ std::shared_ptr<torch::autograd::GraphTask> graphTask_;
+
+ // List of futures for RPCs initiated by this node to propagate gradients to
+ // other nodes. The distributed autograd engine on this node can return
+ // successfully only if all these futures are done and are successful.
+ std::vector<c10::intrusive_ptr<rpc::JitFuture>> outStandingRpcs_;
+
+ // Lock to protect concurrent modification of the context.
+ mutable std::mutex lock_;
+ };
+
+ using ContextPtr = std::shared_ptr<DistAutogradContext>;
+
+ // This class stores a shared_ptr to a DistAutogradContext instance in a
+ // thread local variable. The instance is given by the call site. The class
+ // doesn't know the current context. It's just a util class.
+ class TORCH_API ThreadLocalDistAutogradContext {
+ public:
+ // Store 'new_context' to the thread local variable maintained by this class.
+ explicit ThreadLocalDistAutogradContext(ContextPtr&& new_context);
+ ~ThreadLocalDistAutogradContext();
+
+ // Retrieve the stored DistAutogradContext instance.
+ static ContextPtr getContextPtr();
+
+ private:
+ ContextPtr prev_context_ptr_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
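
An illustrative sketch of the GradCallback contract described in this header: the callback receives a mutable reference to the gradient accumulated for a variable and returns true only if it modified the tensor and the context should keep the update; scale_grad_in_context, ctx, and var are hypothetical names supplied by the caller:

#include <torch/csrc/distributed/autograd/context/context.h>

void scale_grad_in_context(
    torch::distributed::autograd::DistAutogradContext& ctx,
    const torch::autograd::Variable& var) {
  ctx.runGradCallbackForVariable(var, [](torch::Tensor& grad) {
    grad.mul_(0.5); // rescale the accumulated gradient in place
    return true;    // tell the context to store the updated grad
  });
}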
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/function.h>
+ #include <torch/csrc/distributed/autograd/context/context.h>
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Forward declarations.
+ class DistAutogradContext;
+
+ // As part of our distributed autograd implementation, whenever we receive an
+ // RPC from a node, we add a 'RecvRpcBackward' autograd function to the
+ // autograd graph. This is more or less a placeholder function that is used to
+ // pass gradients to the remote host during the backward pass. The inputs to the
+ // RPC function are the inputs to this autograd function.
+ class TORCH_API RecvRpcBackward : public torch::autograd::Node {
+ public:
+ explicit RecvRpcBackward(
+ const AutogradMetadata& autogradMetadata,
+ std::shared_ptr<DistAutogradContext> autogradContext,
+ rpc::worker_id_t fromWorkerId,
+ rpc::DeviceMap deviceMap);
+
+ torch::autograd::variable_list apply(
+ torch::autograd::variable_list&& grads) override;
+
+ private:
+ const AutogradMetadata autogradMetadata_;
+
+ // Hold a weak reference to the autograd context to avoid circular
+ // dependencies with the context (since it holds a reference to
+ // RecvRpcBackward).
+ std::weak_ptr<DistAutogradContext> autogradContext_;
+
+ // The worker id from which the RPC was received. During the backward pass,
+ // we need to propagate the gradients to this workerId.
+ rpc::worker_id_t fromWorkerId_;
+
+ // Device mapping for tensors sent over RPC.
+ const rpc::DeviceMap deviceMap_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h ADDED
@@ -0,0 +1,37 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/function.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // As part of our distributed autograd implementation, whenever we send an RPC
+ // from one node to another, we add a 'SendRpcBackward' autograd function to the
+ // autograd graph. This is more or less a placeholder function that is used to
+ // kick off the autograd engine on the current worker on the backward pass. The
+ // edges for this autograd function are the inputs to the RPC method.
+ //
+ // During the backward pass, this function is queued for execution in the
+ // autograd engine which eventually runs the rest of the autograd graph.
+ struct TORCH_API SendRpcBackward : public torch::autograd::Node {
+ public:
+ torch::autograd::variable_list apply(
+ torch::autograd::variable_list&& inputs) override;
+
+ // SendRpcBackward is actually the root of an autograd graph on the local
+ // node. As a result, it doesn't receive any 'inputs', but rather the RPC
+ // framework passes gradients over to this function to kick off local autograd
+ // computation.
+ void setGrads(const torch::autograd::variable_list& grads);
+
+ // Retrieve the grads for the function.
+ const torch::autograd::variable_list& getGrads() const;
+
+ private:
+ torch::autograd::variable_list grads_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <cstdint>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // This structure represents autograd metadata that we need to pass across
+ // different nodes when we call an RPC which needs autograd computation.
+ struct TORCH_API AutogradMetadata {
+ AutogradMetadata(int64_t autogradContextId, int64_t autogradMessageId);
+
+ // autogradContextId_ is a globally unique integer that identifies a
+ // particular distributed autograd pass.
+ int64_t autogradContextId;
+ // autogradMessageId_ is a globally unique integer that identifies a pair
+ // of send/recv autograd functions.
+ int64_t autogradMessageId;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Used to request other workers to clean up their autograd context.
+ class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase {
+ public:
+ explicit CleanupAutogradContextReq(int64_t context_id);
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<CleanupAutogradContextReq> fromMessage(
+ const rpc::Message& message);
+
+ // Retrieve the context id we are cleaning up with this message.
+ int64_t getContextId();
+
+ private:
+ int64_t context_id_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Empty response for CleanupAutogradContextReq. Sent to acknowledge receipt of
+ // a CleanupAutogradContextReq.
+ class TORCH_API CleanupAutogradContextResp : public rpc::RpcCommandBase {
+ public:
+ CleanupAutogradContextResp() = default;
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<CleanupAutogradContextResp> fromMessage(
+ const rpc::Message& message);
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.h ADDED
@@ -0,0 +1,42 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+ #include <vector>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Used to propagate gradients from one node to another during a distributed
+ // backwards pass. This RPC call is invoked when we hit a `recv` autograd
+ // function during backward pass execution.
+ class TORCH_API PropagateGradientsReq : public rpc::RpcCommandBase {
+ public:
+ PropagateGradientsReq(
+ const AutogradMetadata& autogradMetadata,
+ std::vector<torch::autograd::Variable> grads,
+ bool retainGraph = false);
+
+ const AutogradMetadata& getAutogradMetadata();
+
+ const std::vector<torch::autograd::Variable>& getGrads();
+
+ // Serialization and deserialization methods.
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<PropagateGradientsReq> fromMessage(
+ const rpc::Message& message);
+
+ // Whether or not to retain the autograd graph.
+ bool retainGraph();
+
+ private:
+ AutogradMetadata autogradMetadata_;
+ std::vector<torch::autograd::Variable> grads_;
+ bool retainGraph_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/rpc/message.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Response for the PropagateGradients call. Currently, this class is mostly
+ // just a placeholder and sends an empty message over the wire. The purpose of
+ // this RPC command is to indicate whether or not the PropagateGradientsReq call
+ // was successful.
+ class TORCH_API PropagateGradientsResp : public rpc::RpcCommandBase {
+ public:
+ PropagateGradientsResp() = default;
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+ static std::unique_ptr<PropagateGradientsResp> fromMessage(
+ const rpc::Message& message);
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.h ADDED
@@ -0,0 +1,98 @@
+ #pragma once
+
+ #include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
+
+ namespace torch {
+ namespace distributed {
+ namespace autograd {
+
+ // Represents an RPC that includes autograd information. This class basically
+ // wraps another `RpcCommandBase` object which represents the actual RPC and has
+ // additional autograd information associated with that RPC.
+ class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase {
+ public:
+ // Used when we are sending an RPC over the wire.
+ RpcWithAutograd(
+ rpc::worker_id_t fromWorkerId,
+ rpc::MessageType messageType,
+ const AutogradMetadata& autogradMetadata,
+ c10::intrusive_ptr<rpc::Message> wrappedMessage,
+ rpc::DeviceMap deviceMap = {});
+
+ // Used when receiving an RPC over the wire.
+ RpcWithAutograd(
+ rpc::worker_id_t fromWorkerId,
+ rpc::MessageType messageType,
+ const AutogradMetadata& autogradMetadata,
+ std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
+ rpc::MessageType wrappedMessageType,
+ std::vector<torch::Tensor> tensors,
+ rpc::DeviceMap deviceMap = {});
+
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+
+ static std::unique_ptr<RpcWithAutograd> fromMessage(
+ const rpc::Message& message);
+
+ // Retrieves tensors as part of this RPC, which need to be considered for
+ // autograd computations.
+ std::vector<torch::Tensor>& tensors();
+
+ const AutogradMetadata& autogradMetadata() const;
+
+ RpcCommandBase& wrappedRpc();
+
+ void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
+
+ std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
+
+ // Message type of the wrapped RPC.
+ rpc::MessageType wrappedMessageType() const;
+
+ // Retrieve the worker id from which the RPC originated.
+ rpc::worker_id_t fromWorkerId() const;
+
+ // Retrieve the device map.
+ const rpc::DeviceMap& deviceMap();
+
+ private:
+ // WorkerId from which this RPC originated. This is necessary for knowing
+ // which worker we need to contact during the backward pass.
+ rpc::worker_id_t fromWorkerId_;
+
+ // Message type for this call.
+ rpc::MessageType messageType_;
+
+ AutogradMetadata autogradMetadata_;
+
+ // Since wrappedMessage_ is destructively constructed from wrappedRpc_,
+ // they are valid exclusively. They are used for different purposes:
+ // wrappedRpc_ is used while constructing a receive rpcWithAutograd;
+ // wrappedMessage_ is used while constructing a send rpcWithAutograd.
+
+ // When a receive rpcWithAutograd is constructed fromMessage, it is valid;
+ // When a send rpcWithAutograd is constructed before toMessage, it is nullptr.
+ std::unique_ptr<RpcCommandBase> wrappedRpc_;
+
+ // Serialized message representing wrappedRpc_. Used mostly as a cache to
+ // avoid serializing the request twice.
+ // When a receive rpcWithAutograd is constructed fromMessage, it is nullptr;
+ // When a send rpcWithAutograd is constructed before toMessage, it is valid.
+ c10::intrusive_ptr<rpc::Message> wrappedMessage_;
+
+ // message type of the wrappedMessage, this is stored separately since
+ // wrappedMessage_ is not always guaranteed to be populated.
+ rpc::MessageType wrappedMessageType_;
+
+ // Tensors part of the wrappedRpc that need to be considered for autograd.
+ std::vector<torch::Tensor> tensors_;
+
+ // Device mapping for tensors that are sent across an RPC to another node.
+ rpc::DeviceMap deviceMap_;
+ };
+
+ } // namespace autograd
+ } // namespace distributed
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/profiler.h>
4
+ #include <torch/csrc/distributed/rpc/message.h>
5
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
6
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
7
+ #include <torch/csrc/distributed/rpc/types.h>
8
+
9
+ namespace torch {
10
+ namespace distributed {
11
+ namespace autograd {
12
+
13
+ class TORCH_API RpcWithProfilingReq : public rpc::RpcCommandBase {
14
+ public:
15
+ // For sending RPCs, invoked when client is creating this RPC command.
16
+ RpcWithProfilingReq(
17
+ rpc::MessageType messageType,
18
+ c10::intrusive_ptr<rpc::Message> wrappedMessage,
19
+ torch::autograd::profiler::ProfilerConfig&& profilerConfig,
20
+ rpc::ProfilingId profilingKeyId);
21
+
22
+ // For receiving an RPC
23
+ // Used in fromMessage.
24
+ RpcWithProfilingReq(
25
+ rpc::MessageType messageType,
26
+ std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
27
+ rpc::MessageType wrappedMessageType,
28
+ std::vector<torch::Tensor> tensors,
29
+ torch::autograd::profiler::ProfilerConfig&& profilerConfig,
30
+ rpc::ProfilingId profilingKeyId);
31
+
32
+ // Convert this RPC Command to a Message that can be sent over the wire.
33
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
34
+ static std::unique_ptr<RpcWithProfilingReq> fromMessage(
35
+ const rpc::Message& message);
36
+
37
+ // Retrieve the profiling data that is associated with this command.
38
+ torch::autograd::profiler::ProfilerConfig getProfilingConfig() const;
39
+ // Retrieve the globally unique profiling ID corresponding to this command.
40
+ const rpc::ProfilingId& getProfilingId() const;
41
+ // Retrieve the original RPC which this ProfilingRPC wraps.
42
+ RpcCommandBase& wrappedRpc();
43
+ // Destructively move the wrapped RPC.
44
+ std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
45
+ // Message type of the wrapped RPC
46
+ rpc::MessageType wrappedMessageType() const;
47
+ void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
48
+
49
+ private:
50
+ // message type
51
+ const rpc::MessageType messageType_;
52
+ // wrapped message
53
+ c10::intrusive_ptr<rpc::Message> wrappedMessage_;
54
+ std::unique_ptr<RpcCommandBase> wrappedRpc_;
55
+ rpc::MessageType wrappedMessageType_;
56
+ std::vector<torch::Tensor> tensors_;
57
+ const torch::autograd::profiler::ProfilerConfig profilerConfig_;
58
+ const rpc::ProfilingId profilingKeyId_;
59
+ };
60
+ } // namespace autograd
61
+ } // namespace distributed
62
+ } // namespace torch
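A minimal sketch of how the two constructors declared above pair up, assuming `msgType`, `wrappedMsg`, `cfg`, and `profId` are placeholders produced elsewhere by the RPC layer (they are not shown in this header):

    namespace rpc = torch::distributed::rpc;
    namespace dist_autograd = torch::distributed::autograd;

    // Send path: wrap an already-serialized RPC message together with the
    // profiler configuration; toMessageImpl() is rvalue-qualified, hence the move.
    dist_autograd::RpcWithProfilingReq req(
        msgType, std::move(wrappedMsg), std::move(cfg), profId);
    c10::intrusive_ptr<rpc::Message> onWire = std::move(req).toMessageImpl();

    // Receive path: rebuild the command from the wire format and inspect it.
    std::unique_ptr<dist_autograd::RpcWithProfilingReq> parsed =
        dist_autograd::RpcWithProfilingReq::fromMessage(*onWire);
    auto parsedCfg = parsed->getProfilingConfig();
    std::unique_ptr<rpc::RpcCommandBase> inner = std::move(*parsed).moveWrappedRpc();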
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.h ADDED
@@ -0,0 +1,59 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/profiler.h>
4
+ #include <torch/csrc/distributed/rpc/message.h>
5
+ #include <torch/csrc/distributed/rpc/rpc_agent.h>
6
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
7
+ #include <torch/csrc/distributed/rpc/types.h>
8
+
9
+ namespace torch {
10
+ namespace distributed {
11
+ namespace autograd {
12
+ class TORCH_API RpcWithProfilingResp : public rpc::RpcCommandBase {
13
+ public:
14
+ // For sending RPCs over the wire
15
+ RpcWithProfilingResp(
16
+ rpc::MessageType messageType,
17
+ c10::intrusive_ptr<rpc::Message> wrappedMessage,
18
+ std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents,
19
+ rpc::ProfilingId profilingId);
20
+
21
+ // For receiving RPCs. Used in fromMessage when converting a message received
22
+ // over the wire.
23
+ RpcWithProfilingResp(
24
+ rpc::MessageType messageType,
25
+ std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
26
+ rpc::MessageType wrappedMessageType,
27
+ std::vector<torch::Tensor> tensors,
28
+ std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents,
29
+ rpc::ProfilingId profilingId);
30
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
31
+ static std::unique_ptr<RpcWithProfilingResp> fromMessage(
32
+ const rpc::Message& message);
33
+ // Retrieve remote Events
34
+ std::vector<torch::autograd::profiler::LegacyEvent> getProfiledEvents() const;
35
+ // Retrieve the globally unique profiling ID corresponding to this command.
36
+ const rpc::ProfilingId& getProfilingId() const;
37
+ // Retrieve the original RPC which this ProfilingRPC wraps.
38
+ RpcCommandBase& wrappedRpc();
39
+ // Destructively move the wrapped RPC.
40
+ std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
41
+ // Message type of the wrapped RPC
42
+ rpc::MessageType wrappedMessageType() const;
43
+ // Set the wrapped RPC for this RPC.
44
+ void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
45
+
46
+ private:
47
+ // message type
48
+ const rpc::MessageType messageType_;
49
+ // wrapped message
50
+ c10::intrusive_ptr<rpc::Message> wrappedMessage_;
51
+ std::unique_ptr<RpcCommandBase> wrappedRpc_;
52
+ rpc::MessageType wrappedMessageType_;
53
+ std::vector<torch::Tensor> tensors_;
54
+ const std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents_;
55
+ const rpc::ProfilingId profilingId_;
56
+ };
57
+ } // namespace autograd
58
+ } // namespace distributed
59
+ } // namespace torch
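A matching sketch for the response side, assuming `message` is an rpc::Message that arrived over the wire carrying profiled results:

    namespace rpc = torch::distributed::rpc;
    namespace dist_autograd = torch::distributed::autograd;

    std::unique_ptr<dist_autograd::RpcWithProfilingResp> resp =
        dist_autograd::RpcWithProfilingResp::fromMessage(message);
    // Remote profiler events and the id used to match them to the local request.
    std::vector<torch::autograd::profiler::LegacyEvent> events =
        resp->getProfiledEvents();
    const rpc::ProfilingId& id = resp->getProfilingId();
    // The wrapped response RPC remains accessible for normal processing.
    rpc::RpcCommandBase& inner = resp->wrappedRpc();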
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/rpc/message.h>
4
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
5
+ #include <torch/csrc/distributed/rpc/types.h>
6
+
7
+ namespace torch {
8
+ namespace distributed {
9
+ namespace autograd {
10
+
11
+ // Internal system RPC to invoke distributed backward pass on remote nodes when
12
+ // 'rref.backward()' is invoked.
13
+ class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase {
14
+ public:
15
+ RRefBackwardReq(
16
+ const rpc::RRefId& rrefId,
17
+ int64_t autogradContextId,
18
+ bool retainGraph = false);
19
+
20
+ const rpc::RRefId& getRRefId() const;
21
+
22
+ int64_t getAutogradContextId() const;
23
+
24
+ bool retainGraph() const;
25
+
26
+ // Serialization and deserialization methods.
27
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
28
+ static std::unique_ptr<RRefBackwardReq> fromMessage(
29
+ const rpc::Message& message);
30
+
31
+ private:
32
+ const rpc::RRefId rrefId_;
33
+ const int64_t autogradContextId_;
34
+ const bool retainGraph_;
35
+ };
36
+
37
+ } // namespace autograd
38
+ } // namespace distributed
39
+ } // namespace torch
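A small sketch of the round trip, assuming `rrefId` names an owner RRef and `ctxId` is the distributed autograd context id for the current backward pass:

    namespace rpc = torch::distributed::rpc;
    namespace dist_autograd = torch::distributed::autograd;

    // Caller side: build and serialize the request triggered by rref.backward().
    dist_autograd::RRefBackwardReq req(rrefId, ctxId, /*retainGraph=*/false);
    c10::intrusive_ptr<rpc::Message> msg = std::move(req).toMessageImpl();

    // Owner side: decode the message and read the fields back.
    std::unique_ptr<dist_autograd::RRefBackwardReq> decoded =
        dist_autograd::RRefBackwardReq::fromMessage(*msg);
    bool retain = decoded->retainGraph();
    int64_t autogradCtx = decoded->getAutogradContextId();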
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/rpc/message.h>
4
+ #include <torch/csrc/distributed/rpc/rpc_command_base.h>
5
+
6
+ namespace torch {
7
+ namespace distributed {
8
+ namespace autograd {
9
+
10
+ // Response for the RRefBackwardReq.
11
+ class TORCH_API RRefBackwardResp : public rpc::RpcCommandBase {
12
+ public:
13
+ RRefBackwardResp() = default;
14
+ c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
15
+ static std::unique_ptr<RRefBackwardResp> fromMessage(
16
+ const rpc::Message& message);
17
+ };
18
+
19
+ } // namespace autograd
20
+ } // namespace distributed
21
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp ADDED
@@ -0,0 +1,383 @@
1
+ #pragma once
2
+
3
+ #include <condition_variable>
4
+ #include <memory>
5
+ #include <mutex>
6
+ #include <stdexcept>
7
+ #include <unordered_map>
8
+ #include <utility>
9
+ #include <vector>
10
+
11
+ #include <ATen/ATen.h>
12
+ #include <c10/macros/Macros.h>
13
+
14
+ #include <torch/csrc/distributed/c10d/Types.hpp>
15
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
16
+ #include <torch/csrc/distributed/c10d/Work.hpp>
17
+ #include <torch/csrc/distributed/c10d/debug.h>
18
+
19
+ constexpr auto kBackendDefaultTimeout =
20
+ std::chrono::milliseconds(30 * 60 * 1000);
21
+
22
+ namespace c10d {
23
+
24
+ class TORCH_API Backend : public torch::CustomClassHolder {
25
+ public:
26
+ // Backend Options is a base struct that defines the basic options
27
+ // when constructing a Backend. Each Backend subclass should
28
+ // extend this struct and define its options if it wants to provide more
29
+ // config options (beyond basic ones defined here) to end user.
30
+ struct TORCH_API Options : torch::CustomClassHolder {
31
+ explicit Options(
32
+ std::string backend,
33
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout)
34
+ : timeout(timeout), backend(std::move(backend)) {}
35
+ ~Options() override = default;
36
+
37
+ std::chrono::milliseconds timeout;
38
+
39
+ // backend name
40
+ const std::string backend;
41
+ };
42
+
43
+ explicit Backend(int rank, int size);
44
+ ~Backend() override = 0;
45
+
46
+ int getRank() const {
47
+ return rank_;
48
+ }
49
+
50
+ int getSize() const {
51
+ return size_;
52
+ }
53
+
54
+ // Returns a unique opaque ID of this backend that can be used to correlate
55
+ // with its collectives.
56
+ int64_t getID() const {
57
+ return reinterpret_cast<std::intptr_t>(this);
58
+ }
59
+
60
+ virtual void startCoalescing() {
61
+ TORCH_CHECK(
62
+ false,
63
+ c10::str(
64
+ "Backend ",
65
+ getBackendName(),
66
+ " does not implement startCoalescing"));
67
+ }
68
+
69
+ virtual c10::intrusive_ptr<Work> endCoalescing() {
70
+ TORCH_CHECK(
71
+ false,
72
+ c10::str(
73
+ "Backend ", getBackendName(), " does not implement endCoalescing"));
74
+ }
75
+
76
+ // Subclasses must override this method to return the backend name
77
+ virtual const std::string getBackendName() const {
78
+ TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented.");
79
+ };
80
+
81
+ virtual c10::intrusive_ptr<Work> broadcast(
82
+ std::vector<at::Tensor>& /* tensors */,
83
+ const BroadcastOptions& /* opts */ = BroadcastOptions()) {
84
+ TORCH_CHECK(
85
+ false,
86
+ c10::str("Backend ", getBackendName(), " does not support broadcast"));
87
+ }
88
+
89
+ virtual c10::intrusive_ptr<Work> allreduce(
90
+ std::vector<at::Tensor>& /* tensors */,
91
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) {
92
+ TORCH_CHECK(
93
+ false,
94
+ c10::str("Backend ", getBackendName(), " does not support allreduce"));
95
+ }
96
+
97
+ virtual c10::intrusive_ptr<Work> allreduce_sparse(
98
+ std::vector<at::Tensor>& /* tensors */,
99
+ const AllreduceOptions& /* opts */ = AllreduceOptions()) {
100
+ TORCH_CHECK(
101
+ false,
102
+ c10::str(
103
+ "Backend ",
104
+ getBackendName(),
105
+ " does not support allreduce sparse"));
106
+ }
107
+
108
+ virtual c10::intrusive_ptr<Work> allreduce_coalesced(
109
+ std::vector<at::Tensor>& /* tensors */,
110
+ const AllreduceCoalescedOptions& /* opts */ =
111
+ AllreduceCoalescedOptions()) {
112
+ TORCH_CHECK(
113
+ false,
114
+ c10::str(
115
+ "Backend ",
116
+ getBackendName(),
117
+ " does not support allreduce_coalesced"));
118
+ }
119
+
120
+ virtual c10::intrusive_ptr<Work> reduce(
121
+ std::vector<at::Tensor>& /* tensors */,
122
+ const ReduceOptions& /* opts */ = ReduceOptions()) {
123
+ TORCH_CHECK(
124
+ false,
125
+ c10::str("Backend ", getBackendName(), " does not support reduce"));
126
+ }
127
+
128
+ virtual c10::intrusive_ptr<Work> allgather(
129
+ std::vector<std::vector<at::Tensor>>& /* outputTensors */,
130
+ std::vector<at::Tensor>& /* inputTensors */,
131
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
132
+ TORCH_CHECK(
133
+ false,
134
+ c10::str("Backend ", getBackendName(), " does not support allgather"));
135
+ }
136
+
137
+ // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
138
+ // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
139
+ // For implementers of ProcessGroup API and advanced users only.
140
+ // Note: this function will be deprecated in near future.
141
+ virtual c10::intrusive_ptr<Work> _allgather_base(
142
+ at::Tensor& /* outputBuffer */,
143
+ at::Tensor& /* inputBuffer */,
144
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
145
+ TORCH_CHECK(
146
+ false,
147
+ c10::str(
148
+ "Backend ", getBackendName(), " does not support _allgather_base"));
149
+ }
150
+
151
+ // This function is deprecated and will be moved out of Backend to comms:
152
+ // * do not add dependencies on this function,
153
+ // * do not implement it in your Backend, implement _allgather_base
154
+ // instead.
155
+ virtual c10::intrusive_ptr<Work> allgather_coalesced(
156
+ std::vector<std::vector<at::Tensor>>& /* outputTensorLists */,
157
+ std::vector<at::Tensor>& /* inputTensors */,
158
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
159
+ TORCH_CHECK(
160
+ false,
161
+ c10::str(
162
+ "Backend ",
163
+ getBackendName(),
164
+ " does not support allgather_coalesced"));
165
+ }
166
+
167
+ // This function is a coalesced version of `allgather_into_tensor` (currently
168
+ // still named as `_allgather_base`). Each tensor in the vector corresponds to
169
+ // an input/output of one `allgather_into_tensor` operation.
170
+ virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
171
+ std::vector<at::Tensor>& /* outputs */,
172
+ std::vector<at::Tensor>& /* inputs */,
173
+ const AllgatherOptions& /* opts */ = AllgatherOptions()) {
174
+ TORCH_CHECK(
175
+ false,
176
+ c10::str(
177
+ "Backend ",
178
+ getBackendName(),
179
+ " does not support allgather_into_tensor_coalesced"));
180
+ }
181
+
182
+ virtual c10::intrusive_ptr<Work> gather(
183
+ std::vector<std::vector<at::Tensor>>& /* outputTensors */,
184
+ std::vector<at::Tensor>& /* inputTensors */,
185
+ const GatherOptions& /* opts */ = GatherOptions()) {
186
+ TORCH_CHECK(
187
+ false,
188
+ c10::str("Backend ", getBackendName(), " does not support gather"));
189
+ }
190
+
191
+ virtual c10::intrusive_ptr<Work> scatter(
192
+ std::vector<at::Tensor>& /* outputTensors */,
193
+ std::vector<std::vector<at::Tensor>>& /* inputTensors */,
194
+ const ScatterOptions& /* opts */ = ScatterOptions()) {
195
+ TORCH_CHECK(
196
+ false,
197
+ c10::str("Backend ", getBackendName(), " does not support scatter"));
198
+ }
199
+
200
+ virtual c10::intrusive_ptr<Work> reduce_scatter(
201
+ std::vector<at::Tensor>& /* outputTensors */,
202
+ std::vector<std::vector<at::Tensor>>& /* inputTensors */,
203
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
204
+ TORCH_CHECK(
205
+ false,
206
+ c10::str(
207
+ "Backend ", getBackendName(), " does not support reduce_scatter"));
208
+ }
209
+
210
+ virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
211
+ at::Tensor& /* outputBuffer */,
212
+ at::Tensor& /* inputBuffer */,
213
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
214
+ TORCH_CHECK(
215
+ false,
216
+ c10::str(
217
+ "Backend ",
218
+ getBackendName(),
219
+ " does not support _reduce_scatter_base"));
220
+ }
221
+
222
+ // This function is a coalesced version of `reduce_scatter_tensor` (currently
223
+ // still named as `_reduce_scatter_base`). Each tensor in the vector
224
+ // corresponds to an input/output of one `reduce_scatter_tensor` operation.
225
+ virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
226
+ std::vector<at::Tensor>& /* outputs */,
227
+ std::vector<at::Tensor>& /* inputs */,
228
+ const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
229
+ TORCH_CHECK(
230
+ false,
231
+ c10::str(
232
+ "Backend ",
233
+ getBackendName(),
234
+ " does not support reduce_scatter_tensor_coalesced"));
235
+ }
236
+
237
+ virtual c10::intrusive_ptr<Work> alltoall_base(
238
+ at::Tensor& /* outputBuffer */,
239
+ at::Tensor& /* inputBuffer */,
240
+ std::vector<int64_t>& /* outputSplitSizes */,
241
+ std::vector<int64_t>& /* inputSplitSizes */,
242
+ const AllToAllOptions& /* opts */ = AllToAllOptions()) {
243
+ TORCH_CHECK(
244
+ false,
245
+ c10::str(
246
+ "Backend ", getBackendName(), " does not support alltoall_base"));
247
+ }
248
+
249
+ virtual c10::intrusive_ptr<Work> alltoall(
250
+ std::vector<at::Tensor>& /* outputTensors */,
251
+ std::vector<at::Tensor>& /* inputTensors */,
252
+ const AllToAllOptions& opts = AllToAllOptions()) {
253
+ TORCH_CHECK(
254
+ false,
255
+ c10::str("Backend ", getBackendName(), " does not support alltoall"));
256
+ }
257
+
258
+ virtual void monitoredBarrier(
259
+ const BarrierOptions& /* unused */,
260
+ bool /* unused */ = false) {
261
+ auto backendName = getBackendName();
262
+ TORCH_CHECK(
263
+ false,
264
+ c10::str(
265
+ "Backend ",
266
+ backendName,
267
+ " does not support monitoredBarrier, only GLOO supports monitored barrier."));
268
+ }
269
+
270
+ // Agrees on an initial sequence number for the whole group by having rank 0
271
+ // create it and broadcast it to other ranks using the store. Only implemented
272
+ // for GLOO and NCCL backends currently.
273
+ virtual void setSequenceNumberForGroup() {
274
+ auto backendName = getBackendName();
275
+ TORCH_CHECK(
276
+ false,
277
+ c10::str(
278
+ "Backend ",
279
+ backendName,
280
+ " does not yet support sequence numbers."));
281
+ }
282
+
283
+ // Retrieves the current sequence number for the whole group, which should be
284
+ // in sync. If the returned number is not consistent across the group, it
285
+ // may indicate that there is some sort of collective desynchronization.
286
+ virtual uint64_t getSequenceNumberForGroup() {
287
+ auto backendName = getBackendName();
288
+ TORCH_CHECK(
289
+ false,
290
+ c10::str(
291
+ "Backend ",
292
+ backendName,
293
+ " does not yet support sequence numbers."));
294
+ }
295
+
296
+ virtual c10::intrusive_ptr<Work> send(
297
+ std::vector<at::Tensor>& /* tensors */,
298
+ int /* dstRank */,
299
+ int /* tag */) {
300
+ TORCH_CHECK(
301
+ false,
302
+ c10::str("Backend ", getBackendName(), " does not support send"));
303
+ }
304
+
305
+ virtual c10::intrusive_ptr<Work> recv(
306
+ std::vector<at::Tensor>& /* tensors */,
307
+ int /* srcRank */,
308
+ int /* tag */) {
309
+ TORCH_CHECK(
310
+ false,
311
+ c10::str("Backend ", getBackendName(), " does not support recv"));
312
+ }
313
+
314
+ virtual c10::intrusive_ptr<Work> recvAnysource(
315
+ std::vector<at::Tensor>& /* tensors */,
316
+ int /* tag */) {
317
+ TORCH_CHECK(
318
+ false,
319
+ c10::str(
320
+ "Backend ", getBackendName(), " does not support recvAnysource"));
321
+ }
322
+
323
+ virtual c10::intrusive_ptr<Work> barrier(
324
+ const BarrierOptions& /* opts */ = BarrierOptions()) {
325
+ TORCH_CHECK(
326
+ false,
327
+ c10::str("Backend ", getBackendName(), " does not support barrier"));
328
+ }
329
+
330
+ virtual void registerOnCompletionHook(
331
+ std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
332
+ TORCH_CHECK(
333
+ false,
334
+ "Only ProcessGroupNCCL supports onCompletion hook, but got ",
335
+ getBackendName(),
336
+ " backend.");
337
+ }
338
+
339
+ virtual void waitForPendingWorks() {
340
+ TORCH_CHECK(
341
+ false,
342
+ "Only ProcessGroupNCCL supports waitForPendingWorks, but got ",
343
+ getBackendName(),
344
+ " backend.");
345
+ }
346
+
347
+ virtual void enableCollectivesTiming() {
348
+ TORCH_CHECK(
349
+ false,
350
+ "Backend ",
351
+ getBackendName(),
352
+ " is missing implementation of enableCollectivesTiming.");
353
+ }
354
+
355
+ bool hasHooks() const {
356
+ return onCompletionHook_ != nullptr;
357
+ }
358
+
359
+ // Do not call this directly, use ProcessGroup::setGroupName instead.
360
+ void setGroupName(const std::string& name) {
361
+ pg_name_ = name;
362
+ }
363
+
364
+ const std::string& getGroupName() const {
365
+ return pg_name_;
366
+ }
367
+
368
+ protected:
369
+ // Implementations of this interface need to call this to setup
370
+ // appropriate logging etc.
371
+ void init();
372
+
373
+ const int rank_;
374
+ const int size_;
375
+ // Debug level setting. It is parsed once when ProcessGroup is constructed and
376
+ // remains the same across use of this process group.
377
+ DebugLevel dist_debug_level_;
378
+ std::string pg_name_;
379
+
380
+ std::function<void(std::shared_ptr<WorkInfo>)> onCompletionHook_;
381
+ };
382
+
383
+ } // namespace c10d
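Backend is intended to be subclassed; any collective that a subclass does not override keeps the TORCH_CHECK(false, "... does not support ...") fallback above. A minimal sketch of a custom backend, where `CompletedWork` is a hypothetical c10d::Work subclass that reports immediate completion:

    class LoopbackBackend : public c10d::Backend {
     public:
      LoopbackBackend(int rank, int size) : c10d::Backend(rank, size) {}

      const std::string getBackendName() const override {
        return "loopback";
      }

      c10::intrusive_ptr<c10d::Work> allreduce(
          std::vector<at::Tensor>& tensors,
          const c10d::AllreduceOptions& opts) override {
        // Single-rank group: there is nothing to combine across processes.
        return c10::make_intrusive<CompletedWork>();  // hypothetical Work subclass
      }
      // broadcast(), reduce(), etc. are not overridden here, so calling them
      // raises "Backend loopback does not support ..." from the base class.
    };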
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp ADDED
@@ -0,0 +1,63 @@
1
+ #pragma once
2
+
3
+ #include <sys/types.h>
4
+
5
+ #include <mutex>
6
+ #include <unordered_map>
7
+
8
+ #include <torch/csrc/distributed/c10d/Store.hpp>
9
+
10
+ namespace c10d {
11
+
12
+ class TORCH_API FileStore : public Store {
13
+ public:
14
+ explicit FileStore(std::string path, int numWorkers);
15
+
16
+ ~FileStore() override;
17
+
18
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
19
+
20
+ std::vector<uint8_t> compareSet(
21
+ const std::string& key,
22
+ const std::vector<uint8_t>& expectedValue,
23
+ const std::vector<uint8_t>& desiredValue) override;
24
+
25
+ std::vector<uint8_t> get(const std::string& key) override;
26
+
27
+ int64_t add(const std::string& key, int64_t value) override;
28
+
29
+ int64_t getNumKeys() override;
30
+
31
+ bool deleteKey(const std::string& key) override;
32
+
33
+ bool check(const std::vector<std::string>& keys) override;
34
+
35
+ void wait(const std::vector<std::string>& keys) override;
36
+
37
+ void wait(
38
+ const std::vector<std::string>& keys,
39
+ const std::chrono::milliseconds& timeout) override;
40
+
41
+ // Returns the path used by the FileStore.
42
+ const std::string& getPath() const noexcept {
43
+ return path_;
44
+ }
45
+
46
+ protected:
47
+ int64_t addHelper(const std::string& key, int64_t i);
48
+
49
+ std::string path_;
50
+ off_t pos_{0};
51
+
52
+ int numWorkers_;
53
+ const std::string cleanupKey_;
54
+ const std::string refCountKey_;
55
+ const std::string regularPrefix_;
56
+ const std::string deletePrefix_;
57
+
58
+ std::unordered_map<std::string, std::vector<uint8_t>> cache_;
59
+
60
+ std::mutex activeFileOpLock_;
61
+ };
62
+
63
+ } // namespace c10d
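An illustrative use of FileStore for rendezvous over a shared filesystem; the path and worker count are placeholders:

    auto store = c10::make_intrusive<c10d::FileStore>("/tmp/c10d_rendezvous", /*numWorkers=*/2);
    std::vector<uint8_t> addr = {'1', '2', '7', '.', '0', '.', '0', '.', '1'};
    store->set("rank0/address", addr);             // publish a value
    store->wait({"rank0/address"});                // block until the key exists
    std::vector<uint8_t> fetched = store->get("rank0/address");
    int64_t ready = store->add("workers_ready", 1);  // shared counter across workers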
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <string>
6
+
7
+ #include <c10/util/Registry.h>
8
+ #include <gloo/config.h>
9
+ #include <gloo/transport/device.h>
10
+
11
+ namespace c10d {
12
+
13
+ class TORCH_API GlooDeviceFactory {
14
+ public:
15
+ // Create new device instance for specific interface.
16
+ static std::shared_ptr<::gloo::transport::Device> makeDeviceForInterface(
17
+ const std::string& interface);
18
+
19
+ // Create new device instance for specific hostname or address.
20
+ static std::shared_ptr<::gloo::transport::Device> makeDeviceForHostname(
21
+ const std::string& hostname);
22
+ };
23
+
24
+ TORCH_DECLARE_SHARED_REGISTRY(
25
+ GlooDeviceRegistry,
26
+ ::gloo::transport::Device,
27
+ const std::string&, /* interface */
28
+ const std::string& /* hostname */);
29
+
30
+ } // namespace c10d
31
+
32
+ #endif // USE_C10D_GLOO
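An illustrative call into the factory; "eth0" and "localhost" are placeholders, and the code is only meaningful in builds where USE_C10D_GLOO is defined:

    std::shared_ptr<::gloo::transport::Device> byInterface =
        c10d::GlooDeviceFactory::makeDeviceForInterface("eth0");
    std::shared_ptr<::gloo::transport::Device> byHostname =
        c10d::GlooDeviceFactory::makeDeviceForHostname("localhost");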
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+
5
+ namespace c10d {
6
+
7
+ C10_EXPORT void register_process_group(
8
+ const std::string& group_name,
9
+ c10::intrusive_ptr<c10d::ProcessGroup> group);
10
+
11
+ C10_EXPORT c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
12
+ const std::string& group_name);
13
+
14
+ } // namespace c10d
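A short sketch of the registry pair, assuming `pg` is an existing c10::intrusive_ptr<c10d::ProcessGroup>:

    c10d::register_process_group("default_pg", pg);
    c10::intrusive_ptr<c10d::ProcessGroup> found =
        c10d::resolve_process_group("default_pg");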
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ #include <sys/types.h>
4
+
5
+ #include <condition_variable>
6
+ #include <mutex>
7
+ #include <unordered_map>
8
+
9
+ #include <torch/csrc/distributed/c10d/Store.hpp>
10
+
11
+ namespace c10d {
12
+
13
+ class TORCH_API HashStore : public Store {
14
+ public:
15
+ ~HashStore() override = default;
16
+
17
+ void set(const std::string& key, const std::vector<uint8_t>& data) override;
18
+
19
+ std::vector<uint8_t> compareSet(
20
+ const std::string& key,
21
+ const std::vector<uint8_t>& expectedValue,
22
+ const std::vector<uint8_t>& desiredValue) override;
23
+
24
+ std::vector<uint8_t> get(const std::string& key) override;
25
+
26
+ void wait(const std::vector<std::string>& keys) override {
27
+ wait(keys, Store::kDefaultTimeout);
28
+ }
29
+
30
+ void wait(
31
+ const std::vector<std::string>& keys,
32
+ const std::chrono::milliseconds& timeout) override;
33
+
34
+ int64_t add(const std::string& key, int64_t value) override;
35
+
36
+ int64_t getNumKeys() override;
37
+
38
+ bool check(const std::vector<std::string>& keys) override;
39
+
40
+ bool deleteKey(const std::string& key) override;
41
+
42
+ void append(const std::string& key, const std::vector<uint8_t>& value)
43
+ override;
44
+
45
+ std::vector<std::vector<uint8_t>> multiGet(
46
+ const std::vector<std::string>& keys) override;
47
+
48
+ void multiSet(
49
+ const std::vector<std::string>& keys,
50
+ const std::vector<std::vector<uint8_t>>& values) override;
51
+
52
+ // Returns true if this store supports append, multiGet, and multiSet.
53
+ bool hasExtendedApi() const override;
54
+
55
+ protected:
56
+ std::unordered_map<std::string, std::vector<uint8_t>> map_;
57
+ std::mutex m_;
58
+ std::condition_variable cv_;
59
+ };
60
+
61
+ } // namespace c10d
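An illustrative use of the in-memory HashStore, handy for single-process tests:

    auto store = c10::make_intrusive<c10d::HashStore>();
    store->set("key", std::vector<uint8_t>{1, 2, 3});
    if (store->hasExtendedApi()) {
      store->append("key", std::vector<uint8_t>{4});
      std::vector<std::vector<uint8_t>> values = store->multiGet({"key"});
    }
    int64_t numKeys = store->getNumKeys();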
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PrefixStore.hpp ADDED
@@ -0,0 +1,64 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Store.hpp>
4
+ #include <memory>
5
+
6
+ namespace c10d {
7
+
8
+ class TORCH_API PrefixStore : public Store {
9
+ public:
10
+ explicit PrefixStore(std::string prefix, c10::intrusive_ptr<Store> store);
11
+
12
+ using Store::set;
13
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
14
+
15
+ using Store::compareSet;
16
+ std::vector<uint8_t> compareSet(
17
+ const std::string& key,
18
+ const std::vector<uint8_t>& expectedValue,
19
+ const std::vector<uint8_t>& desiredValue) override;
20
+
21
+ std::vector<uint8_t> get(const std::string& key) override;
22
+
23
+ int64_t add(const std::string& key, int64_t value) override;
24
+
25
+ bool deleteKey(const std::string& key) override;
26
+
27
+ int64_t getNumKeys() override;
28
+
29
+ bool check(const std::vector<std::string>& keys) override;
30
+
31
+ void wait(const std::vector<std::string>& keys) override;
32
+
33
+ void wait(
34
+ const std::vector<std::string>& keys,
35
+ const std::chrono::milliseconds& timeout) override;
36
+
37
+ const std::chrono::milliseconds& getTimeout() const noexcept override;
38
+
39
+ void setTimeout(const std::chrono::milliseconds& timeout) override;
40
+
41
+ void append(const std::string& key, const std::vector<uint8_t>& value)
42
+ override;
43
+
44
+ std::vector<std::vector<uint8_t>> multiGet(
45
+ const std::vector<std::string>& keys) override;
46
+
47
+ void multiSet(
48
+ const std::vector<std::string>& keys,
49
+ const std::vector<std::vector<uint8_t>>& values) override;
50
+
51
+ // Returns true if this store supports append, multiGet, and multiSet.
52
+ bool hasExtendedApi() const override;
53
+
54
+ c10::intrusive_ptr<Store> getUnderlyingStore();
55
+
56
+ protected:
57
+ std::string prefix_;
58
+ c10::intrusive_ptr<Store> store_;
59
+
60
+ std::string joinKey(const std::string& key);
61
+ std::vector<std::string> joinKeys(const std::vector<std::string>& keys);
62
+ };
63
+
64
+ } // namespace c10d
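A short sketch of scoping an existing store, assuming `base` is a c10::intrusive_ptr<c10d::Store> created elsewhere (for example a FileStore):

    auto scoped = c10::make_intrusive<c10d::PrefixStore>("group1", base);
    scoped->set("rank", std::vector<uint8_t>{'0'});  // key is transparently joined with the "group1" prefix
    c10::intrusive_ptr<c10d::Store> raw = scoped->getUnderlyingStore();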
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp ADDED
@@ -0,0 +1,918 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_NCCL
4
+
5
+ #include <chrono>
6
+ #include <iostream>
7
+ #include <list>
8
+ #include <mutex>
9
+ #include <thread>
10
+ #include <unordered_map>
11
+
12
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
13
+ #include <torch/csrc/distributed/c10d/NCCLUtils.hpp>
14
+ #include <torch/csrc/distributed/c10d/Store.hpp>
15
+
16
+ #include <ATen/DynamicLibrary.h>
17
+ #include <ATen/cuda/CUDAContext.h>
18
+ #include <ATen/cuda/CUDAEvent.h>
19
+ #include <c10/core/Stream.h>
20
+ #include <c10/core/StreamGuard.h>
21
+ #include <c10/cuda/CUDACachingAllocator.h>
22
+ #include <c10/cuda/CUDAGuard.h>
23
+ #include <c10/cuda/CUDAStream.h>
24
+
25
+ #include <torch/custom_class.h>
26
+
27
+ namespace c10d {
28
+ // Environment variable which controls whether we perform an NCCL health check
29
+ // which ensures communicators are healthy at the beginning of init.
30
+ static std::vector<std::string> TORCH_ENABLE_NCCL_HEALTH_CHECK = {
31
+ "TORCH_ENABLE_NCCL_HEALTH_CHECK",
32
+ "ENABLE_NCCL_HEALTH_CHECK"};
33
+
34
+ // Environment variable which controls whether or not wait() is blocking or
35
+ // non-blocking.
36
+ static std::vector<std::string> TORCH_NCCL_BLOCKING_WAIT = {
37
+ "TORCH_NCCL_BLOCKING_WAIT",
38
+ "NCCL_BLOCKING_WAIT"};
39
+
40
+ // Environment variable which controls whether or not we perform Async Error
41
+ // Handling with NCCL.
42
+ static std::vector<std::string> TORCH_NCCL_ASYNC_ERROR_HANDLING = {
43
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING",
44
+ "NCCL_ASYNC_ERROR_HANDLING"};
45
+
46
+ // Environment Variable to control whether dumping debug info on watchdog
47
+ // timeout is enabled. This variable must be set together with
48
+ // TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0.
49
+ static std::vector<std::string> TORCH_NCCL_DUMP_ON_TIMEOUT = {
50
+ "TORCH_NCCL_DUMP_ON_TIMEOUT"};
51
+
52
+ // Environment Variable to control whether Desync Debug is enabled.
53
+ // This variable must be set together with TORCH_NCCL_ASYNC_ERROR_HANDLING.
54
+ static std::vector<std::string> TORCH_NCCL_DESYNC_DEBUG = {
55
+ "TORCH_NCCL_DESYNC_DEBUG",
56
+ "NCCL_DESYNC_DEBUG"};
57
+
58
+ static std::vector<std::string> TORCH_NCCL_ENABLE_TIMING = {
59
+ "TORCH_NCCL_ENABLE_TIMING",
60
+ "NCCL_ENABLE_TIMING"};
61
+
62
+ static std::vector<std::string> TORCH_NCCL_ENABLE_MONITORING = {
63
+ "TORCH_NCCL_ENABLE_MONITORING"};
64
+
65
+ static std::vector<std::string> TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = {
66
+ "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"};
67
+
68
+ static std::vector<std::string> TORCH_NCCL_TRACE_BUFFER_SIZE = {
69
+ "TORCH_NCCL_TRACE_BUFFER_SIZE"};
70
+
71
+ constexpr const char* NCCL_BACKEND_NAME = "nccl";
72
+
73
+ constexpr auto kProcessGroupNCCLDefaultTimeout =
74
+ std::chrono::milliseconds(10 * 60 * 1000);
75
+
76
+ // NoHandling: do not handle asynchronous NCCL errors
77
+ // TearDown: tear down process upon error, see `WorkNCCL::handleException`
78
+ // CleanUpOnly: just clean up collectives and abort communicators without
79
+ // tearing down process SkipCleanUp: (this is a temporary option and can be
80
+ // removed in future) tear down process without cleaning up NCCL communicators.
81
+ // This should be used as a last resort in case `ncclCommAbort` itself is
82
+ // hanging
83
+ enum ErrorHandlingMode {
84
+ NoHandling = 0,
85
+ TearDown = 1,
86
+ CleanUpOnly = 2,
87
+ SkipCleanUp = 3
88
+ };
89
+
90
+ #define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp)
91
+
92
+ #define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly)
93
+
94
+ // If set, ProcessGroupNCCL doesn't use recordStream calls to ensure
95
+ // caching allocator safety for tensors used on both user-facing and
96
+ // internal comm streams.
97
+ // Instead, it stashes live references to those tensors until after
98
+ // user-facing streams are synced with comm streams.
99
+ // See stashed_for_allocator_safety_ below.
100
+ static std::vector<std::string> TORCH_NCCL_AVOID_RECORD_STREAMS = {
101
+ "TORCH_NCCL_AVOID_RECORD_STREAMS"};
102
+
103
+ // If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda cache
104
+ // allocator so that whenever a tensor is allocated or freed, ProcessGroupNCCL
105
+ // can register/deregister the tensor on all available NCCL communicators.
106
+ static std::vector<std::string> TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK =
107
+ {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK",
108
+ "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"};
109
+
110
+ // ProcessGroupNCCL implements NCCL bindings for c10d.
111
+ //
112
+ // All functions of the class are expected to be called in the same order
113
+ // across all processes in the process group. This is the only way that we
114
+ // can guarantee to match up the same calls among all processes.
115
+ //
116
+ // All NCCL functions provided by this class are asynchronous functions. More
117
+ // specifically, each NCCL call is scheduled on a separate CUDA stream that is
118
+ // different from the current CUDA stream. This is for the purpose of
119
+ // potentially achieving concurrency and better performance. As a result,
120
+ // it is the caller's responsibility to make sure that the CUDA stream their
121
+ // code works on waits for the NCCL operation from
122
+ // this class.
123
+ //
124
+ // This can be done by calling:
125
+ //
126
+ // either WorkNCCL::wait() or WorkNCCL::synchronize(); both achieve the same
127
+ // functionality and are synonyms.
128
+ //
129
+ // Also note that WorkNCCL::finishedGPUExecution() is a helper function only
130
+ // provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has
131
+ // finished execution on the GPU (not just scheduled).
132
+ //
133
+ // Example of using the NCCL process group:
134
+ //
135
+ // ProcessGroupNCCL pg(store, rank, size);
136
+ // std::shared_ptr<WorkNCCL> work = pg.allreduce(tensors);
137
+ //
138
+ // // At this point, the NCCL kernel has already been queued successfully
139
+ // // Now, let the current stream wait for the NCCL op to finish; this is an
140
+ // // async operation as well
141
+ //
142
+ // work->wait()
143
+ //
144
+ // // Now continue on other work in the current stream.
145
+ class TORCH_API ProcessGroupNCCL : public Backend {
146
+ public:
147
+ class WorkNCCL : public Work, public std::enable_shared_from_this<WorkNCCL> {
148
+ public:
149
+ friend struct WorkInfo;
150
+
151
+ // Constructor takes a list of CUDA devices
152
+ WorkNCCL(
153
+ const std::vector<at::Device>& devices,
154
+ int rank,
155
+ OpType opType,
156
+ uint64_t seq,
157
+ const char* profilingTitle = nullptr,
158
+ const c10::optional<std::vector<at::Tensor>>& inputs = c10::nullopt,
159
+ bool desyncDebug = false,
160
+ bool enableTiming = false);
161
+ // Copy constructor that does a partial copy without outputs_. The cleanup thread
162
+ // monitors and removes finished works. However, it would deadlock when
163
+ // destructing outputs_ tensors that are view tensors in an autograd graph.
164
+ WorkNCCL(const WorkNCCL& w);
165
+
166
+ ~WorkNCCL() override;
167
+
168
+ // Checks if the NCCL kernel has started to execute.
169
+ bool isStarted();
170
+
171
+ // Checks if request has completed. In this specific case of NCCL, it checks
172
+ // if the NCCL operation has completed on the GPU in its own NCCL stream.
173
+ // Non-blocking operation.
174
+ bool isCompleted() override;
175
+
176
+ bool isSuccess() const override;
177
+
178
+ // Same as calling synchronize() for NCCL work.
179
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
180
+
181
+ void abort() override;
182
+
183
+ // Let the current stream wait on the completion of the NCCL work.
184
+ // Throws on exceptions. Blocking operation, which will wait for work
185
+ // completion.
186
+ void synchronize() override;
187
+
188
+ // Synchronize streams by blocking each on the NCCL stream
189
+ void synchronizeStreams();
190
+
191
+ // Helper function to handle exception (throw if needed).
192
+ void handleException(ErrorHandlingMode asyncErrorHandling);
193
+
194
+ // Helper function that checks if the NCCL kernels have finished
195
+ // execution on the GPUs
196
+ bool finishedGPUExecution();
197
+
198
+ // Get a Future object that will be marked as completed internally.
199
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
200
+
201
+ float getDuration() const override;
202
+
203
+ uint64_t getSequencenumber() const override;
204
+
205
+ // Helper function that sets an exception_ptr on the WorkNCCL object.
206
+ void setException(std::exception_ptr exception_ptr);
207
+
208
+ // Helper function that returns True if the WorkNCCL object has timed out
209
+ // and False otherwise.
210
+ // In case of timeout, set exception on the WorkNCCL object.
211
+ bool checkTimeout(
212
+ c10::optional<std::chrono::milliseconds> timeout = c10::nullopt);
213
+
214
+ std::vector<at::Tensor> result() override;
215
+
216
+ protected:
217
+ // The cached list of CUDA devices to operate on
218
+ std::vector<at::Device> devices_;
219
+
220
+ // The start CUDA events of NCCL operator tracking this work item on
221
+ // multiple CUDA devices. These start CUDA events are needed by desync
222
+ // debugging if enabled.
223
+ std::shared_ptr<std::vector<at::cuda::CUDAEvent>> ncclStartEvents_;
224
+
225
+ // The end CUDA events of NCCL operator tracking this work item on
226
+ // multiple CUDA devices.
227
+ std::shared_ptr<std::vector<at::cuda::CUDAEvent>> ncclEndEvents_;
228
+
229
+ // The NCCL communicators used for this work item.
230
+ std::vector<std::shared_ptr<NCCLComm>> ncclComms_;
231
+
232
+ // Tensors used for barrier op
233
+ std::vector<at::Tensor> barrierTensors_;
234
+
235
+ // Clone of blockingWait_ from ProcessGroupNCCL.
236
+ bool blockingWait_ = false;
237
+
238
+ // Clone of avoidRecordStreams_ from ProcessGroupNCCL.
239
+ bool avoidRecordStreams_ = false;
240
+
241
+ // Clone of opTimeout_ from ProcessGroupNCCL.
242
+ std::chrono::milliseconds opTimeout_;
243
+
244
+ // Time point representing when the work started.
245
+ std::chrono::time_point<std::chrono::steady_clock> workStartTime_;
246
+
247
+ // Record the collective sequential number.
248
+ uint64_t seq_;
249
+
250
+ // Indicates if the nccl start event has been updated to the store trace.
251
+ // This will be used by desync debug.
252
+ bool startTraceUpdated_{false};
253
+
254
+ // Record collective sizes for debug. We only record the size on the first
255
+ // device as multi-device per process is deprecated
256
+ size_t numelIn_ = -1;
257
+ size_t numelOut_ = -1;
258
+
259
+ // Wrapper method for the static checkForNCCLErrors which can be overridden
260
+ // for tests.
261
+ virtual std::exception_ptr checkForNCCLErrors(
262
+ const std::vector<std::shared_ptr<NCCLComm>>& ncclComms) const;
263
+
264
+ friend std::ostream& operator<<(
265
+ std::ostream& output,
266
+ const WorkNCCL& workNCCL);
267
+
268
+ private:
269
+ // Helper function for synchronize
270
+ void synchronizeInternal(std::chrono::milliseconds timeout);
271
+
272
+ // Checks for NCCL errors and sets an appropriate exception_ptr.
273
+ void checkAndSetException();
274
+
275
+ // Just checks whether GPU execution has started, without modifying
276
+ // exception_ptr.
277
+ bool startedGPUExecutionInternal() const;
278
+
279
+ // Just checks whether GPU execution has completed, without modifying
280
+ // exception_ptr.
281
+ bool finishedGPUExecutionInternal() const;
282
+
283
+ // Reference to the store so that we can write aborted communicators
284
+ // to the store.
285
+ c10::intrusive_ptr<Store> store_;
286
+
287
+ // Store a reference to NCCL collective's outputs, used by result and to
288
+ // give a more descriptive message when representing the Work as a string.
289
+ std::shared_ptr<std::vector<at::Tensor>> outputs_;
290
+
291
+ // TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper.
292
+ // Stores references to participating non-output tensors (ie inputs,
293
+ // flattened intermediates).
294
+ // We'll clear this list in synchronizeStreams, just after user-facing
295
+ // stream(s) are synced with the nccl work stream(s).
296
+ // By keeping these refs (as well as outputs_) alive until after the
297
+ // collective's work rejoins the user-facing streams, we achieve
298
+ // caching allocator safety without any recordStream calls.
299
+ // For in-place collectives, some refs stashed here may alias outputs_,
300
+ // but that doesn't do any harm.
301
+ std::shared_ptr<std::vector<at::Tensor>> stashed_for_allocator_safety_;
302
+
303
+ // The future returned by getFuture.
304
+ c10::intrusive_ptr<at::ivalue::Future> future_;
305
+
306
+ bool timingEnabled_;
307
+ // unique id used to tell the trace buffer that this
308
+ // work has completed
309
+ c10::optional<uint64_t> trace_id_;
310
+ friend class ProcessGroupNCCL;
311
+ };
312
+
313
+ class CoalescedWorkNCCL
314
+ : public Work,
315
+ public std::enable_shared_from_this<CoalescedWorkNCCL> {
316
+ public:
317
+ // Constructor takes a list of WorkNCCL works
318
+ CoalescedWorkNCCL(
319
+ std::vector<ProcessGroupNCCL::WorkNCCL> works,
320
+ int rank,
321
+ OpType opType);
322
+
323
+ ~CoalescedWorkNCCL() override;
324
+
325
+ // Same as calling synchronize() for NCCL work.
326
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
327
+
328
+ protected:
329
+ // The list of WorkNCCL works that were coalesced into this work object
330
+ std::vector<ProcessGroupNCCL::WorkNCCL> works_;
331
+
332
+ friend class ProcessGroupNCCL;
333
+ };
334
+
335
+ struct Options : Backend::Options {
336
+ // NOTE: timeout in ProcessGroupNCCL::Options denotes the timeout for
337
+ // operations. This is only used when blockingWait_ is enabled.
338
+ explicit Options(bool is_high_priority_stream = false);
339
+
340
+ // return intrusive_ptr of the object
341
+ static c10::intrusive_ptr<Options> create(
342
+ bool is_high_priority_stream = false) {
343
+ return c10::make_intrusive<Options>(is_high_priority_stream);
344
+ }
345
+
346
+ // Schedule NCCL operations on high priority CUDA streams
347
+ bool is_high_priority_stream;
348
+
349
+ #ifdef NCCL_HAS_COMM_NONBLOCKING
350
+ // Configure ranks
351
+ ncclConfig_t config = NCCL_CONFIG_INITIALIZER;
352
+ #endif
353
+
354
+ // Optional "parent" backend and color to create communicators from
355
+ // via `ncclCommSplit`
356
+ std::shared_ptr<ProcessGroupNCCL> split_from;
357
+ int64_t split_color{0};
358
+ };
359
+
360
+ // If you wish to create multiple process groups, each with a potentially
361
+ // different rank and size, you can do so by passing a new store instance
362
+ // to each one. If you have only a single store object, you can
363
+ // use the `c10d::PrefixStore` to derive scoped instances.
364
+ // This is also what the Python API in torch.distributed does.
365
+ //
366
+ // The process group instance keeps a reference to the store because
367
+ // it may be used long after the constructor runs. In fact, the constructor
368
+ // doesn't create any NCCL communicators. A single NCCL communicator can
369
+ // only be used on a specific set of devices, and is therefore created
370
+ // on-demand when a collective runs. If another collective is executed later,
371
+ // against a different set of devices, the process group creates another NCCL
372
+ // communicator. These NCCL communicators are cached and reused if possible.
373
+ //
374
+ ProcessGroupNCCL(
375
+ const c10::intrusive_ptr<Store>& store,
376
+ int rank,
377
+ int size,
378
+ c10::intrusive_ptr<Options> options = Options::create());
379
+
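Expanding the class-level usage comment above into a compilable sketch; `store`, `rank`, and `size` are assumed to come from the application's own rendezvous, and the tensor must live on this rank's CUDA device:

    auto scoped = c10::make_intrusive<c10d::PrefixStore>("nccl_pg", store);
    auto options = c10d::ProcessGroupNCCL::Options::create(/*is_high_priority_stream=*/false);
    auto pg = c10::make_intrusive<c10d::ProcessGroupNCCL>(scoped, rank, size, options);

    std::vector<at::Tensor> tensors = {
        at::ones({8}, at::TensorOptions().device(at::kCUDA).dtype(at::kFloat))};
    c10::intrusive_ptr<c10d::Work> work = pg->allreduce(tensors);
    work->wait();  // make the current stream wait for the NCCL result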
380
+ // This constructor includes the deprecated `groupName` argument.
381
+ // If you have existing code that uses the `groupName`, you can replace
382
+ // it by specifying a `c10d::PrefixStore(groupName, store)` for store.
383
+ C10_DEPRECATED ProcessGroupNCCL(
384
+ const c10::intrusive_ptr<Store>& store,
385
+ int rank,
386
+ int size,
387
+ const std::string& groupName,
388
+ c10::intrusive_ptr<Options> options = Options::create())
389
+ : ProcessGroupNCCL(store, rank, size, options) {}
390
+
391
+ ~ProcessGroupNCCL() override;
392
+
393
+ c10::intrusive_ptr<Options> getOptions() {
394
+ return options_;
395
+ }
396
+
397
+ const std::string getBackendName() const override {
398
+ return std::string(NCCL_BACKEND_NAME);
399
+ }
400
+
401
+ void startCoalescing() override;
402
+
403
+ c10::intrusive_ptr<Work> endCoalescing() override;
404
+
405
+ c10::intrusive_ptr<Work> broadcast(
406
+ std::vector<at::Tensor>& tensors,
407
+ const BroadcastOptions& opts = BroadcastOptions()) override;
408
+
409
+ c10::intrusive_ptr<Work> _broadcast_oop(
410
+ std::vector<at::Tensor>& outputTensors,
411
+ std::vector<at::Tensor>& inputTensors,
412
+ const BroadcastOptions& opts = BroadcastOptions());
413
+
414
+ c10::intrusive_ptr<Work> allreduce_sparse(
415
+ std::vector<at::Tensor>& tensors,
416
+ const AllreduceOptions& opts = AllreduceOptions()) override;
417
+
418
+ c10::intrusive_ptr<Work> allreduce(
419
+ std::vector<at::Tensor>& tensors,
420
+ const AllreduceOptions& opts = AllreduceOptions()) override;
421
+
422
+ c10::intrusive_ptr<Work> allreduce_coalesced(
423
+ std::vector<at::Tensor>& tensors,
424
+ const AllreduceCoalescedOptions& opts =
425
+ AllreduceCoalescedOptions()) override;
426
+
427
+ c10::intrusive_ptr<Work> reduce(
428
+ std::vector<at::Tensor>& tensors,
429
+ const ReduceOptions& opts = ReduceOptions()) override;
430
+
431
+ c10::intrusive_ptr<Work> _reduce_oop(
432
+ std::vector<at::Tensor>& outputTensors,
433
+ std::vector<at::Tensor>& inputTensors,
434
+ const ReduceOptions& opts = ReduceOptions());
435
+
436
+ c10::intrusive_ptr<Work> allgather(
437
+ std::vector<std::vector<at::Tensor>>& outputTensors,
438
+ std::vector<at::Tensor>& inputTensors,
439
+ const AllgatherOptions& opts = AllgatherOptions()) override;
440
+
441
+ c10::intrusive_ptr<Work> _allgather_base(
442
+ at::Tensor& outputbuffer,
443
+ at::Tensor& inputbuffer,
444
+ const AllgatherOptions& opts = AllgatherOptions()) override;
445
+
446
+ c10::intrusive_ptr<Work> allgather_coalesced(
447
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
448
+ std::vector<at::Tensor>& inputTensors,
449
+ const AllgatherOptions& opts = AllgatherOptions()) override;
450
+
451
+ c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
452
+ std::vector<at::Tensor>& outputs,
453
+ std::vector<at::Tensor>& inputs,
454
+ const AllgatherOptions& opts = AllgatherOptions()) override;
455
+
456
+ c10::intrusive_ptr<Work> reduce_scatter(
457
+ std::vector<at::Tensor>& outputTensors,
458
+ std::vector<std::vector<at::Tensor>>& inputTensors,
459
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
460
+
461
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
462
+ at::Tensor& outputTensor,
463
+ at::Tensor& inputTensor,
464
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
465
+
466
+ c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
467
+ std::vector<at::Tensor>& outputs,
468
+ std::vector<at::Tensor>& inputs,
469
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
470
+
471
+ c10::intrusive_ptr<Work> barrier(
472
+ const BarrierOptions& opts = BarrierOptions()) override;
473
+
474
+ c10::intrusive_ptr<Work> alltoall_base(
475
+ at::Tensor& outputTensor,
476
+ at::Tensor& inputTensor,
477
+ std::vector<int64_t>& outputSplitSizes,
478
+ std::vector<int64_t>& inputSplitSizes,
479
+ const AllToAllOptions& opts = AllToAllOptions()) override;
480
+
481
+ c10::intrusive_ptr<Work> alltoall(
482
+ std::vector<at::Tensor>& outputTensors,
483
+ std::vector<at::Tensor>& inputTensors,
484
+ const AllToAllOptions& opts = AllToAllOptions()) override;
485
+
486
+ c10::intrusive_ptr<Work> send(
487
+ std::vector<at::Tensor>& tensors,
488
+ int dstRank,
489
+ int tag) override;
490
+
491
+ c10::intrusive_ptr<Work> recv(
492
+ std::vector<at::Tensor>& tensors,
493
+ int srcRank,
494
+ int tag) override;
495
+
496
+ void groupStart();
497
+
498
+ void groupEnd();
499
+
500
+ void groupEndNonblocking(std::vector<std::shared_ptr<NCCLComm>> comms);
501
+
502
+ // Unsupported Ops
503
+ c10::intrusive_ptr<Work> gather(
504
+ std::vector<std::vector<at::Tensor>>& outputTensors,
505
+ std::vector<at::Tensor>& inputTensors,
506
+ const GatherOptions& opts = GatherOptions()) override;
507
+
508
+ c10::intrusive_ptr<Work> scatter(
509
+ std::vector<at::Tensor>& outputTensors,
510
+ std::vector<std::vector<at::Tensor>>& inputTensors,
511
+ const ScatterOptions& opts = ScatterOptions()) override;
512
+
513
+ c10::intrusive_ptr<Work> recvAnysource(
514
+ std::vector<at::Tensor>& tensors,
515
+ int tag) override;
516
+
517
+ // Agrees on an initial sequence number for the whole group by having rank 0
518
+ // create it and broadcast it to other ranks using the store.
519
+ void setSequenceNumberForGroup() override;
520
+
521
+ // Retrieves the current sequence number for the whole group, which should be
522
+ // in sync. If the returned number is not consistent across the group, it
523
+ // may indicate that there is some sort of collective desynchronization.
524
+ uint64_t getSequenceNumberForGroup() override;
525
+
526
+ // Return the total number of splits the communicators held by this process
527
+ // group have performed.
528
+ uint64_t getCommSplitCounter() const;
529
+
530
+ void registerOnCompletionHook(
531
+ std::function<void(std::shared_ptr<WorkInfo>)>&& hook) override;
532
+ void waitForPendingWorks() override;
533
+
534
+ void enableCollectivesTiming() override;
535
+
536
+ // Provide an API for users to define their own ways to store NCCL debug info.
537
+ void registerDebugInfoWriter(std::unique_ptr<DebugInfoWriter> writer);
538
+
539
+ // Provides an API to abort the ProcessGroup (similar to ncclCommAbort)
540
+ // instead of relying on ProcessGroupNCCL destructor.
541
+ void abort(c10::optional<std::string> abortReason = c10::nullopt);
542
+
543
+ void shutdown();
544
+
545
+ protected:
546
+ // Helper that broadcasts nccl unique ID to all ranks through the store
547
+ void broadcastUniqueNCCLID(
548
+ ncclUniqueId* ncclID,
549
+ bool isSingleP2POp,
550
+ const std::string& devicesKey,
551
+ int p2pRank);
552
+
553
+ // Helper that either looks up the cached NCCL communicators or creates
554
+ // a new set of NCCL communicators as a cache entry
555
+ std::vector<std::shared_ptr<NCCLComm>>& getNCCLComm(
556
+ const std::string& devicesKey,
557
+ const std::vector<at::Device>& devices,
558
+ OpType opType,
559
+ int p2pRank = 0,
560
+ bool isSendRecvSelf = false);
561
+
562
+ // Wrapper method which can be overridden for tests.
563
+ virtual std::exception_ptr checkForNCCLErrors(
564
+ const std::vector<std::shared_ptr<NCCLComm>>& ncclComms);
565
+
566
+ virtual c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL> initWork(
567
+ std::vector<at::Device> devices,
568
+ int rank,
569
+ OpType opType,
570
+ const char* profilingTitle = nullptr,
571
+ const std::vector<at::Tensor>& inputs = {},
572
+ const std::vector<at::Tensor>& outputs = {});
573
+
574
+ virtual c10::intrusive_ptr<ProcessGroupNCCL::CoalescedWorkNCCL>
575
+ initCoalescedWork(
576
+ const std::vector<c10::intrusive_ptr<Work>>& works,
577
+ int rank,
578
+ OpType opType);
579
+
580
+ private:
581
+ // Helper that encapsulates work shared across all collective communication
582
+ // primitives. The callbacks have the following signatures:
583
+ //
584
+ // ncclResult_t fn(at::Tensor& input, at::Tensor& output,
585
+ // ncclComm_t, at::cuda::CUDAStream&);
586
+ // void {pre,post}(std::vector<at::cuda::CUDAStream&>);
587
+ template <typename Fn>
588
+ c10::intrusive_ptr<Work> collective(
589
+ std::vector<at::Tensor>& input,
590
+ std::vector<at::Tensor>& output,
591
+ Fn fn,
592
+ OpType opType,
593
+ const char* profilingTitle = nullptr,
594
+ bool avoidRecordStreams = false);
595
+
596
+ template <typename Fn, typename PreProcess, typename PostProcess>
597
+ c10::intrusive_ptr<Work> collective(
598
+ std::vector<at::Tensor>& input,
599
+ std::vector<at::Tensor>& output,
600
+ Fn fn,
601
+ PreProcess pre,
602
+ PostProcess post,
603
+ OpType opType,
604
+ const char* profilingTitle = nullptr,
605
+ bool avoidRecordStreams = false);
606
+
607
+ // Helper that encapsulates work shared across point-to-point communication
608
+ // primitives. It is the same structure as the helper used for collective
609
+ // communication primitives.
610
+ template <typename Fn>
611
+ c10::intrusive_ptr<Work> pointToPoint(
612
+ std::vector<at::Tensor>& tensor,
613
+ Fn fn,
614
+ int peer,
615
+ OpType opType,
616
+ const char* profilingTitle = nullptr);
617
+ template <typename Fn, typename PreProcess, typename PostProcess>
618
+ c10::intrusive_ptr<Work> pointToPoint(
619
+ std::vector<at::Tensor>& tensor,
620
+ Fn fn,
621
+ int peer,
622
+ OpType opType,
623
+ PreProcess pre,
624
+ PostProcess post,
625
+ const char* profilingTitle);
626
+
627
+ c10::intrusive_ptr<Work> allreduce_impl(
628
+ std::vector<at::Tensor>& tensors,
629
+ const AllreduceOptions& opts = AllreduceOptions());
630
+
631
+ // Checks for NCCL errors on each of the communicators and returns an
632
+ // appropriate exception_ptr (nullptr if no errors).
633
+ static std::exception_ptr checkForNCCLErrorsInternal(
634
+ const std::vector<std::shared_ptr<NCCLComm>>& ncclComms);
635
+
636
+ // Function that runs as part of a separate thread and checks for errors on
637
+ // NCCL communicators. We need a separate thread to check for NCCL errors
638
+ // since we can't rely on the user calling certain methods like wait(),
639
+ // isCompleted() etc. to detect and remediate errors. In addition to this, we
640
+ // need a mechanism to safely abort and remove NCCL communicators from our
641
+ // cache. This can be done cleanly by having a thread for the ProcessGroupNCCL
642
+ // class. Attempting to modify the communicator cache from the WorkNCCL class
643
+ // might run into issues with object lifetime since the ProcessGroupNCCL
644
+ // object might get destroyed before the WorkNCCL object.
645
+ void ncclCommWatchdog();
646
+
647
+ // Performs a health check by initializing dummy NCCL communicators and then
648
+ // destroying them. This will help indicate and signal any NCCL-related issues
649
+ // prior to the first collective. The actual initialization and subsequent
650
+ // destruction is run on a separate thread and the main thread is signalled
651
+ // about timeouts/errors to report to the application.
652
+ void runHealthCheck();
653
+
654
+ // Destroys initialized NCCL communicators in devNCCLComMap_ given by input
655
+ // key. Throws if there are no communicators to destroy. Also removes
656
+ // communicators from the cache and clears used device indices.
657
+ void destroyNCCLComms(const std::string& devNCCLCommMapKey);
658
+
659
+ // Watchdog's inside loop.
660
+ // Takes care of cleaning up completed work, and aborting upon failure or
661
+ // timeout.
662
+ void workCleanupLoop();
663
+
664
+ void runHookLoop();
665
+
666
+ // In the timeout case, we will dump debug info such as the NCCL flight
667
+ // recorder to storage. Down the road, if we have more complicated or blocking
668
+ // operations, we might need to use a side thread to do it.
669
+ void dumpDebuggingInfo();
670
+
671
+ // Desync debug helper
672
+ void logWorkStart(WorkNCCL& work);
673
+
674
+ // Desync debug helper
675
+ void logWorkEnd(WorkNCCL& work);
676
+
677
+ protected:
678
+ // Function that runs as part of a separate thread aside from watchdog
679
+ // thread because we need to check the heartbeat from watchdog thread
680
+ // so that when we get stuck in some NCCL/CUDA calls,
681
+ // we can dump the debugging information and abort the process.
682
+ virtual void heartbeatMonitor();
683
+
684
+ // Function that directly trigger std::abort so that the whole process
685
+ // gets terminated.
686
+ virtual void terminateProcess(std::string errMsg);
687
+
688
+ // Checks the writeDebugInfo_ flag; if it is already true, we do nothing.
689
+ // Otherwise, we first set the flag to true and return a thread which will
690
+ // get and write the debug info into storage.
691
+ c10::optional<std::thread> tryWriteDebugInfo();
692
+
693
+ // When the watchdog times out, this function is called to return debug info
694
+ // for users. For now we only get information from retrieveDesyncReport.
695
+ // We are working on enabling more useful debug information for watchdog
696
+ // timeout.
697
+ virtual std::string getNCCLWatchdogDebugInfo();
698
+
699
+ static const int64_t kWatchdogThreadSleepMillis;
700
+
701
+ // The store is used to broadcast the NCCL unique ID of rank 0.
702
+ c10::intrusive_ptr<Store> store_;
703
+
704
+ bool storeError_{false};
705
+
706
+ const c10::intrusive_ptr<Options> options_;
707
+
708
+ // The number of NCCL communicators that have been created during
709
+ // the lifetime of this process group. This sequence number is
710
+ // used to scope keys used in the store.
711
+ uint64_t ncclCommCounter_{0};
712
+
713
+ // The store keys to trace the last NCCL collective kernel CUDA events - start
714
+ // event and end event respectively. These are used to do desync root cause
715
+ // analysis.
716
+ const std::string traceKeyStart_;
717
+ const std::string traceKeyEnd_;
718
+
719
+ // The NCCL communicator that the process group has cached.
720
+ //
721
+ // For collective operations:
722
+ // The key is a list of GPU devices that an operation is operating on
723
+ // The GPU devices are stored in a device sequence and the cached NCCL
724
+ // communicator is associated with this GPU device sequence
725
+ //
726
+ // e.g. If the process group op only uses device 0, then the value of
727
+ // the used device string stored (value of the hashmap) would be "0".
728
+ //
729
+ // If the process group op uses devices 0 - 7 and each tensor of the
730
+ // input tensor list is on devices 0, 1, 2, 3, 4, 5, 6, 7 respectively,
731
+ // then the value of the used device string (key) stored would be
732
+ // "0,1,2,3,4,5,6,7"
733
+ //
734
+ // If the process group op uses devices 0 - 7 and each tensor of the
735
+ // input tensor list is on devices 0, 4, 5, 6, 7, 1, 2, 3 respectively,
736
+ // then the value of the used device string stored would be
737
+ // "0,4,5,6,7,1,2,3"
738
+ //
739
+ // Note that the order of the device for the tensor list matters.
740
+ //
741
+ // For point-to-point operations:
742
+ // The key is a string of my current rank and the peer process rank.
743
+ // e.g. If process 1 and process 2 are involved in a point-to-point
744
+ // communication, the key will be "1:2" on both processes. Note: this is for
745
+ // the scenario where there is only 1 GPU per process. When it comes to
746
+ // multiple GPUs per process, this part may need to be redesigned.
747
+ std::unordered_map<std::string, std::vector<std::shared_ptr<NCCLComm>>>
748
+ devNCCLCommMap_;
749
+
750
+ // The NCCL communicators currently in process of being initialized.
751
+ std::unordered_map<std::string, std::vector<std::shared_ptr<NCCLComm>>>
752
+ inInitializationCommMap_;
753
+
754
+ // Map from ncclUniqueId to appropriate communicator.
755
+ std::unordered_map<std::string, std::vector<std::shared_ptr<NCCLComm>>>
756
+ ncclIdToCommMap_;
757
+
758
+ // Mutex to guard maps like devNCCLCommMap_ and ncclIdToCommMap_.
759
+ std::mutex mutex_;
760
+
761
+ // Heartbeat of watchdog thread.
762
+ uint64_t heartbeat_;
763
+
764
+ // The time interval used for deciding whether there is no watchdog heartbeat.
765
+ int heartbeatTimeoutInSec_;
766
+
767
+ // Size of ring buffer where we store NCCL Traces for debugging.
768
+ int ncclTraceBufferSize_;
769
+
770
+ // We gate the heartbeat monitor thread so that we can roll it out gradually.
771
+ std::atomic<bool> monitorThreadEnabled_;
772
+
773
+ // Monitor thread which checks the heartbeat of Watchdog thread.
774
+ // If the monitor thread finds there is no heartbeat, it will dump debug info
775
+ // and then kill the watchdog thread to avoid hang.
776
+ std::thread ncclHeartbeatMonitorThread_;
777
+
778
+ // Watchdog thread which looks for errors on the cached NCCL communicators.
779
+ std::thread ncclCommWatchdogThread_;
780
+
781
+ std::thread onCompletionHookThread_;
782
+
783
+ // Whether or not we should terminate the watchdog and workCleanup threads.
784
+ std::atomic<bool> terminateProcessGroup_;
785
+
786
+ // Whether or not we should terminate the heartbeat monitoring threads.
787
+ std::atomic<bool> terminateHeartbeatMonitorThread_;
788
+
789
+ // Whether we are in the shutdown mode when we are trying to get debug info,
790
+ // such as desync report.
791
+ std::atomic<bool> collectiveDebugInfoMode_;
792
+
793
+ // Whether there are hooks pending to be fired
794
+ std::atomic<bool> hasPendingHooks_;
795
+
796
+ // Mutex to Guard workMetaList_
797
+ std::mutex workMetaListMutex_;
798
+
799
+ // Mutex to Guard monitorWakeUpCV_
800
+ std::mutex monitorMutex_;
801
+
802
+ bool writeDebugInfo_ = false;
803
+
804
+ // Mutex to Guard the check of writeDebugInfo_
805
+ std::mutex writeDebugInfoMutex_;
806
+
807
+ // Condition Variable for watchdog thread sleep
808
+ std::condition_variable workMetaListCV_;
809
+
810
+ // Condition Variable for monitor thread to wake up early
811
+ std::condition_variable monitorWakeUpCV_;
812
+
813
+ // List to store WorkNCCL objects tracked by the watchdog
814
+ std::list<ProcessGroupNCCL::WorkNCCL> workMetaList_;
815
+
816
+ // Mutex to guard completedWorkList_
817
+ std::mutex completedWorkListMutex_;
818
+
819
+ // Condition Variable signaled when completed work is appended
820
+ std::condition_variable completedWorkListCV_;
821
+
822
+ std::list<ProcessGroupNCCL::WorkNCCL> completedWorkList_;
823
+
824
+ // Add a Work entry to workMetaList_
825
+ void workEnqueue(c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL>);
826
+
827
+ // The CUDA streams used by NCCL kernels
828
+ std::unordered_map<std::string, std::vector<at::cuda::CUDAStream>>
829
+ ncclStreams_;
830
+
831
+ // The CUDA events used to sync NCCL streams
832
+ std::unordered_map<std::string, std::vector<at::cuda::CUDAEvent>> ncclEvents_;
833
+
834
+ // Device Indexes used for all collectives in this group
835
+ std::set<int> usedDeviceIdxs_;
836
+
837
+ // Flag to denote if a coalescing groupStart/groupEnd block is active
838
+ int coalescing_state_ = 0;
839
+
840
+ // Stores device indexes for all collectives run inside a coalescing block
841
+ std::vector<std::vector<at::Device>> coalescedDevices_;
842
+
843
+ // Stores communicators for all collectives run inside a coalescing block
844
+ std::vector<std::vector<std::shared_ptr<NCCLComm>>> coalescedComms_;
845
+
846
+ // map from the key: "group name + pg counter (ID)" to the
847
+ // unique NCCL ID count. This needs to be group and pg specific
848
+ //
849
+ // For each process group, we need a uniform unique NCCL ID counter to ensure
850
+ // that NCCL operations in this process group can be completed successfully.
851
+ // Since each process group ID belongs to a group name, the key to this map
852
+ // is a combination of group name and ProcessGroupNCCL ID.
853
+ static std::unordered_map<std::string, ssize_t> pgUniqueNCCLIDCnt_;
854
+
855
+ // map from group name to the pg counter (ID) within that group
856
+ //
857
+ // For each group with the "group name" (which is the key), we need to
858
+ // keep track of a unique process group ID when creating a new
859
+ // ProcessGroupNCCL for this "group name". Therefore, the value of this
860
+ // map keeps the unique ProcessGroupNCCL's ID for a specific group with
861
+ // the "group name". The reason we need a per-group process group ID counter
862
+ // is that different groups can have different ranks and we need to ensure that
863
+ // each group has its own uniform process group ID for all its ranks.
864
+ static std::unordered_map<std::string, ssize_t> processGroupCounterMap_;
865
+
866
+ // Whether or not wait() and synchronize() are blocking operations that wait
867
+ // for the operation to complete.
868
+ bool blockingWait_ = false;
869
+
870
+ // Whether or not to hook the cache allocator to register all allocated
871
+ // tensors
872
+ bool useTensorRegisterAllocatorHook_ = false;
873
+
874
+ // Whether or not the workCleanupThread is used to perform async error
875
+ // handling.
876
+ ErrorHandlingMode asyncErrorHandling_ = NoHandling;
877
+
878
+ // Whether or not to enable timeout root cause analysis.
879
+ bool desyncDebug_;
880
+
881
+ // Whether or not to dump debug info on timeout
882
+ bool dumpOnTimeout_;
883
+
884
+ // Whether or not to create start CUDAEvent and enable timing for start
885
+ // and end events. Note that enableTiming_ is always true if desyncDebug_
886
+ // is set to true.
887
+ std::atomic<bool> enableTiming_;
888
+
889
+ // Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set
890
+ bool avoidRecordStreams_ = false;
891
+
892
+ // Set of communicators that this process group has aborted and whose
893
+ // ncclUniqueId has been written to the store. We don't need a lock
894
+ // for this map since only the watchdog thread accesses this set. The
895
+ // set contains the string representation of ncclUniqueId.
896
+ std::unordered_set<std::string> abortedComms_;
897
+
898
+ // The number of active ncclGroupStart() calls. This counter will be increased
899
+ // by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd()
900
+ // is called.
901
+ static thread_local uint64_t ncclActiveGroupCounter_;
902
+
903
+ // Counter for the sequence number of NCCL collective calls.
904
+ uint64_t seq_{0};
905
+
906
+ std::exception_ptr watchDogException_ = nullptr;
907
+
908
+ // The writer object used to store NCCL debug info.
909
+ std::unique_ptr<DebugInfoWriter> debugInfoWriter_ = nullptr;
910
+
911
+ size_t uid_;
912
+ };
913
+
914
+ TORCH_API std::string dump_nccl_trace();
915
+
916
+ } // namespace c10d
917
+
918
+ #endif // USE_C10D_NCCL
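Editor's note: the devNCCLCommMap_ comment above describes how communicator-cache keys are formed. Below is a minimal sketch of that key scheme; the helper names collectiveKey and p2pKey are hypothetical, and the real construction lives in ProcessGroupNCCL.cpp.

#include <algorithm>
#include <sstream>
#include <string>
#include <vector>

// Collectives: comma-joined device indices, in the order of the tensor list.
std::string collectiveKey(const std::vector<int>& deviceIndices) {
  std::ostringstream oss;
  for (size_t i = 0; i < deviceIndices.size(); ++i) {
    if (i != 0) {
      oss << ',';
    }
    oss << deviceIndices[i];
  }
  return oss.str(); // e.g. "0,1,2,3,4,5,6,7"
}

// Point-to-point: "<lowRank>:<highRank>", so both peers compute the same key
// (e.g. "1:2" when ranks 1 and 2 communicate).
std::string p2pKey(int myRank, int peerRank) {
  return std::to_string(std::min(myRank, peerRank)) + ":" +
      std::to_string(std::max(myRank, peerRank));
}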
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.hpp ADDED
@@ -0,0 +1,113 @@
1
+ #pragma once
2
+
3
+ #include <vector>
4
+
5
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
6
+
7
+ namespace c10d {
8
+
9
+ constexpr const char* ROUND_ROBIN_BACKEND_NAME = "round_robin";
10
+
11
+ // ProcessGroupRoundRobin implements simple load balancing.
12
+ //
13
+ // It is constructed with multiple process groups. Each call is dispatched to
14
+ // one of the specified process groups in a round robin fashion. Each process
15
+ // group instance must have the same rank and size.
16
+ //
17
+ // All functions of the class are expected to be called in the same order
18
+ // across all processes in the process group. This is the only way that we
19
+ // can guarantee to match up the same calls among all processes.
20
+ //
21
+ class TORCH_API ProcessGroupRoundRobin final : public ProcessGroup {
22
+ public:
23
+ explicit ProcessGroupRoundRobin(
24
+ int rank,
25
+ int size,
26
+ std::vector<c10::intrusive_ptr<ProcessGroup>> processGroups);
27
+
28
+ ~ProcessGroupRoundRobin() override;
29
+
30
+ const std::string getBackendName() const override {
31
+ return std::string(ROUND_ROBIN_BACKEND_NAME);
32
+ }
33
+
34
+ c10::intrusive_ptr<Work> broadcast(
35
+ std::vector<at::Tensor>& tensors,
36
+ const BroadcastOptions& opts = BroadcastOptions()) override;
37
+
38
+ c10::intrusive_ptr<Work> allreduce(
39
+ std::vector<at::Tensor>& tensors,
40
+ const AllreduceOptions& opts = AllreduceOptions()) override;
41
+
42
+ c10::intrusive_ptr<Work> allreduce_coalesced(
43
+ std::vector<at::Tensor>& tensors,
44
+ const AllreduceCoalescedOptions& opts =
45
+ AllreduceCoalescedOptions()) override;
46
+
47
+ c10::intrusive_ptr<Work> reduce(
48
+ std::vector<at::Tensor>& tensors,
49
+ const ReduceOptions& opts = ReduceOptions()) override;
50
+
51
+ c10::intrusive_ptr<Work> allgather(
52
+ std::vector<std::vector<at::Tensor>>& outputs,
53
+ std::vector<at::Tensor>& inputs,
54
+ const AllgatherOptions& opts = AllgatherOptions()) override;
55
+
56
+ c10::intrusive_ptr<Work> _allgather_base(
57
+ at::Tensor& outputBuffer,
58
+ at::Tensor& inputBuffer,
59
+ const AllgatherOptions& opts = AllgatherOptions()) override;
60
+
61
+ c10::intrusive_ptr<Work> allgather_coalesced(
62
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
63
+ std::vector<at::Tensor>& inputTensors,
64
+ const AllgatherOptions& opts = AllgatherOptions()) override;
65
+
66
+ c10::intrusive_ptr<Work> gather(
67
+ std::vector<std::vector<at::Tensor>>& outputs,
68
+ std::vector<at::Tensor>& inputs,
69
+ const GatherOptions& opts = GatherOptions()) override;
70
+
71
+ c10::intrusive_ptr<Work> scatter(
72
+ std::vector<at::Tensor>& outputs,
73
+ std::vector<std::vector<at::Tensor>>& inputs,
74
+ const ScatterOptions& opts = ScatterOptions()) override;
75
+
76
+ c10::intrusive_ptr<Work> reduce_scatter(
77
+ std::vector<at::Tensor>& outputs,
78
+ std::vector<std::vector<at::Tensor>>& inputs,
79
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
80
+
81
+ c10::intrusive_ptr<Work> alltoall_base(
82
+ at::Tensor& outputTensor,
83
+ at::Tensor& inputTensor,
84
+ std::vector<int64_t>& outputSplitSizes,
85
+ std::vector<int64_t>& inputSplitSizes,
86
+ const AllToAllOptions& opts = AllToAllOptions()) override;
87
+
88
+ c10::intrusive_ptr<Work> send(
89
+ std::vector<at::Tensor>& tensors,
90
+ int dstRank,
91
+ int tag) override;
92
+
93
+ c10::intrusive_ptr<Work> recv(
94
+ std::vector<at::Tensor>& tensors,
95
+ int srcRank,
96
+ int tag) override;
97
+
98
+ c10::intrusive_ptr<Work> recvAnysource(
99
+ std::vector<at::Tensor>& tensors,
100
+ int tag) override;
101
+
102
+ c10::intrusive_ptr<Work> barrier(
103
+ const BarrierOptions& opts = BarrierOptions()) override;
104
+
105
+ private:
106
+ std::vector<c10::intrusive_ptr<ProcessGroup>> processGroups_;
107
+ std::vector<c10::intrusive_ptr<ProcessGroup>>::const_iterator iterator_;
108
+
109
+ // Returns the next ProcessGroup to use.
110
+ const c10::intrusive_ptr<ProcessGroup>& next();
111
+ };
112
+
113
+ } // namespace c10d
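Editor's note: the round-robin dispatch described in the header amounts to cycling iterator_ over processGroups_ and forwarding each call to the group it lands on. A sketch of plausible bodies for next() and one forwarding collective follows, assuming the header above is included; these bodies are illustrative assumptions, not the actual ProcessGroupRoundRobin.cpp.

namespace c10d {

const c10::intrusive_ptr<ProcessGroup>& ProcessGroupRoundRobin::next() {
  auto& processGroup = *iterator_;
  ++iterator_;
  if (iterator_ == processGroups_.end()) {
    iterator_ = processGroups_.begin(); // wrap around
  }
  return processGroup;
}

c10::intrusive_ptr<Work> ProcessGroupRoundRobin::allreduce(
    std::vector<at::Tensor>& tensors,
    const AllreduceOptions& opts) {
  // Because every process issues calls in the same order, each of them picks
  // the same underlying group for a given call.
  return next()->allreduce(tensors, opts);
}

} // namespace c10d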
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp ADDED
@@ -0,0 +1,353 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_UCC
4
+
5
+ #include <torch/csrc/distributed/c10d/UCCUtils.hpp>
6
+
7
+ #include <exception>
8
+ #include <memory>
9
+ #include <mutex>
10
+ #include <queue>
11
+ #include <thread>
12
+ #include <vector>
13
+
14
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
15
+ #include <torch/csrc/distributed/c10d/Store.hpp>
16
+ #include <torch/csrc/distributed/c10d/Types.hpp>
17
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
18
+ #ifdef USE_CUDA
19
+ #include <ATen/cuda/CUDAEvent.h>
20
+ #include <c10/cuda/CUDAStream.h>
21
+ #endif
22
+
23
+ namespace c10d {
24
+
25
+ #define TORCH_UCC_DEVICE_NOT_SET -2
26
+
27
+ #ifdef USE_CUDA
28
+ #define SAVE_TENSORS(_TENSORS, _DATA) \
29
+ do { \
30
+ if ((_TENSORS)[0].device().is_cuda()) { \
31
+ for (const auto i : c10::irange((_TENSORS).size())) { \
32
+ c10::cuda::CUDACachingAllocator::recordStream( \
33
+ (_TENSORS)[i].storage().data_ptr(), (*stream)); \
34
+ } \
35
+ } else { \
36
+ (_DATA) = (_TENSORS); \
37
+ } \
38
+ } while (0)
39
+
40
+ #else
41
+ #define SAVE_TENSORS(_TENSORS, _DATA) (_DATA) = (_TENSORS);
42
+ #endif
43
+
44
+ constexpr const char* UCC_BACKEND_NAME = "ucc";
45
+
46
+ struct event_pool_t {
47
+ #ifdef USE_CUDA
48
+ std::queue<std::unique_ptr<at::cuda::CUDAEvent>> event_pool;
49
+ #endif
50
+ std::mutex event_pool_mutex;
51
+ };
52
+
53
+ class Comm;
54
+
55
+ // UCC does not support multiple CUDA devices per process.
56
+ class TORCH_API ProcessGroupUCC : public Backend {
57
+ private:
58
+ void set_timeout(ucc_coll_args_t& args);
59
+
60
+ public:
61
+ class WorkData {
62
+ public:
63
+ std::vector<at::Tensor> src;
64
+ std::vector<at::Tensor> dst;
65
+ std::vector<at::Tensor> flat;
66
+ WorkData() {}
67
+ virtual ~WorkData() = default;
68
+ };
69
+ class AlltoallWorkData : public WorkData {
70
+ public:
71
+ AlltoallWorkData(int size)
72
+ : send_lengths(size),
73
+ send_offsets(size),
74
+ recv_lengths(size),
75
+ recv_offsets(size) {}
76
+ std::vector<uint64_t> send_lengths;
77
+ std::vector<uint64_t> send_offsets;
78
+ std::vector<uint64_t> recv_lengths;
79
+ std::vector<uint64_t> recv_offsets;
80
+ };
81
+
82
+ class AllgathervWorkData : public WorkData {
83
+ public:
84
+ AllgathervWorkData(int size) : recv_lengths(size), recv_offsets(size) {}
85
+ std::vector<uint64_t> recv_lengths;
86
+ std::vector<uint64_t> recv_offsets;
87
+ };
88
+
89
+ class ScattervWorkData : public WorkData {
90
+ public:
91
+ ScattervWorkData(int size) : send_lengths(size), send_offsets(size) {}
92
+ std::vector<uint64_t> send_lengths;
93
+ std::vector<uint64_t> send_offsets;
94
+ };
95
+
96
+ class ProgressEntry {
97
+ friend class ProcessGroupUCC;
98
+ friend class Comm;
99
+
100
+ public:
101
+ ProgressEntry(CommBase* comm, ucc_coll_req_h request)
102
+ : status_(UCC_INPROGRESS), comm_(comm), request_(request) {}
103
+ // Finalizes UCC status or exception of collective request.
104
+ void finalize(std::exception_ptr eptr = nullptr);
105
+ ucc_status_t status_;
106
+ CommBase* comm_;
107
+ ucc_coll_req_h request_;
108
+ std::unique_ptr<WorkData> data;
109
+ c10::intrusive_ptr<c10::ivalue::Future> future_;
110
+ std::exception_ptr eptr_;
111
+ };
112
+
113
+ class WorkUCC : public Work {
114
+ friend class ProcessGroupUCC;
115
+ friend class Comm;
116
+
117
+ public:
118
+ WorkUCC(
119
+ OpType opType,
120
+ uint64_t seq,
121
+ const char* prof_title,
122
+ const c10::optional<std::vector<at::Tensor>>& inputs,
123
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger)
124
+ : Work(-1, opType, prof_title, inputs), logger_(logger), seq_(seq) {}
125
+ ~WorkUCC();
126
+ void setException();
127
+ void setAndThrowException();
128
+ bool isCompleted() override;
129
+ bool isSuccess() const override;
130
+ bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
131
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
132
+ std::vector<at::Tensor> result() override;
133
+ int sourceRank() const override;
134
+ #ifdef USE_CUDA
135
+ std::unique_ptr<at::cuda::CUDAEvent> fence = nullptr;
136
+ event_pool_t* ep = nullptr;
137
+ #endif
138
+ int sourceRank_;
139
+
140
+ protected:
141
+ std::shared_ptr<ProgressEntry> entry_;
142
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger_;
143
+ uint64_t seq_;
144
+
145
+ private:
146
+ // The future returned by getFuture.
147
+ c10::intrusive_ptr<at::ivalue::Future> future_;
148
+ // Store a reference to collective's outputs, used by result
149
+ std::shared_ptr<std::vector<at::Tensor>> outputs_;
150
+ };
151
+
152
+ explicit ProcessGroupUCC(
153
+ const c10::intrusive_ptr<Store>& store,
154
+ int rank = -1,
155
+ int size = -1,
156
+ std::chrono::duration<float> timeout = kBackendDefaultTimeout);
157
+
158
+ void initComm(c10::Device dev);
159
+
160
+ ~ProcessGroupUCC() override;
161
+
162
+ const std::string getBackendName() const override {
163
+ return std::string(UCC_BACKEND_NAME);
164
+ }
165
+
166
+ #ifdef USE_CUDA
167
+ std::unique_ptr<at::cuda::CUDAEvent> getPooledEvent();
168
+ #endif
169
+
170
+ // Performs a health check by initializing dummy UCC & UCX communicators and
171
+ // then destroying them. This will help indicate and signal any
172
+ // UCC/UCX-related issues prior to the first collective. The actual
173
+ // initialization and subsequent destruction is run on a separate thread and
174
+ // the main thread is signalled about timeouts/errors to report to the
175
+ // application.
176
+ void runHealthCheck();
177
+
178
+ template <typename PreProcess, typename PostProcess>
179
+ c10::intrusive_ptr<Work> collective_post(
180
+ OpType opType,
181
+ PreProcess preproc,
182
+ PostProcess postproc,
183
+ ucc_coll_args_t& coll,
184
+ std::unique_ptr<ProcessGroupUCC::WorkData> data,
185
+ c10::Device dev,
186
+ std::vector<at::Tensor>& inputTensors,
187
+ std::vector<at::Tensor>& outputTensors,
188
+ const char* prof_title);
189
+
190
+ c10::intrusive_ptr<Work> broadcast(
191
+ std::vector<at::Tensor>& data,
192
+ const BroadcastOptions& opts = BroadcastOptions()) override;
193
+
194
+ c10::intrusive_ptr<Work> allreduce(
195
+ std::vector<at::Tensor>& tensors,
196
+ const AllreduceOptions& opts = AllreduceOptions()) override;
197
+
198
+ c10::intrusive_ptr<Work> allreduce_coalesced(
199
+ std::vector<at::Tensor>& tensors,
200
+ const AllreduceCoalescedOptions& opts =
201
+ AllreduceCoalescedOptions()) override;
202
+
203
+ c10::intrusive_ptr<Work> reduce(
204
+ std::vector<at::Tensor>& tensors,
205
+ const ReduceOptions& opts = ReduceOptions()) override;
206
+
207
+ c10::intrusive_ptr<Work> allgather(
208
+ std::vector<std::vector<at::Tensor>>& outputTensors,
209
+ std::vector<at::Tensor>& inputTensors,
210
+ const AllgatherOptions& opts = AllgatherOptions()) override;
211
+
212
+ c10::intrusive_ptr<Work> _allgather_base(
213
+ at::Tensor& outputBuffer,
214
+ at::Tensor& inputBuffer,
215
+ const AllgatherOptions& opts = AllgatherOptions()) override;
216
+
217
+ c10::intrusive_ptr<Work> barrier(
218
+ const BarrierOptions& opts = BarrierOptions()) override;
219
+
220
+ c10::intrusive_ptr<Work> gather(
221
+ std::vector<std::vector<at::Tensor>>& outputTensors,
222
+ std::vector<at::Tensor>& inputTensors,
223
+ const GatherOptions& opts = GatherOptions()) override;
224
+
225
+ c10::intrusive_ptr<Work> scatter(
226
+ std::vector<at::Tensor>& outputTensors,
227
+ std::vector<std::vector<at::Tensor>>& inputTensors,
228
+ const ScatterOptions& opts = ScatterOptions()) override;
229
+
230
+ c10::intrusive_ptr<Work> reduce_scatter(
231
+ std::vector<at::Tensor>& outputTensors,
232
+ std::vector<std::vector<at::Tensor>>& inputTensors,
233
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
234
+
235
+ c10::intrusive_ptr<Work> alltoall_base(
236
+ at::Tensor& outputTensor,
237
+ at::Tensor& inputTensor,
238
+ std::vector<int64_t>& outputSplitSizes,
239
+ std::vector<int64_t>& inputSplitSizes,
240
+ const AllToAllOptions& opts = AllToAllOptions()) override;
241
+
242
+ c10::intrusive_ptr<Work> alltoall(
243
+ std::vector<at::Tensor>& outputTensors,
244
+ std::vector<at::Tensor>& inputTensors,
245
+ const AllToAllOptions& opts = AllToAllOptions()) override;
246
+
247
+ c10::intrusive_ptr<Work> send(
248
+ std::vector<at::Tensor>& tensors,
249
+ int dstRank,
250
+ int tag) override;
251
+
252
+ c10::intrusive_ptr<Work> recv(
253
+ std::vector<at::Tensor>& tensors,
254
+ int srcRank,
255
+ int tag) override;
256
+
257
+ // Counter for the sequence number of UCC collective_post calls.
258
+ uint64_t seq_{0};
259
+
260
+ // Agrees on an initial sequence number for the whole group by having rank 0
261
+ // create it and broadcast it to other ranks using the store.
262
+ void setSequenceNumberForGroup() override;
263
+
264
+ // Retrieves the current sequence number for the whole group, which should be
265
+ // in sync. If the returned number is not consistent across the group, it
266
+ // may indicate that there is some sort of collective desynchronization.
267
+ uint64_t getSequenceNumberForGroup() override;
268
+
269
+ static c10::intrusive_ptr<Backend> createProcessGroupUCC(
270
+ const c10::intrusive_ptr<::c10d::Store>& store,
271
+ int rank,
272
+ int size,
273
+ const std::chrono::duration<float>& timeout);
274
+
275
+ protected:
276
+ const std::chrono::duration<float> timeout_;
277
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob;
278
+ std::shared_ptr<Comm> comm = {nullptr};
279
+ uint32_t comm_id;
280
+ ucc_team_h team{nullptr};
281
+ ucc_ee_h cuda_ee{nullptr};
282
+ ucc_ee_h cuda_ee_p2p[2]{nullptr, nullptr};
283
+
284
+ #ifdef USE_CUDA
285
+ std::unique_ptr<at::cuda::CUDAStream> stream = nullptr;
286
+ std::unique_ptr<at::cuda::CUDAStream> stream_p2p[2] = {nullptr, nullptr};
287
+ event_pool_t ep;
288
+ #endif
289
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
290
+ };
291
+
292
+ class Comm {
293
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
294
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob;
295
+ CommUCC ucc_comm;
296
+ std::mutex mutex;
297
+ std::thread progress_thread;
298
+ std::condition_variable queue_produce_cv;
299
+ std::condition_variable queue_consume_cv;
300
+ std::deque<std::shared_ptr<ProcessGroupUCC::ProgressEntry>> progress_queue;
301
+ bool stop_progress_loop;
302
+ bool collective_inprogress;
303
+ torch_ucc_phase_t finalize_phase;
304
+
305
+ public:
306
+ c10::DeviceIndex cuda_device_index;
307
+ Comm(
308
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger,
309
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
310
+ c10::Device dev,
311
+ bool is_health_check);
312
+
313
+ ~Comm();
314
+
315
+ void ucc_create_team(
316
+ ucc_team_h& team,
317
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob);
318
+
319
+ void ucc_destroy_team(ucc_team_h& team);
320
+
321
+ c10::intrusive_ptr<Work> enqueue_p2p(
322
+ OpType opType,
323
+ ucc_coll_req_h request,
324
+ const char* prof_title);
325
+
326
+ #ifdef USE_CUDA
327
+ void enqueue_cuda_collective(
328
+ std::unique_ptr<ProcessGroupUCC::WorkData> data,
329
+ c10::intrusive_ptr<ProcessGroupUCC::WorkUCC> work,
330
+ ucc_coll_args_t& coll,
331
+ ucc_team_h team,
332
+ ucc_ee_h ee);
333
+ #endif
334
+
335
+ void enqueue_collective(
336
+ std::unique_ptr<ProcessGroupUCC::WorkData> data,
337
+ c10::intrusive_ptr<ProcessGroupUCC::WorkUCC> work,
338
+ ucc_coll_args_t& coll,
339
+ ucc_team_h team);
340
+
341
+ static std::shared_ptr<Comm> get_comm(
342
+ uint32_t& id,
343
+ c10::Device dev,
344
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
345
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger,
346
+ bool is_health_check = false);
347
+
348
+ void progress_loop();
349
+ };
350
+
351
+ } // namespace c10d
352
+
353
+ #endif // USE_C10D_UCC
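Editor's note: Comm above funnels every collective through one progress thread: enqueue_collective() pushes a ProgressEntry onto progress_queue and signals queue_produce_cv, while progress_loop() pops entries and drives them to completion. The self-contained sketch below shows only that producer/consumer shape, using a placeholder Entry type rather than the real UCC types.

#include <condition_variable>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>

struct Entry {                // stand-in for ProcessGroupUCC::ProgressEntry
  std::function<void()> work; // stand-in for progressing a ucc_coll_req_h
};

class ProgressWorker {
 public:
  ProgressWorker() : thread_([this] { progressLoop(); }) {}

  ~ProgressWorker() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      stop_ = true;
    }
    produceCv_.notify_one();
    thread_.join();
  }

  // Producer side: called from the thread issuing the collective.
  void enqueue(std::shared_ptr<Entry> entry) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      queue_.push_back(std::move(entry));
    }
    produceCv_.notify_one();
  }

 private:
  // Consumer side: a single dedicated thread drains the queue.
  void progressLoop() {
    std::unique_lock<std::mutex> lock(mutex_);
    while (true) {
      produceCv_.wait(lock, [this] { return stop_ || !queue_.empty(); });
      if (stop_ && queue_.empty()) {
        return;
      }
      auto entry = std::move(queue_.front());
      queue_.pop_front();
      lock.unlock();
      entry->work(); // drive the request outside the lock
      lock.lock();
    }
  }

  std::mutex mutex_;
  std::condition_variable produceCv_;
  std::deque<std::shared_ptr<Entry>> queue_;
  bool stop_ = false;
  std::thread thread_; // declared last so the loop sees initialized members
};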
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupWrapper.hpp ADDED
@@ -0,0 +1,140 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_GLOO
4
+
5
+ #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>
6
+ #include <torch/csrc/distributed/c10d/Types.hpp>
7
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
8
+
9
+ namespace c10d {
10
+
11
+ class TORCH_API ProcessGroupWrapper : public Backend {
12
+ public:
13
+ explicit ProcessGroupWrapper(
14
+ c10::intrusive_ptr<Backend> backend,
15
+ c10::intrusive_ptr<Backend> glooBackend);
16
+
17
+ const std::string getBackendName() const override;
18
+
19
+ c10::intrusive_ptr<Work> broadcast(
20
+ std::vector<at::Tensor>& data,
21
+ const BroadcastOptions& opts = BroadcastOptions()) override;
22
+
23
+ c10::intrusive_ptr<Work> allreduce(
24
+ std::vector<at::Tensor>& data,
25
+ const AllreduceOptions& opts = AllreduceOptions()) override;
26
+
27
+ c10::intrusive_ptr<Work> allreduce_coalesced(
28
+ std::vector<at::Tensor>& tensors,
29
+ const AllreduceCoalescedOptions& opts =
30
+ AllreduceCoalescedOptions()) override;
31
+
32
+ c10::intrusive_ptr<Work> reduce(
33
+ std::vector<at::Tensor>& tensors,
34
+ const ReduceOptions& opts = ReduceOptions()) override;
35
+
36
+ c10::intrusive_ptr<Work> allgather(
37
+ std::vector<std::vector<at::Tensor>>& outputTensors,
38
+ std::vector<at::Tensor>& inputTensors,
39
+ const AllgatherOptions& opts = AllgatherOptions()) override;
40
+
41
+ c10::intrusive_ptr<Work> _allgather_base(
42
+ at::Tensor& outputBuffer,
43
+ at::Tensor& inputBuffer,
44
+ const AllgatherOptions& opts = AllgatherOptions()) override;
45
+
46
+ // This function is deprecated and will be moved out of ProcessGroup to comms:
47
+ // * do not add dependencies on this function,
48
+ // * do not implement it in your ProcessGroup, implement _allgather_base
49
+ // instead.
50
+ c10::intrusive_ptr<Work> allgather_coalesced(
51
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
52
+ std::vector<at::Tensor>& inputTensors,
53
+ const AllgatherOptions& opts = AllgatherOptions()) override;
54
+
55
+ c10::intrusive_ptr<Work> gather(
56
+ std::vector<std::vector<at::Tensor>>& outputTensors,
57
+ std::vector<at::Tensor>& inputTensors,
58
+ const GatherOptions& opts = GatherOptions()) override;
59
+
60
+ c10::intrusive_ptr<Work> scatter(
61
+ std::vector<at::Tensor>& outputTensors,
62
+ std::vector<std::vector<at::Tensor>>& inputTensors,
63
+ const ScatterOptions& opts = ScatterOptions()) override;
64
+
65
+ c10::intrusive_ptr<Work> reduce_scatter(
66
+ std::vector<at::Tensor>& outputTensors,
67
+ std::vector<std::vector<at::Tensor>>& inputTensors,
68
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
69
+
70
+ c10::intrusive_ptr<Work> alltoall_base(
71
+ at::Tensor& outputTensor,
72
+ at::Tensor& inputTensor,
73
+ std::vector<int64_t>& outputSplitSizes,
74
+ std::vector<int64_t>& inputSplitSizes,
75
+ const AllToAllOptions& opts = AllToAllOptions()) override;
76
+
77
+ c10::intrusive_ptr<Work> alltoall(
78
+ std::vector<at::Tensor>& outputTensors,
79
+ std::vector<at::Tensor>& inputTensors,
80
+ const AllToAllOptions& opts = AllToAllOptions()) override;
81
+
82
+ void monitoredBarrier(const BarrierOptions& opts, bool waitAllRanks = false)
83
+ override;
84
+
85
+ // Agrees on an initial sequence number for the whole group by having rank 0
86
+ // create it and broadcast it to other ranks using the store. Only implemented
87
+ // for GLOO and NCCL backends currently.
88
+ // Don't implement this.
89
+ void setSequenceNumberForGroup() override;
90
+
91
+ // Retrieves the current sequence number for the whole group, which should be
92
+ // in sync. If the returned number is not consistent across the group, it
93
+ // may indicate that there is some sort of collective desynchronization.
94
+ uint64_t getSequenceNumberForGroup() override; // just calls the underlying backend
95
+
96
+ c10::intrusive_ptr<Work> send(
97
+ std::vector<at::Tensor>& tensors,
98
+ int dstRank,
99
+ int tag) override;
100
+
101
+ c10::intrusive_ptr<Work> recv(
102
+ std::vector<at::Tensor>& tensors,
103
+ int srcRank,
104
+ int tag) override;
105
+
106
+ c10::intrusive_ptr<Work> recvAnysource(
107
+ std::vector<at::Tensor>& tensors,
108
+ int tag) override;
109
+
110
+ c10::intrusive_ptr<Work> barrier(
111
+ const BarrierOptions& opts = BarrierOptions()) override;
112
+
113
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
114
+ at::Tensor& outputBuffer,
115
+ at::Tensor& inputBuffer,
116
+ const ReduceScatterOptions& opts) override;
117
+
118
+ void startCoalescing() override;
119
+
120
+ c10::intrusive_ptr<Work> endCoalescing() override;
121
+
122
+ c10::intrusive_ptr<Backend> getWrappedPg() const;
123
+
124
+ private:
125
+ // Underlying process group that actual application collectives will be
126
+ // dispatched to
127
+ c10::intrusive_ptr<Backend> backend_;
128
+ // Gloo process group responsible for internal coordination such as monitored
129
+ // barrier, sequence number checking, collective fingerprint collecting.
130
+ c10::intrusive_ptr<Backend> glooBackend_;
131
+ // Conducts several checks to ensure that the underlying collective is well
132
+ // formed with the goal of notifying the user about incorrect collective use
133
+ // in the application.
134
+ void runCollectiveChecks(
135
+ OpType op_type,
136
+ const std::vector<at::Tensor>& tensors);
137
+ };
138
+ } // namespace c10d
139
+
140
+ #endif // USE_C10D_GLOO
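Editor's note: every collective on ProcessGroupWrapper follows the same two-step shape: runCollectiveChecks() uses the helper Gloo group to compare the collective fingerprint across ranks, then the call is forwarded to the wrapped backend. A sketch of what one such override might look like, assuming the header above; the body is an assumption, not the actual ProcessGroupWrapper.cpp.

namespace c10d {

c10::intrusive_ptr<Work> ProcessGroupWrapper::allreduce(
    std::vector<at::Tensor>& data,
    const AllreduceOptions& opts) {
  // Fail fast with a helpful error if ranks disagree on the op or shapes.
  runCollectiveChecks(OpType::ALLREDUCE, data);
  // Then dispatch to the real backend.
  return backend_->allreduce(data, opts);
}

} // namespace c10d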
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/RankLocal.hpp ADDED
@@ -0,0 +1,73 @@
1
+
2
+ #pragma once
3
+
4
+ #include <shared_mutex>
5
+
6
+ #include <torch/csrc/autograd/function.h>
7
+
8
+ namespace c10d {
9
+
10
+ // `RankLocal` maintains a unique instance of T for each non-autograd thread.
11
+ // For non-autograd threads, `RankLocal<T>::get()` functions similar to
12
+ // thread_local. For autograd threads, `RankLocal<T>::get()` returns the
13
+ // instance of T corresponding to the enqueuing non-autograd thread. The
14
+ // mechanism allows for rank-specific context shared between forward and
15
+ // backward. It works for both the one-rank-per-process and one-rank-per-thread
16
+ // scenarios.
17
+ //
18
+ // NOTE: RankLocal doesn't make the underlying objects thread-safe.
19
+ template <typename T>
20
+ class RankLocal {
21
+ public:
22
+ RankLocal(const RankLocal&) = delete;
23
+ RankLocal& operator=(const RankLocal&) = delete;
24
+
25
+ static T& get() {
26
+ // Fast path: non-autograd threads can simply return
27
+ // the object reference cached in TLS.
28
+ if (cached_ != nullptr) {
29
+ return *cached_;
30
+ }
31
+ const auto node = torch::autograd::get_current_node();
32
+ auto fwd_thread_id = node == nullptr ? at::RecordFunction::currentThreadId()
33
+ : node->thread_id();
34
+ // Optimistically acquire the read lock first, since most likely we are in
35
+ // an autograd thread and the object has already been constructed.
36
+ {
37
+ std::shared_lock read_lock(lock_);
38
+ auto it = thread_id_to_rank_local_.find(fwd_thread_id);
39
+ if (it != thread_id_to_rank_local_.end()) {
40
+ // Cache for non-autograd threads
41
+ if (node == nullptr) {
42
+ cached_ = &it->second;
43
+ }
44
+ return it->second;
45
+ }
46
+ }
47
+
48
+ std::unique_lock write_lock(lock_);
49
+ auto [it, _] = thread_id_to_rank_local_.try_emplace(fwd_thread_id);
50
+ // Cache for non-autograd threads
51
+ if (node == nullptr) {
52
+ cached_ = &it->second;
53
+ }
54
+ return it->second;
55
+ }
56
+
57
+ private:
58
+ RankLocal(){};
59
+ thread_local static T* cached_;
60
+ static std::unordered_map<uint64_t, T> thread_id_to_rank_local_;
61
+ static std::shared_mutex lock_;
62
+ };
63
+
64
+ template <typename T>
65
+ thread_local T* RankLocal<T>::cached_ = nullptr;
66
+
67
+ template <typename T>
68
+ std::unordered_map<uint64_t, T> RankLocal<T>::thread_id_to_rank_local_;
69
+
70
+ template <typename T>
71
+ std::shared_mutex RankLocal<T>::lock_;
72
+
73
+ } // namespace c10d
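Editor's note: a small usage sketch for RankLocal, assuming the header above is included. A plain thread_local would not be shared between a forward call and the backward node it enqueued, because the autograd engine runs backward on its own threads; RankLocal<T>::get() instead keys the instance by the enqueuing (forward) thread. The PerRankState type below is hypothetical.

#include <cstdint>

// Hypothetical per-rank context shared between forward and backward passes.
struct PerRankState {
  int64_t collectiveCount = 0;
};

void onCollectiveIssued() {
  // Returns the same PerRankState instance whether this runs on the forward
  // thread or on the autograd thread executing the matching backward node.
  auto& state = c10d::RankLocal<PerRankState>::get();
  state.collectiveCount++;
}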
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStore.hpp ADDED
@@ -0,0 +1,161 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <cstdint>
5
+ #include <memory>
6
+
7
+ #include <torch/csrc/distributed/c10d/Store.hpp>
8
+
9
+ namespace c10d {
10
+ namespace detail {
11
+
12
+ class TCPServer;
13
+
14
+ class TCPClient;
15
+
16
+ struct SocketAddress {
17
+ std::string host{};
18
+ std::uint16_t port{};
19
+ };
20
+
21
+ class Counter {
22
+ public:
23
+ void update(double val);
24
+ std::unordered_map<std::string, double> observe() const;
25
+
26
+ double mean() const noexcept {
27
+ return mean_;
28
+ }
29
+ int64_t count() const noexcept {
30
+ return count_;
31
+ }
32
+ double variance() const noexcept {
33
+ return m2_ / count_;
34
+ }
35
+ double sample_variance() const noexcept {
36
+ return m2_ / (count_ - 1);
37
+ }
38
+
39
+ private:
40
+ int64_t count_ = 0;
41
+ double mean_ = 0;
42
+ double m2_ = 0;
43
+ };
44
+
45
+ } // namespace detail
46
+
47
+ struct TCPStoreOptions {
48
+ static constexpr std::uint16_t kDefaultPort = 29500;
49
+
50
+ std::uint16_t port = kDefaultPort;
51
+ bool isServer = false;
52
+ c10::optional<std::size_t> numWorkers = c10::nullopt;
53
+ bool waitWorkers = true;
54
+ std::chrono::milliseconds timeout = Store::kDefaultTimeout;
55
+
56
+ // A boolean value indicating whether multiple store instances can be
57
+ // initialized with the same host:port pair.
58
+ bool multiTenant = false;
59
+
60
+ // If specified, and if isServer is true, the underlying TCPServer will take
61
+ // over the bound socket associated with this fd. This option is useful to avoid
62
+ // port assignment races in certain scenarios.
63
+ c10::optional<int> masterListenFd = c10::nullopt;
64
+
65
+ // A boolean value indicating whether to use the experimental libUV backend.
66
+ bool useLibUV = false;
67
+ };
68
+
69
+ class TORCH_API TCPStore : public Store {
70
+ public:
71
+ explicit TCPStore(std::string host, const TCPStoreOptions& opts = {});
72
+
73
+ [[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore(
74
+ const std::string& masterAddr,
75
+ std::uint16_t masterPort,
76
+ c10::optional<int> numWorkers = c10::nullopt,
77
+ bool isServer = false,
78
+ const std::chrono::milliseconds& timeout = kDefaultTimeout,
79
+ bool waitWorkers = true);
80
+
81
+ ~TCPStore() override;
82
+
83
+ void set(const std::string& key, const std::vector<uint8_t>& value) override;
84
+
85
+ std::vector<uint8_t> compareSet(
86
+ const std::string& key,
87
+ const std::vector<uint8_t>& expectedValue,
88
+ const std::vector<uint8_t>& desiredValue) override;
89
+
90
+ std::vector<uint8_t> get(const std::string& key) override;
91
+
92
+ int64_t add(const std::string& key, int64_t value) override;
93
+
94
+ bool deleteKey(const std::string& key) override;
95
+
96
+ bool check(const std::vector<std::string>& keys) override;
97
+
98
+ int64_t getNumKeys() override;
99
+
100
+ void wait(const std::vector<std::string>& keys) override;
101
+
102
+ void wait(
103
+ const std::vector<std::string>& keys,
104
+ const std::chrono::milliseconds& timeout) override;
105
+
106
+ void append(const std::string& key, const std::vector<uint8_t>& value)
107
+ override;
108
+
109
+ std::vector<std::vector<uint8_t>> multiGet(
110
+ const std::vector<std::string>& keys) override;
111
+
112
+ void multiSet(
113
+ const std::vector<std::string>& keys,
114
+ const std::vector<std::vector<uint8_t>>& values) override;
115
+
116
+ bool hasExtendedApi() const override;
117
+
118
+ // Waits for all workers to join.
119
+ void waitForWorkers();
120
+
121
+ // Returns the hostname used by the TCPStore.
122
+ const std::string& getHost() const noexcept {
123
+ return addr_.host;
124
+ }
125
+
126
+ // Returns the port used by the TCPStore.
127
+ std::uint16_t getPort() const noexcept {
128
+ return addr_.port;
129
+ }
130
+
131
+ std::unordered_map<std::string, std::unordered_map<std::string, double>>
132
+ collectClientCounters() const noexcept;
133
+
134
+ bool isLibUvBackend() const noexcept {
135
+ return usingLibUv_;
136
+ }
137
+
138
+ private:
139
+ int64_t incrementValueBy(const std::string& key, int64_t delta);
140
+
141
+ void validate(void);
142
+
143
+ std::vector<uint8_t> doGet(const std::string& key);
144
+
145
+ void doWait(
146
+ c10::ArrayRef<std::string> keys,
147
+ std::chrono::milliseconds timeout);
148
+
149
+ detail::SocketAddress addr_;
150
+ std::shared_ptr<detail::TCPServer> server_;
151
+ std::unique_ptr<detail::TCPClient> client_;
152
+ c10::optional<std::size_t> numWorkers_;
153
+
154
+ const std::string initKey_ = "init/";
155
+ const std::string keyPrefix_ = "/";
156
+ std::mutex activeOpLock_;
157
+ std::unordered_map<std::string, detail::Counter> clientCounters_;
158
+ bool usingLibUv_ = false;
159
+ };
160
+
161
+ } // namespace c10d
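Editor's note: detail::Counter's count_/mean_/m2_ members are the running state of Welford's online mean-and-variance algorithm, which is what makes variance() = m2_/count_ and sample_variance() = m2_/(count_-1) well defined. The actual update() body lives in the implementation file; the sketch below is the textbook Welford recurrence, shown for illustration only.

// Welford's online update: single pass, numerically stable.
void c10d::detail::Counter::update(double val) {
  count_ += 1;
  double delta = val - mean_;
  mean_ += delta / static_cast<double>(count_);
  double delta2 = val - mean_; // uses the updated mean
  m2_ += delta * delta2;
}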
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TCPStoreBackend.hpp ADDED
@@ -0,0 +1,77 @@
1
+ #pragma once
2
+
3
+ #include <chrono>
4
+ #include <thread>
5
+ #include <vector>
6
+
7
+ #include <torch/csrc/distributed/c10d/TCPStore.hpp>
8
+ #include <torch/csrc/distributed/c10d/socket.h>
9
+
10
+ #ifdef _WIN32
11
+ #include <io.h>
12
+ #include <winsock2.h>
13
+ #else
14
+ #include <poll.h>
15
+ #include <unistd.h>
16
+ #endif
17
+
18
+ namespace c10d {
19
+ namespace detail {
20
+
21
+ // Magic number for client validation.
22
+ static const uint32_t validationMagicNumber = 0x3C85F7CE;
23
+
24
+ enum class QueryType : uint8_t {
25
+ VALIDATE,
26
+ SET,
27
+ COMPARE_SET,
28
+ GET,
29
+ ADD,
30
+ CHECK,
31
+ WAIT,
32
+ GETNUMKEYS,
33
+ DELETE_KEY,
34
+ APPEND,
35
+ MULTI_GET,
36
+ MULTI_SET,
37
+ CANCEL_WAIT,
38
+ };
39
+
40
+ enum class CheckResponseType : uint8_t { READY, NOT_READY };
41
+
42
+ enum class WaitResponseType : uint8_t { STOP_WAITING, WAIT_CANCELED };
43
+
44
+ // Abstract base class to handle thread state for TCPStoreMasterDaemon.
45
+ // Contains the windows/unix implementations to signal a
46
+ // shutdown sequence for the thread
47
+ class BackgroundThread {
48
+ public:
49
+ explicit BackgroundThread();
50
+
51
+ virtual ~BackgroundThread() = 0;
52
+ virtual std::uint16_t port() const = 0;
53
+
54
+ void start();
55
+ bool stop_requested();
56
+
57
+ protected:
58
+ void dispose();
59
+ virtual void run() = 0;
60
+ virtual void stop() = 0;
61
+ bool is_running() {
62
+ return is_running_.load();
63
+ }
64
+
65
+ private:
66
+ std::atomic<bool> is_running_;
67
+ std::thread daemonThread_{};
68
+ };
69
+
70
+ std::unique_ptr<BackgroundThread> create_tcpstore_backend(
71
+ const TCPStoreOptions& opts);
72
+ std::unique_ptr<BackgroundThread> create_libuv_tcpstore_backend(
73
+ const TCPStoreOptions& opts);
74
+ bool is_libuv_tcpstore_backend_available();
75
+
76
+ } // namespace detail
77
+ } // namespace c10d
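Editor's note: QueryType and validationMagicNumber above define the request vocabulary of the store protocol; conceptually a client opens with a VALIDATE query carrying the 32-bit magic number so the server can reject unrelated connections. The sketch below only illustrates packing those two declared constants into a byte buffer; the real wire framing is defined by the TCPStore implementation and may differ.

#include <cstdint>
#include <vector>

std::vector<std::uint8_t> buildValidatePayload() {
  std::vector<std::uint8_t> buf;
  buf.push_back(static_cast<std::uint8_t>(c10d::detail::QueryType::VALIDATE));
  std::uint32_t magic = c10d::detail::validationMagicNumber;
  const auto* bytes = reinterpret_cast<const std::uint8_t*>(&magic);
  buf.insert(buf.end(), bytes, bytes + sizeof(magic)); // host byte order
  return buf;
}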
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/TraceUtils.h ADDED
@@ -0,0 +1,543 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/ApproximateClock.h>
4
+ #include <c10/util/irange.h>
5
+ #include <torch/csrc/distributed/c10d/Store.hpp>
6
+ #include <torch/csrc/distributed/c10d/Types.hpp>
7
+ #include <torch/csrc/jit/serialization/pickler.h>
8
+ #include <torch/csrc/profiler/combined_traceback.h>
9
+
10
+ #include <sys/types.h>
11
+
12
+ #include <cstdlib>
13
+ #include <string>
14
+ #include <system_error>
15
+ #include <vector>
16
+
17
+ namespace c10d {
18
+
19
+ /* Trace Utils Related to TORCH_NCCL_DESYNC_DEBUG */
20
+
21
+ inline std::string getTraceStartKey(const std::string& pgName, int rank) {
22
+ return pgName + "_" + std::to_string(rank) + "_trace_start";
23
+ }
24
+
25
+ inline std::string getTraceEndKey(const std::string& pgName, int rank) {
26
+ return pgName + "_" + std::to_string(rank) + "_trace_end";
27
+ }
28
+
29
+ inline bool traceUpdate(
30
+ c10::intrusive_ptr<Store>& store,
31
+ const std::string& key,
32
+ uint64_t seq,
33
+ const std::string& col) {
34
+ std::vector<uint8_t> value(col.size() + sizeof(seq) + 1);
35
+ memcpy(value.data(), &seq, sizeof(seq));
36
+ memcpy(value.data() + sizeof(seq), col.data(), col.size());
37
+ try {
38
+ store->set(key, value);
39
+ return true;
40
+ } catch (...) {
41
+ LOG(ERROR) << "Store is down while updating #" << seq << " with key "
42
+ << key;
43
+ return false;
44
+ }
45
+ return true;
46
+ }
47
+
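Editor's note: traceUpdate() above stores, per rank, an 8-byte sequence number followed by the collective name (the extra byte in the vector stays zero and acts as a terminator), and parseTraceValue() later in this header reverses that layout. A standalone round-trip of just the encoding, for clarity:

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

std::vector<uint8_t> encodeTraceValue(uint64_t seq, const std::string& col) {
  std::vector<uint8_t> value(col.size() + sizeof(seq) + 1); // +1: trailing NUL
  std::memcpy(value.data(), &seq, sizeof(seq));
  std::memcpy(value.data() + sizeof(seq), col.data(), col.size());
  return value;
}

void decodeTraceValue(
    const std::vector<uint8_t>& value,
    uint64_t& seq,
    std::string& col) {
  std::memcpy(&seq, value.data(), sizeof(seq));
  // The zero-initialized byte after the name terminates the C string.
  col = reinterpret_cast<const char*>(value.data() + sizeof(seq));
}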
48
+ enum TraceDebugEvent {
49
+ kEventStart,
50
+ kEventEnd,
51
+ };
52
+ // <seq, <rank, <col, start/end>>>
53
+ using TraceMap =
54
+ std::map<uint64_t, std::map<int, std::pair<std::string, TraceDebugEvent>>>;
55
+
56
+ inline std::string ranksToString(const std::vector<int>& ranks) {
57
+ std::string str;
58
+ for (int rank : ranks) {
59
+ if (str.empty()) {
60
+ str = std::to_string(rank);
61
+ } else {
62
+ str += ", " + std::to_string(rank);
63
+ }
64
+ }
65
+ return str;
66
+ }
67
+
68
+ inline std::string ranksFromTrace(
69
+ const std::vector<std::pair<int, std::string>>& items) {
70
+ std::string ranks;
71
+ for (auto& p : items) {
72
+ if (ranks.empty()) {
73
+ ranks = std::to_string(p.first);
74
+ } else {
75
+ ranks += ", " + std::to_string(p.first);
76
+ }
77
+ }
78
+ return ranks;
79
+ }
80
+
81
+ inline std::string analyzeMissingRanks(const std::vector<int>& missingRanks) {
82
+ return c10::str(
83
+ "\n\t - To our best knowledge, ranks [",
84
+ ranksToString(missingRanks),
85
+ "] are the lagging ranks that caused this timeout. "
86
+ "They never joined any collectives");
87
+ }
88
+
89
+ inline std::string analyzeLaggingRanks(const TraceMap& traceMap) {
90
+ uint64_t lagSeq = traceMap.begin()->first;
91
+ std::vector<int> startRanks;
92
+ std::vector<int> endRanks;
93
+ for (auto& p : traceMap.begin()->second) {
94
+ if (p.second.second == kEventStart) {
95
+ startRanks.push_back(p.first);
96
+ } else {
97
+ endRanks.push_back(p.first);
98
+ }
99
+ }
100
+ std::string report =
101
+ "\n\t - To our best knowledge, the lagging/dead/mismatched ranks "
102
+ "that caused the desync are:";
103
+ if (startRanks.size()) {
104
+ report += c10::str(
105
+ "\n\t - [",
106
+ ranksToString(startRanks),
107
+ "] joined but didn't finish collective #",
108
+ lagSeq,
109
+ " (count from 1)");
110
+ }
111
+ if (endRanks.size()) {
112
+ report += c10::str(
113
+ "\n\t [",
114
+ ranksToString(endRanks),
115
+ "] finished collective #",
116
+ lagSeq,
117
+ ", but didn't join collective #",
118
+ lagSeq + 1,
119
+ " (count from 1)");
120
+ }
121
+ return report;
122
+ }
123
+
124
+ inline std::string dumpSnapshot(TraceMap& traceMap) {
125
+ std::string report = "\n\t - Snapshot of ranks' latest states:";
126
+ for (auto& tracePair : traceMap) {
127
+ uint64_t seq = tracePair.first;
128
+ std::map<int, std::pair<std::string, TraceDebugEvent>>& subMap =
129
+ tracePair.second;
130
+
131
+ std::unordered_map<std::string, std::vector<int>> collectivesStart;
132
+ std::unordered_map<std::string, std::vector<int>> collectivesEnd;
133
+ for (auto& p : subMap) {
134
+ int rank = p.first;
135
+ const std::string& col = p.second.first;
136
+ if (p.second.second == kEventStart) {
137
+ collectivesStart[col].push_back(rank);
138
+ } else {
139
+ collectivesEnd[col].push_back(rank);
140
+ }
141
+ }
142
+
143
+ if (collectivesStart.size()) {
144
+ report += c10::str("\n\t #", seq, " started ranks:");
145
+ for (auto& mapPair : collectivesStart) {
146
+ report += c10::str(
147
+ "\n\t [",
148
+ ranksToString(mapPair.second),
149
+ "] started ",
150
+ mapPair.first);
151
+ }
152
+ }
153
+ if (collectivesEnd.size()) {
154
+ report += c10::str("\n\t #", seq, " finished ranks:");
155
+ for (auto& mapPair : collectivesEnd) {
156
+ report += c10::str(
157
+ "\n\t [",
158
+ ranksToString(mapPair.second),
159
+ "] finished ",
160
+ mapPair.first);
161
+ }
162
+ }
163
+ }
164
+ return report;
165
+ }
166
+
167
+ inline bool parseTraceValue(
168
+ c10::intrusive_ptr<Store>& store,
169
+ const std::string& key,
170
+ uint64_t& seq,
171
+ std::string& col) {
172
+ try {
173
+ std::vector<uint8_t> traceValue = store->get(key);
174
+ memcpy(&seq, traceValue.data(), sizeof(seq));
175
+ std::string colName((char*)traceValue.data() + sizeof(seq));
176
+ col = colName;
177
+ return true;
178
+ } catch (...) {
179
+ LOG(ERROR) << "Store is down while getting key " << key;
180
+ return false;
181
+ }
182
+ return true;
183
+ }
184
+
185
+ inline std::string retrieveDesyncReport(
186
+ c10::intrusive_ptr<Store>& store,
187
+ const std::string& pgName,
188
+ int myRank,
189
+ int worldSize) {
190
+ std::string report;
191
+
192
+ uint64_t thisSeq;
193
+ std::string thisCol;
194
+
195
+ std::vector<int> missingRanks;
196
+ TraceMap traceMap;
197
+
198
+ for (const auto rank : c10::irange(worldSize)) {
199
+ // Build traceMapStart.
200
+ uint64_t seqStart;
201
+ {
202
+ std::string traceKeyStart = getTraceStartKey(pgName, rank);
203
+ if (!store->check({traceKeyStart})) {
204
+ missingRanks.push_back(rank);
205
+ continue;
206
+ }
207
+ std::string col;
208
+ if (!parseTraceValue(store, traceKeyStart, seqStart, col)) {
209
+ return report;
210
+ }
211
+ traceMap[seqStart].emplace(rank, std::make_pair(col, kEventStart));
212
+ if (rank == myRank) {
213
+ thisSeq = seqStart;
214
+ thisCol = std::move(col);
215
+ }
216
+ }
217
+
218
+ // Build traceMapEnd.
219
+ {
220
+ std::string traceKeyEnd = getTraceEndKey(pgName, rank);
221
+ if (!store->check({traceKeyEnd})) {
222
+ continue;
223
+ }
224
+ uint64_t seq;
225
+ std::string col;
226
+ if (!parseTraceValue(store, traceKeyEnd, seq, col)) {
227
+ return report;
228
+ }
229
+ if (seq == seqStart) {
230
+ traceMap[seq][rank].second = kEventEnd;
231
+ }
232
+ }
233
+ }
234
+
235
+ TORCH_INTERNAL_ASSERT(
236
+ !missingRanks.empty() || !traceMap.empty(),
237
+ "Trace shouldn't be empty while enabled GLOO_ASYNC_TIMEOUT_DEBUG");
238
+ TORCH_INTERNAL_ASSERT(
239
+ !thisCol.empty(),
240
+ "Timeout rank [",
241
+ myRank,
242
+ "] must have collective tracking iteam in c10::Store trace");
243
+ TORCH_INTERNAL_ASSERT(
244
+ traceMap[thisSeq][myRank].second == kEventStart,
245
+ "Timeout rank [",
246
+ myRank,
247
+ "] last trace item must be kEventStart. thisSeq = ",
248
+ thisSeq,
249
+ ", col = ",
250
+ thisCol);
251
+
252
+ report += c10::str(
253
+ "\n\t - [", myRank, "] Timeout at collective: ", thisCol, ", #", thisSeq);
254
+
255
+ if (!missingRanks.empty()) {
256
+ report += analyzeMissingRanks(missingRanks);
257
+ } else {
258
+ report += analyzeLaggingRanks(traceMap);
259
+ report += dumpSnapshot(traceMap);
260
+ }
261
+
262
+ return report;
263
+ }
264
+
265
+ /* Trace Utils Related to Flight Recorder */
266
+
267
+ /* Note: this is only used by PGNCCL (could be generalized in an ideal world but
268
+ * wasn't done that way, so isn't expected to be fully general at the moment) */
269
+
270
+ #ifdef USE_C10D_NCCL
271
+
272
+ DebugInfoWriter::DebugInfoWriter(int rank) {
273
+ std::string fileName = getCvarString(
274
+ {"TORCH_NCCL_DEBUG_INFO_TEMP_FILE"}, "/tmp/nccl_trace_rank_");
275
+ filename_ = c10::str(fileName, rank);
276
+ }
277
+
278
+ DebugInfoWriter::~DebugInfoWriter() = default;
279
+
280
+ void DebugInfoWriter::write(const std::string& ncclTrace) {
281
+ // Open a file for writing. The ios::binary flag is used to write data as
282
+ // binary.
283
+ std::ofstream file(filename_, std::ios::binary);
284
+
285
+ // Check if the file was opened successfully.
286
+ if (!file.is_open()) {
287
+ LOG(ERROR) << "Error opening file for writing NCCLPG debug info: "
288
+ << filename_;
289
+ return;
290
+ }
291
+
292
+ file.write(ncclTrace.data(), ncclTrace.size());
293
+ LOG(INFO) << "Finished writing NCCLPG debug info to " << filename_;
294
+ }
295
+
296
+ inline std::string pickle_str(const c10::IValue& v) {
297
+ std::vector<char> result;
298
+ {
299
+ auto writer = [&](const char* data, size_t size) {
300
+ result.insert(result.end(), data, data + size);
301
+ };
302
+ torch::jit::Pickler pickler(
303
+ writer, nullptr, nullptr, nullptr, nullptr, false);
304
+ pickler.protocol();
305
+ pickler.pushIValue(v);
306
+ pickler.stop();
307
+ }
308
+ return std::string(result.begin(), result.end());
309
+ }
310
+
311
+ inline c10::Dict<c10::IValue, c10::IValue> new_dict() {
312
+ return c10::Dict<c10::IValue, c10::IValue>(
313
+ c10::AnyType::get(), c10::AnyType::get());
314
+ }
315
+
316
+ inline c10::List<c10::IValue> new_list() {
317
+ return c10::List<c10::IValue>(c10::AnyType::get());
318
+ }
319
+
320
+ struct NCCLTraceBuffer {
321
+ static NCCLTraceBuffer* get() {
322
+ // intentionally leak on exit
323
+ // because this will hold python state that may get destructed
324
+ static NCCLTraceBuffer* instance = new NCCLTraceBuffer();
325
+ return instance;
326
+ }
327
+ NCCLTraceBuffer() {
328
+ max_entries_ = getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0);
329
+ capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false);
330
+ enabled_ = max_entries_ > 0;
331
+ }
332
+ using EventList = std::vector<at::cuda::CUDAEvent>;
333
+ struct Entry {
334
+ size_t id_; // incremented id in the trace buffer
335
+ // used to figure out where in the circular entries
336
+ // buffer this entry will be located to
337
+ // update state information
338
+ size_t pg_id_;
339
+ size_t seq_id_; // as tracked by the process group
340
+ const char* profiling_name_;
341
+
342
+ std::shared_ptr<torch::CapturedTraceback> traceback_;
343
+ // we borrow pointers to start_ and end_ so we can query the state
344
+ // on reporting. However, once the event is completed, the call
345
+ // to `complete` will clear these.
346
+ EventList *start_, *end_;
347
+
348
+ // timestamp when the entry was created, likely close to the time the work
349
+ // was 'enqueued'- not necessarily started
350
+ c10::time_t time_created_;
351
+
352
+ const char* state_ = "scheduled";
353
+
354
+ // size information for input/output tensors
355
+ c10::SmallVector<int, 4> input_dims_;
356
+ c10::SmallVector<int, 4> output_dims_;
357
+ c10::SmallVector<int64_t, 8> sizes_; // flattened from inputs, outputs
358
+ bool retired_ = false; // is this work entry no longer in the workMetaList_?
359
+ // a retired but not completed event has timed out
360
+ };
361
+
362
+ bool enabled_ = false;
363
+ bool capture_cpp_stack_ = false;
364
+ std::mutex mutex_;
365
+ std::vector<Entry> entries_;
366
+ size_t max_entries_ = 0;
367
+ size_t next_ = 0;
368
+ size_t id_ = 0;
369
+
370
+ c10::optional<size_t> record(
371
+ size_t pg_id,
372
+ size_t seq_id,
373
+ const char* profiling_name,
374
+ const std::vector<at::Tensor>& inputs,
375
+ const std::vector<at::Tensor>& outputs,
376
+ EventList* start,
377
+ EventList* end) {
378
+ if (!enabled_) {
379
+ return c10::nullopt;
380
+ }
381
+ auto traceback =
382
+ torch::CapturedTraceback::gather(true, true, capture_cpp_stack_);
383
+ std::lock_guard<std::mutex> guard(mutex_);
384
+
385
+ auto te = Entry{
386
+ id_,
387
+ pg_id,
388
+ seq_id,
389
+ profiling_name,
390
+ std::move(traceback),
391
+ std::move(start),
392
+ std::move(end),
393
+ c10::getTime()};
394
+
395
+ for (const auto& input : inputs) {
396
+ c10::IntArrayRef sizes = input.sizes();
397
+ te.input_dims_.push_back(sizes.size());
398
+ te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end());
399
+ }
400
+
401
+ for (const auto& output : outputs) {
402
+ c10::IntArrayRef sizes = output.sizes();
403
+ te.output_dims_.push_back(sizes.size());
404
+ te.sizes_.insert(te.sizes_.end(), sizes.begin(), sizes.end());
405
+ }
406
+
407
+ if (entries_.size() < max_entries_) {
408
+ entries_.emplace_back(std::move(te));
409
+ } else {
410
+ entries_[next_++] = std::move(te);
411
+ if (next_ == max_entries_) {
412
+ next_ = 0;
413
+ }
414
+ }
415
+ return id_++;
416
+ }
417
+
418
+ void update_state(Entry& r) {
419
+ if (r.start_ != nullptr) {
420
+ bool started = true;
421
+ for (auto& ev : *r.start_) {
422
+ if (!ev.query()) {
423
+ started = false;
424
+ break;
425
+ }
426
+ }
427
+ if (started) {
428
+ r.state_ = "started";
429
+ }
430
+ }
431
+ if (r.end_ != nullptr) {
432
+ bool completed = true;
433
+ for (auto& ev : *r.end_) {
434
+ if (!ev.query()) {
435
+ completed = false;
436
+ break;
437
+ }
438
+ }
439
+ if (completed) {
440
+ r.state_ = "completed";
441
+ }
442
+ }
443
+ }
444
+
445
+ std::vector<Entry> dump_entries() {
446
+ std::lock_guard<std::mutex> guard(mutex_);
447
+ std::vector<Entry> result;
448
+ result.reserve(entries_.size());
449
+ result.insert(result.end(), entries_.begin() + next_, entries_.end());
450
+ result.insert(result.end(), entries_.begin(), entries_.begin() + next_);
451
+ // query any remaining events
452
+ for (auto& r : result) {
453
+ update_state(r);
454
+ r.start_ = r.end_ = nullptr;
455
+ }
456
+ return result;
457
+ }
458
+
459
+ void retire_id(c10::optional<size_t> id) {
460
+ if (!enabled_ || !id) {
461
+ return;
462
+ }
463
+ std::lock_guard<std::mutex> guard(mutex_);
464
+ auto& entry = entries_.at(*id % max_entries_);
465
+ if (entry.id_ == *id) {
466
+ update_state(entry);
467
+ entry.retired_ = true;
468
+ entry.start_ = entry.end_ = nullptr;
469
+ }
470
+ }
471
+
472
+ std::string dump() {
473
+ auto result = dump_entries();
474
+ auto entries = new_list();
475
+ c10::IValue pg_id_s = "pg_id";
476
+ c10::IValue seq_id_s = "seq_id";
477
+ c10::IValue profiling_name_s = "profiling_name";
478
+ c10::IValue input_sizes_s = "input_sizes";
479
+ c10::IValue output_sizes_s = "output_sizes";
480
+ c10::IValue time_created_s = "time_created_us";
481
+
482
+ c10::IValue frames_s = "frames";
483
+ c10::IValue state_s = "state";
484
+ c10::IValue line_s = "line";
485
+ c10::IValue name_s = "name";
486
+ c10::IValue filename_s = "filename";
487
+ c10::IValue retired_s = "retired";
488
+
489
+ std::vector<torch::CapturedTraceback*> tracebacks;
490
+ for (auto& e : result) {
491
+ tracebacks.push_back(e.traceback_.get());
492
+ }
493
+ torch::SymbolizedTracebacks stracebacks = torch::symbolize(tracebacks);
494
+ std::vector<c10::IValue> all_frames;
495
+ for (const auto& f : stracebacks.all_frames) {
496
+ auto d = new_dict();
497
+ d.insert(name_s, f.funcname);
498
+ d.insert(filename_s, f.filename);
499
+ d.insert(line_s, int64_t(f.lineno));
500
+ all_frames.emplace_back(std::move(d));
501
+ }
502
+
503
+ for (auto i : c10::irange(result.size())) {
504
+ auto& e = result.at(i);
505
+ auto& tb = stracebacks.tracebacks.at(i);
506
+ auto dict = new_dict();
507
+ dict.insert(pg_id_s, int64_t(e.pg_id_));
508
+ dict.insert(seq_id_s, int64_t(e.seq_id_));
509
+ dict.insert(profiling_name_s, e.profiling_name_);
510
+ dict.insert(time_created_s, int64_t(e.time_created_ / 1000));
511
+
512
+ auto it = e.sizes_.begin();
513
+ auto read_sizes = [&](const c10::SmallVector<int, 4>& dims) {
514
+ auto sizes = new_list();
515
+ for (auto dim : dims) {
516
+ auto arg_sizes = new_list();
517
+ for (auto i : c10::irange(dim)) {
518
+ (void)i;
519
+ arg_sizes.push_back(*it++);
520
+ }
521
+ sizes.push_back(arg_sizes);
522
+ }
523
+ return sizes;
524
+ };
525
+
526
+ dict.insert(input_sizes_s, read_sizes(e.input_dims_));
527
+ dict.insert(output_sizes_s, read_sizes(e.output_dims_));
528
+ dict.insert(state_s, e.state_);
529
+ dict.insert(retired_s, e.retired_);
530
+
531
+ auto frames = new_list();
532
+ for (int64_t frame : tb) {
533
+ frames.push_back(all_frames.at(frame));
534
+ }
535
+ dict.insert(frames_s, frames);
536
+ entries.push_back(dict);
537
+ }
538
+ return pickle_str(entries);
539
+ }
540
+ };
541
+
542
+ #endif
543
+ } // namespace c10d
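
A minimal usage sketch of the flight recorder above, added here for clarity rather than taken from the source. It assumes a build with NCCL support, TORCH_NCCL_TRACE_BUFFER_SIZE set to a positive value, and hypothetical tensors and event lists standing in for what the NCCL work object normally owns (includes omitted).

// Sketch only: record a collective, retire it once the work is done or has
// timed out, and dump the pickled trace (e.g. from a watchdog thread).
auto* tb = c10d::NCCLTraceBuffer::get();
std::vector<at::Tensor> inputs = {at::ones({4, 4})};    // hypothetical
std::vector<at::Tensor> outputs = {at::empty({4, 4})};  // hypothetical
auto start_events = std::make_unique<c10d::NCCLTraceBuffer::EventList>();
auto end_events = std::make_unique<c10d::NCCLTraceBuffer::EventList>();
c10::optional<size_t> trace_id = tb->record(
    /*pg_id=*/0,
    /*seq_id=*/42,
    /*profiling_name=*/"nccl:all_reduce",
    inputs,
    outputs,
    start_events.get(),
    end_events.get());
// ... later, once the work object is observed complete or has timed out:
tb->retire_id(trace_id);
std::string pickled = tb->dump();  // pickled list of per-collective dicts
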
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp ADDED
@@ -0,0 +1,58 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_UCC
4
+
5
+ #include <torch/csrc/distributed/c10d/UCCUtils.hpp>
6
+
7
+ namespace c10d {
8
+
9
+ #define RECORD_COMMS_TRACE( \
10
+ _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \
11
+ do { \
12
+ if (torch_ucc_config.enable_comms_logger) { \
13
+ _comms_tracer->recordComms( \
14
+ opTypeToString(_opType), \
15
+ (uintptr_t)_work.get(), \
16
+ _rank, \
17
+ _comm_size, \
18
+ _inTensors, \
19
+ _outTensors); \
20
+ } \
21
+ } while (0)
22
+
23
+ // interfaces to collect communication traces
24
+ class TORCH_API CommTraceLogger : public torch::CustomClassHolder {
25
+ private:
26
+ std::vector<std::string> comms_trace_;
27
+ std::vector<std::string> curBlocks_; /* unused */
28
+ std::vector<int64_t> curOutSplitSizes_;
29
+ std::vector<int64_t> curInSplitSizes_;
30
+ int curRoot_ = -1;
31
+ unsigned long seqnum = 0;
32
+
33
+ public:
34
+ void setCurBlock(const std::string& name); /* unused */
35
+ void popBlock(); /* unused */
36
+ // record root info if applicable, e.g., broadcast, gather, scatter
37
+ void recordOptionalInfo(int root = -1);
38
+ // record input/output splits of Alltoallv
39
+ void recordOptionalInfo(
40
+ const std::vector<int64_t>& outputSplitSizes = {},
41
+ const std::vector<int64_t>& inputSplitSizes = {});
42
+ // record essential comms information
43
+ void recordComms(
44
+ const std::string& collName,
45
+ const uintptr_t workReq = 0,
46
+ const int rank = -1,
47
+ const int world_size = -1,
48
+ const std::vector<at::Tensor>& inputTensors = {},
49
+ const std::vector<at::Tensor>& outputTensor = {});
50
+ // return collected comms traces
51
+ std::vector<std::string>& getCommsTrace() {
52
+ return comms_trace_;
53
+ }
54
+ };
55
+
56
+ } // namespace c10d
57
+
58
+ #endif // USE_C10D_UCC
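
For context, a hedged sketch of how the tracer above might be exercised directly (inside the UCC process group it is normally driven through the RECORD_COMMS_TRACE macro). The tensors, rank, and world size are made-up values, and a build with USE_C10D_UCC is assumed (includes omitted, aside from <iostream>).

// Sketch only: record one collective and read the serialized trace back.
#include <iostream>
auto tracer = c10::make_intrusive<c10d::CommTraceLogger>();
std::vector<at::Tensor> in = {at::ones({8})};    // hypothetical input
std::vector<at::Tensor> out = {at::empty({8})};  // hypothetical output
tracer->recordOptionalInfo(/*root=*/0);          // only meaningful for rooted ops
tracer->recordComms(
    "allreduce", /*workReq=*/0, /*rank=*/0, /*world_size=*/2, in, out);
for (const std::string& line : tracer->getCommsTrace()) {
  std::cout << line << "\n";  // one serialized record per collective
}
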
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
4
+
5
+ namespace c10d {
6
+ namespace tcputil {
7
+
8
+ #define CONNECT_SOCKET_OFFSET 2
9
+
10
+ inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) {
11
+ return ::poll(fds, nfds, timeout);
12
+ }
13
+
14
+ inline void addPollfd(
15
+ std::vector<struct pollfd>& fds,
16
+ int socket,
17
+ short events) {
18
+ fds.push_back({.fd = socket, .events = events});
19
+ }
20
+
21
+ inline struct ::pollfd getPollfd(int socket, short events) {
22
+ struct ::pollfd res = {.fd = socket, .events = events};
23
+ return res;
24
+ }
25
+
26
+ } // namespace tcputil
27
+ } // namespace c10d
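
These helpers are thin wrappers over POSIX poll(2). A small sketch of how c10d's TCP utilities use them to wait for a socket to become readable; `sock` is a placeholder descriptor and the connection setup is omitted.

#include <torch/csrc/distributed/c10d/UnixSockUtils.hpp>
#include <vector>

// Sketch only: wait up to 5 seconds for `sock` to become readable.
void waitReadable(int sock) {
  std::vector<struct pollfd> fds;
  c10d::tcputil::addPollfd(fds, sock, POLLIN);  // register interest in reads
  int ready = c10d::tcputil::poll(fds.data(), fds.size(), /*timeout=*/5000);
  if (ready > 0 && (fds[0].revents & POLLIN)) {
    // data is available; the caller would now recv() from `sock`
  }
}
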
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp ADDED
@@ -0,0 +1,729 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/accumulate.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/distributed/c10d/Types.hpp>
8
+
9
+ #ifdef _WIN32
10
+ #include <winsock2.h>
11
+ #include <ws2tcpip.h>
12
+ typedef SSIZE_T ssize_t;
13
+ #pragma comment(lib, "Ws2_32.lib")
14
+ #else
15
+ #include <fcntl.h>
16
+ #include <netdb.h>
17
+ #include <sys/poll.h>
18
+ #include <sys/socket.h>
19
+ #include <unistd.h>
20
+ #endif
21
+
22
+ #include <sys/types.h>
23
+
24
+ #include <chrono>
25
+ #include <cstdint>
26
+ #include <cstdlib>
27
+ #include <functional>
28
+ #include <limits>
29
+ #include <string>
30
+ #include <system_error>
31
+ #include <tuple>
32
+ #include <vector>
33
+
34
+ namespace c10d {
35
+
36
+ // Retrieve tensor shapes from the given tensors.
37
+ TORCH_API std::vector<at::Tensor> getTensorShapes(
38
+ const std::vector<at::Tensor>& tensors);
39
+
40
+ // Use -2 to represent unset state of env vars
41
+ #define C10D_ENV_NOT_SET -2
42
+
43
+ // Turns at::IntArrayRef into "(1, 2, 3, 4)".
44
+ inline std::string toString(at::IntArrayRef l) {
45
+ std::stringstream ss;
46
+ ss << "(";
47
+ for (const auto i : c10::irange(l.size())) {
48
+ if (i > 0) {
49
+ ss << ", ";
50
+ }
51
+ ss << l[i];
52
+ }
53
+ ss << ")";
54
+ return ss.str();
55
+ }
56
+
57
+ inline std::string toString(const c10::Layout& layout) {
58
+ std::stringstream ss;
59
+ ss << layout;
60
+ return ss.str();
61
+ }
62
+
63
+ inline void assertSameType(
64
+ const at::DeprecatedTypeProperties& type,
65
+ const std::vector<at::Tensor>& tensors) {
66
+ for (const auto i : c10::irange(tensors.size())) {
67
+ if (!tensors[i].options().type_equal(type.options())) {
68
+ const std::string expected = type.toString();
69
+ const std::string actual = tensors[i].toString();
70
+ throw std::invalid_argument(
71
+ "mixed types (" + expected + " and " + actual + ")");
72
+ }
73
+ }
74
+ }
75
+
76
+ inline std::vector<std::string> split(
77
+ char separator,
78
+ const std::string& string) {
79
+ std::vector<std::string> pieces;
80
+ std::stringstream ss(string);
81
+ std::string item;
82
+ while (std::getline(ss, item, separator)) {
83
+ pieces.push_back(std::move(item));
84
+ }
85
+ return pieces;
86
+ }
87
+
88
+ inline std::string getCvarString(
89
+ const std::vector<std::string>& env,
90
+ const char* def) {
91
+ const char* ret = def;
92
+
93
+ if (env.empty()) {
94
+ TORCH_CHECK(false, "No environment variables passed");
95
+ return ret;
96
+ }
97
+
98
+ /* parse environment variable in reverse order, so the early
99
+ * versions of a variable get higher priority than the later
100
+ * versions of the same variable */
101
+ for (int i = env.size() - 1; i >= 0; i--) {
102
+ const char* val = std::getenv(env[i].c_str());
103
+ if (val == nullptr) {
104
+ continue;
105
+ } else if (i) {
106
+ TORCH_WARN(
107
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
108
+ " instead");
109
+ }
110
+
111
+ ret = val;
112
+ }
113
+
114
+ return ret;
115
+ }
116
+
117
+ inline int getCvarInt(const std::vector<std::string>& env, int def) {
118
+ int ret = def;
119
+
120
+ if (env.empty()) {
121
+ TORCH_CHECK(false, "No environment variables passed");
122
+ return ret;
123
+ }
124
+
125
+ /* parse environment variable in reverse order, so the early
126
+ * versions of a variable get higher priority than the later
127
+ * versions of the same variable */
128
+ for (int i = env.size() - 1; i >= 0; i--) {
129
+ char* val = std::getenv(env[i].c_str());
130
+ if (val == nullptr) {
131
+ continue;
132
+ } else if (i) {
133
+ TORCH_WARN(
134
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
135
+ " instead");
136
+ }
137
+
138
+ try {
139
+ ret = std::stoi(val);
140
+ } catch (std::exception& e) {
141
+ TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
142
+ }
143
+ }
144
+
145
+ return ret;
146
+ }
147
+
148
+ inline bool getCvarBool(const std::vector<std::string>& env, bool def) {
149
+ bool ret = def;
150
+
151
+ if (env.empty()) {
152
+ TORCH_CHECK(false, "No environment variables passed");
153
+ return ret;
154
+ }
155
+
156
+ /* parse environment variable in reverse order, so the early
157
+ * versions of a variable get higher priority than the later
158
+ * versions of the same variable */
159
+ for (int i = env.size() - 1; i >= 0; i--) {
160
+ char* val_ = std::getenv(env[i].c_str());
161
+ if (val_ == nullptr) {
162
+ continue;
163
+ } else if (i) {
164
+ TORCH_WARN(
165
+ "Environment variable " + env[i] + " is deprecated; use " + env[0] +
166
+ " instead");
167
+ }
168
+
169
+ std::string val = std::string(val_);
170
+ for (auto& x : val) {
171
+ x = std::tolower(x);
172
+ }
173
+
174
+ if (val == "y" || val == "yes" || val == "1" || val == "t" ||
175
+ val == "true") {
176
+ ret = true;
177
+ } else if (
178
+ val == "n" || val == "no" || val == "0" || val == "f" ||
179
+ val == "false") {
180
+ ret = false;
181
+ } else {
182
+ TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
183
+ return ret;
184
+ }
185
+ }
186
+
187
+ return ret;
188
+ }
189
+
190
+ inline void assertSameSizes(
191
+ const at::IntArrayRef& sizes,
192
+ const std::vector<at::Tensor>& tensors) {
193
+ for (const auto i : c10::irange(tensors.size())) {
194
+ if (!tensors[i].sizes().equals(sizes)) {
195
+ const auto expected = toString(sizes);
196
+ const auto actual = toString(tensors[i].sizes());
197
+ throw std::invalid_argument(
198
+ "mixed sizes (" + expected + " and " + actual + ")");
199
+ }
200
+ }
201
+ }
202
+
203
+ inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
204
+ // Ensure we have at least one tensor
205
+ if (tensors.empty()) {
206
+ throw std::invalid_argument("argument is empty");
207
+ }
208
+
209
+ // Ensure all tensors have identical type and shape
210
+ auto options = tensors[0].options();
211
+ auto sizes = tensors[0].sizes();
212
+ for (const auto i : c10::irange(1, tensors.size())) {
213
+ if (!tensors[i].options().type_equal(options)) {
214
+ const auto expected = toString(options);
215
+ const auto actual = toString(tensors[i].options());
216
+ throw std::invalid_argument(
217
+ "argument contains mixed types (" + expected + " and " + actual +
218
+ ")");
219
+ }
220
+ if (!tensors[i].sizes().equals(sizes)) {
221
+ const auto expected = toString(sizes);
222
+ const auto actual = toString(tensors[i].sizes());
223
+ throw std::invalid_argument(
224
+ "argument contains mixed sizes (" + expected + " and " + actual +
225
+ ")");
226
+ }
227
+ }
228
+ }
229
+
230
+ inline void assertTypeMatch(
231
+ std::function<void(const std::string&)> fn,
232
+ const at::DeprecatedTypeProperties& type,
233
+ const at::ArrayRef<at::Tensor> tensors,
234
+ size_t index) {
235
+ if (!tensors[index].options().type_equal(type.options())) {
236
+ fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
237
+ type.toString() + ", got " + tensors[index].toString() + ")");
238
+ }
239
+ }
240
+
241
+ inline void assertTypeMatch(
242
+ std::function<void(const std::string&)> fn,
243
+ const at::TensorOptions& options,
244
+ const at::ArrayRef<at::Tensor> tensors,
245
+ size_t index) {
246
+ if (!tensors[index].options().type_equal(options)) {
247
+ fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
248
+ toString(options) + ", got " + toString(tensors[index].options()) + ")");
249
+ }
250
+ }
251
+
252
+ inline void assertSizesMatch(
253
+ std::function<void(const std::string&)> fn,
254
+ const at::IntArrayRef& sizes,
255
+ const at::ArrayRef<at::Tensor> tensors,
256
+ size_t index) {
257
+ if (tensors[index].sizes() != sizes) {
258
+ fn("invalid tensor size at index " + std::to_string(index) + " (expected " +
259
+ toString(sizes) + ", got " + toString(tensors[index].sizes()) + ")");
260
+ }
261
+ }
262
+
263
+ inline void assertLayoutMatch(
264
+ std::function<void(const std::string&)> fn,
265
+ const c10::Layout& expected,
266
+ const at::ArrayRef<at::Tensor> tensors,
267
+ size_t index) {
268
+ const auto& actual = tensors[index].layout();
269
+ if (actual != expected) {
270
+ fn("invalid tensor layout at index " + std::to_string(index) +
271
+ " (expected " + toString(expected) + ", got " + toString(actual) + ")");
272
+ }
273
+ }
274
+
275
+ inline void assertLayoutMatch(
276
+ std::function<void(const std::string&)> fn,
277
+ const at::ArrayRef<at::Tensor> tensors) {
278
+ const auto& layout = tensors[0].layout();
279
+ for (const auto i : c10::irange(1, tensors.size())) {
280
+ assertLayoutMatch(fn, layout, tensors, i);
281
+ }
282
+ }
283
+
284
+ inline void assertNonEmpty(
285
+ std::function<void(const std::string&)> fn,
286
+ const at::ArrayRef<at::Tensor> tensors) {
287
+ if (tensors.empty()) {
288
+ fn("requires non-empty tensor list");
289
+ }
290
+ }
291
+
292
+ inline void assertSingleElement(
293
+ std::function<void(const std::string&)> fn,
294
+ const at::ArrayRef<at::Tensor> tensors) {
295
+ if (tensors.size() != 1) {
296
+ fn("requires a single-element tensor list");
297
+ }
298
+ }
299
+
300
+ inline void assertSingleElementInput(
301
+ std::function<void(const std::string&)> fn,
302
+ const at::ArrayRef<at::Tensor> tensors) {
303
+ if (tensors.size() != 1) {
304
+ fn("requires a single-element input tensor list");
305
+ }
306
+ }
307
+
308
+ inline void assertSingleElementOutput(
309
+ std::function<void(const std::string&)> fn,
310
+ const at::ArrayRef<at::Tensor> tensors) {
311
+ if (tensors.size() != 1) {
312
+ fn("requires a single-element output tensor list");
313
+ }
314
+ }
315
+
316
+ inline void assertRootRank(
317
+ std::function<void(const std::string&)> fn,
318
+ int rank,
319
+ int size) {
320
+ if (rank < 0 || rank >= size) {
321
+ fn("invalid root rank: " + std::to_string(rank));
322
+ }
323
+ }
324
+
325
+ inline void assertRootTensor(
326
+ std::function<void(const std::string&)> fn,
327
+ int rank,
328
+ int size) {
329
+ if (rank < 0 || rank >= size) {
330
+ fn("invalid root tensor: " + std::to_string(rank));
331
+ }
332
+ }
333
+
334
+ inline void assertDense(
335
+ std::function<void(const std::string&)> fn,
336
+ const at::ArrayRef<at::Tensor> tensors) {
337
+ const auto& layout = tensors[0].layout();
338
+ if (layout != at::kStrided) {
339
+ fn("only supports dense tensors");
340
+ }
341
+ }
342
+
343
+ inline void assertCPU(
344
+ std::function<void(const std::string&)> fn,
345
+ const at::ArrayRef<at::Tensor> tensors) {
346
+ const auto& device = tensors[0].device();
347
+ if (device.type() != at::kCPU) {
348
+ fn("only supports CPU tensors");
349
+ }
350
+ }
351
+
352
+ inline void assertSameDevice(
353
+ std::function<void(const std::string&)> fn,
354
+ const at::ArrayRef<at::Tensor> tensors) {
355
+ if (tensors.size() < 2) {
356
+ return;
357
+ }
358
+ const auto& device = tensors[0].device();
359
+ for (const auto i : c10::irange(1, tensors.size())) {
360
+ if (tensors[i].device() != device) {
361
+ fn("tensors should be on the same device");
362
+ }
363
+ }
364
+ }
365
+
366
+ inline void assertTypeAndSizesMatch(
367
+ std::function<void(const std::string&)> fn,
368
+ const at::ArrayRef<at::Tensor> tensors,
369
+ const at::DeprecatedTypeProperties& type,
370
+ const at::IntArrayRef& sizes) {
371
+ for (const auto i : c10::irange(tensors.size())) {
372
+ assertTypeMatch(fn, type, tensors, i);
373
+ assertSizesMatch(fn, sizes, tensors, i);
374
+ }
375
+ }
376
+
377
+ inline void assertTypeAndSizesMatch(
378
+ std::function<void(const std::string&)> fn,
379
+ const at::ArrayRef<at::Tensor> tensors,
380
+ const at::TensorOptions& options,
381
+ const at::IntArrayRef& sizes) {
382
+ for (const auto i : c10::irange(tensors.size())) {
383
+ assertTypeMatch(fn, options, tensors, i);
384
+ assertSizesMatch(fn, sizes, tensors, i);
385
+ }
386
+ }
387
+
388
+ inline void assertTypeAndSizesMatch(
389
+ std::function<void(const std::string&)> fn,
390
+ const at::ArrayRef<at::Tensor> tensors) {
391
+ const auto& options = tensors[0].options();
392
+ const auto sizes = tensors[0].sizes();
393
+ assertTypeAndSizesMatch(fn, tensors.slice(1), options, sizes);
394
+ }
395
+
396
+ // Copied from ATen/core/functional.h.
397
+ template <typename F, typename T>
398
+ inline auto fmap(T& inputs, const F& fn)
399
+ -> std::vector<decltype(fn(*inputs.begin()))> {
400
+ std::vector<decltype(fn(*inputs.begin()))> r;
401
+ r.reserve(inputs.size());
402
+ for (auto& input : inputs) {
403
+ r.push_back(fn(input));
404
+ }
405
+ return r;
406
+ }
407
+
408
+ // Copied from torch/csrc/utils/tensor_flatten.h.
409
+ inline at::Tensor flattenDenseTensors(at::TensorList tensors) {
410
+ static const auto flatten = [](const at::Tensor& t) {
411
+ return t.contiguous().view({-1});
412
+ };
413
+ if (tensors.size() == 1) {
414
+ return flatten(tensors[0]);
415
+ }
416
+ return at::cat(::c10d::fmap(tensors, flatten));
417
+ }
418
+
419
+ inline at::Tensor newLikeFlat(
420
+ std::vector<std::vector<at::Tensor>>& tensors,
421
+ size_t deviceIdx) {
422
+ if (tensors.empty() || tensors[0].empty()) {
423
+ TORCH_CHECK(false, "Received an empty list");
424
+ }
425
+ if (deviceIdx >= tensors.size()) {
426
+ TORCH_CHECK(false, "Invalid device index");
427
+ }
428
+ auto& t = tensors[deviceIdx][0];
429
+ auto device = t.device();
430
+ for (const auto i : c10::irange(1, tensors[deviceIdx].size())) {
431
+ if (tensors[deviceIdx][i].device() != device) {
432
+ TORCH_CHECK(false, "Expecting all tensors on the same device");
433
+ }
434
+ }
435
+ at::DeviceGuard gpuGuard(device);
436
+ std::vector<int64_t> sizes{static_cast<int64_t>(tensors[deviceIdx].size())};
437
+ std::vector<int64_t> strides{static_cast<int64_t>(t.numel())};
438
+ sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
439
+ strides.insert(strides.end(), t.strides().begin(), t.strides().end());
440
+ return at::empty_strided(
441
+ sizes, strides, t.options().memory_format(c10::nullopt));
442
+ }
443
+
444
+ inline at::Tensor newLikeFlat(std::vector<at::Tensor>& tensors) {
445
+ if (tensors.empty()) {
446
+ TORCH_CHECK(false, "Received an empty list");
447
+ }
448
+ auto& t = tensors[0];
449
+ at::DeviceGuard gpuGuard(t.device());
450
+ std::vector<int64_t> sizes{static_cast<int64_t>(tensors.size())};
451
+ sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
452
+ return at::empty(sizes, t.options());
453
+ }
454
+
455
+ inline std::vector<std::vector<int64_t>> getSizes(
456
+ const std::vector<at::Tensor>& tensors) {
457
+ std::vector<std::vector<int64_t>> sizes(tensors.size());
458
+ for (const auto i : c10::irange(tensors.size())) {
459
+ sizes[i] = tensors[i].sizes().vec();
460
+ }
461
+ return sizes;
462
+ }
463
+
464
+ inline std::vector<int> getDevices(const std::vector<at::Tensor>& tensors) {
465
+ std::vector<int> devices(tensors.size(), -1);
466
+ if (tensors[0].device().is_cuda()) {
467
+ for (const auto i : c10::irange(tensors.size())) {
468
+ devices[i] = tensors[i].storage().device().index();
469
+ }
470
+ }
471
+ return devices;
472
+ }
473
+
474
+ template <typename T>
475
+ inline T* getDataPointer(const at::Tensor& tensor) {
476
+ // This method is only used in ProcessGroupGloo for now. Call sites must make
477
+ // sure that the input tensor is contiguous. It is OK if the tensor does not
478
+ // start from the beginning of the storage. For example, it could come from
479
+ // chunk(..., dim=0)[1]. Hence, we need to use data_ptr() instead of
480
+ // tensor.storage().data()
481
+ // NB: not using tensor.data<T>() because tensor is not aware of gloo::TYPE
482
+ return static_cast<T*>(tensor.data_ptr());
483
+ }
484
+
485
+ template <typename T>
486
+ std::vector<T*> getDataPointers(const std::vector<at::Tensor>& tensors) {
487
+ std::vector<T*> ptrs(tensors.size());
488
+ for (const auto i : c10::irange(tensors.size())) {
489
+ ptrs[i] = getDataPointer<T>(tensors[i]);
490
+ }
491
+ return ptrs;
492
+ }
493
+
494
+ // For alltoall split size sanity check
495
+ inline void checkSplitSizes(
496
+ const std::vector<int64_t>& split_sizes,
497
+ const at::Tensor& tensor,
498
+ int group_size) {
499
+ if (split_sizes.empty()) {
500
+ TORCH_CHECK(
501
+ tensor.size(0) % group_size == 0,
502
+ "Tensor's dim 0 does not divide equally across group size");
503
+ } else {
504
+ TORCH_CHECK(
505
+ split_sizes.size() == static_cast<size_t>(group_size),
506
+ "Number of tensor splits not equal to group size");
507
+ const auto sum = c10::sum_integers(split_sizes);
508
+ TORCH_CHECK(
509
+ sum == tensor.size(0), "Split sizes doesn't match total dim 0 size");
510
+ }
511
+ }
512
+
513
+ // Compute alltoall lengths and offsets, handling multi-dimension tensors
514
+ template <typename T>
515
+ size_t computeLengthsAndOffsets(
516
+ const std::vector<int64_t>& split_sizes,
517
+ const at::Tensor& tensor,
518
+ std::vector<T>* lengths,
519
+ std::vector<T>* offsets) {
520
+ size_t group_size = lengths->size();
521
+ bool equal_splits = false;
522
+ size_t dim0_size = tensor.size(0);
523
+ size_t row_size = (dim0_size ? tensor.numel() / dim0_size : 1);
524
+ size_t split_size = 0;
525
+ size_t offset = 0;
526
+
527
+ if (split_sizes.empty()) {
528
+ equal_splits = true;
529
+ split_size = tensor.size(0) / group_size;
530
+ }
531
+ for (const auto i : c10::irange(group_size)) {
532
+ size_t length = row_size * (equal_splits ? split_size : split_sizes[i]);
533
+ (*lengths)[i] = length;
534
+ (*offsets)[i] = offset;
535
+ // TODO: see if we should add overflow protection for offset
536
+ offset += length;
537
+ }
538
+ return offset;
539
+ }
540
+
541
+ template <typename T>
542
+ size_t computeLengthsAndOffsets(
543
+ const std::vector<at::Tensor>& tensors,
544
+ std::vector<T>* lengths,
545
+ std::vector<T>* offsets) {
546
+ size_t group_size = lengths->size();
547
+ size_t offset = 0;
548
+ for (const auto i : c10::irange(group_size)) {
549
+ size_t length = tensors[i].numel();
550
+ (*lengths)[i] = length;
551
+ (*offsets)[i] = offset;
552
+ offset += length;
553
+ }
554
+ return offset;
555
+ }
556
+
557
+ using RankType = uint32_t;
558
+ using SizeType = uint64_t;
559
+
560
+ // `errno` is only meaningful when it fails. E.g., a successful `fork()` sets
561
+ // `errno` to `EINVAL` in child process on some macos
562
+ // (https://stackoverflow.com/a/20295079), and thus `errno` should really only
563
+ // be inspected if an error occurred.
564
+ //
565
+ // `success_cond` is an expression used to check if an error has happend. So for
566
+ // `fork()`, we can use `SYSCHECK(pid = fork(), pid != -1)`. The function output
567
+ // is stored in variable `__output` and may be used in `success_cond`.
568
+ #ifdef _WIN32
569
+ #define SYSCHECK(expr, success_cond) \
570
+ while (true) { \
571
+ auto __output = (expr); \
572
+ auto errno_local = WSAGetLastError(); \
573
+ (void)__output; \
574
+ if (!(success_cond)) { \
575
+ if (errno == EINTR) { \
576
+ continue; \
577
+ } else if ( \
578
+ errno_local == WSAETIMEDOUT || errno_local == WSAEWOULDBLOCK) { \
579
+ C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \
580
+ } else { \
581
+ C10_THROW_ERROR(DistNetworkError, std::strerror(errno_local)); \
582
+ } \
583
+ } else { \
584
+ break; \
585
+ } \
586
+ }
587
+ #else
588
+ #define SYSCHECK(expr, success_cond) \
589
+ while (true) { \
590
+ auto __output = (expr); \
591
+ (void)__output; \
592
+ if (!(success_cond)) { \
593
+ if (errno == EINTR) { \
594
+ continue; \
595
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) { \
596
+ C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \
597
+ } else { \
598
+ C10_THROW_ERROR(DistNetworkError, std::strerror(errno)); \
599
+ } \
600
+ } else { \
601
+ break; \
602
+ } \
603
+ }
604
+ #endif
605
+
606
+ // Most functions indicate error by returning `-1`. This is a helper macro for
607
+ // this common case with `SYSCHECK`.
608
+ // Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1
609
+ #define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1)
610
+
611
+ namespace tcputil {
612
+
613
+ // Send and receive
614
+ template <typename T>
615
+ void sendBytes(
616
+ int socket,
617
+ const T* buffer,
618
+ size_t length,
619
+ bool moreData = false) {
620
+ size_t bytesToSend = sizeof(T) * length;
621
+ if (bytesToSend == 0) {
622
+ return;
623
+ }
624
+
625
+ auto bytes = reinterpret_cast<const uint8_t*>(buffer);
626
+ uint8_t* currentBytes = const_cast<uint8_t*>(bytes);
627
+
628
+ int flags = 0;
629
+
630
+ #ifdef MSG_MORE
631
+ if (moreData) { // there is more data to send
632
+ flags |= MSG_MORE;
633
+ }
634
+ #endif
635
+
636
+ // Ignore SIGPIPE as the send() return value is always checked for error
637
+ #ifdef MSG_NOSIGNAL
638
+ flags |= MSG_NOSIGNAL;
639
+ #endif
640
+
641
+ while (bytesToSend > 0) {
642
+ ssize_t bytesSent;
643
+ SYSCHECK_ERR_RETURN_NEG1(
644
+ bytesSent =
645
+ ::send(socket, (const char*)currentBytes, bytesToSend, flags))
646
+ if (bytesSent == 0) {
647
+ C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
648
+ }
649
+
650
+ bytesToSend -= bytesSent;
651
+ currentBytes += bytesSent;
652
+ }
653
+ }
654
+
655
+ template <typename T>
656
+ void recvBytes(int socket, T* buffer, size_t length) {
657
+ size_t bytesToReceive = sizeof(T) * length;
658
+ if (bytesToReceive == 0) {
659
+ return;
660
+ }
661
+
662
+ auto bytes = reinterpret_cast<uint8_t*>(buffer);
663
+ uint8_t* currentBytes = bytes;
664
+
665
+ while (bytesToReceive > 0) {
666
+ ssize_t bytesReceived;
667
+ SYSCHECK_ERR_RETURN_NEG1(
668
+ bytesReceived = recv(socket, (char*)currentBytes, bytesToReceive, 0))
669
+ if (bytesReceived == 0) {
670
+ C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
671
+ }
672
+
673
+ bytesToReceive -= bytesReceived;
674
+ currentBytes += bytesReceived;
675
+ }
676
+ }
677
+
678
+ // send a vector's length and data
679
+ template <typename T>
680
+ void sendVector(int socket, const std::vector<T>& vec, bool moreData = false) {
681
+ SizeType size = vec.size();
682
+ sendBytes<SizeType>(socket, &size, 1, true);
683
+ sendBytes<T>(socket, vec.data(), size, moreData);
684
+ }
685
+
686
+ // receive a vector as sent in sendVector
687
+ template <typename T>
688
+ std::vector<T> recvVector(int socket) {
689
+ SizeType valueSize;
690
+ recvBytes<SizeType>(socket, &valueSize, 1);
691
+ std::vector<T> value(valueSize);
692
+ recvBytes<T>(socket, value.data(), value.size());
693
+ return value;
694
+ }
695
+
696
+ // this is only for convenience when sending rvalues
697
+ template <typename T>
698
+ void sendValue(int socket, const T& value, bool moreData = false) {
699
+ sendBytes<T>(socket, &value, 1, moreData);
700
+ }
701
+
702
+ template <typename T>
703
+ T recvValue(int socket) {
704
+ T value;
705
+ recvBytes<T>(socket, &value, 1);
706
+ return value;
707
+ }
708
+
709
+ // send a string's length and data
710
+ inline void sendString(
711
+ int socket,
712
+ const std::string& str,
713
+ bool moreData = false) {
714
+ SizeType size = str.size();
715
+ sendBytes<SizeType>(socket, &size, 1, true);
716
+ sendBytes<char>(socket, str.data(), size, moreData);
717
+ }
718
+
719
+ // receive a string as sent in sendString
720
+ inline std::string recvString(int socket) {
721
+ SizeType valueSize;
722
+ recvBytes<SizeType>(socket, &valueSize, 1);
723
+ std::vector<char> value(valueSize);
724
+ recvBytes<char>(socket, value.data(), value.size());
725
+ return std::string(value.data(), value.size());
726
+ }
727
+
728
+ } // namespace tcputil
729
+ } // namespace c10d
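
The tcputil helpers at the end of this header implement a simple length-prefixed framing: each payload is preceded by a SizeType count, so the receiver knows exactly how many bytes to read. A hedged, roughly self-contained sketch (POSIX only, linked against libtorch) that uses a local socketpair in place of a real TCP connection:

#include <torch/csrc/distributed/c10d/Utils.hpp>
#include <sys/socket.h>
#include <unistd.h>
#include <cassert>
#include <string>

int main() {
  int fds[2];
  // A connected pair of local sockets stands in for a store/TCP connection.
  assert(::socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
  // sendString writes a SizeType length prefix followed by the raw bytes...
  c10d::tcputil::sendString(fds[0], "hello from rank 0");
  // ...and recvString reads the prefix, then exactly that many bytes.
  std::string received = c10d::tcputil::recvString(fds[1]);
  assert(received == "hello from rank 0");
  ::close(fds[0]);
  ::close(fds[1]);
  return 0;
}
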
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp ADDED
@@ -0,0 +1,161 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <stdexcept>
5
+ #include <vector>
6
+
7
+ constexpr auto kNoTimeout = std::chrono::milliseconds(0);
8
+
9
+ namespace c10d {
10
+
11
+ constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY";
12
+
13
+ enum class OpType : std::uint8_t {
14
+ BROADCAST = 0,
15
+ ALLREDUCE = 1,
16
+ ALLREDUCE_COALESCED = 2,
17
+ REDUCE = 3,
18
+ ALLGATHER = 4,
19
+ _ALLGATHER_BASE = 5,
20
+ ALLGATHER_COALESCED = 6,
21
+ GATHER = 7,
22
+ SCATTER = 8,
23
+ REDUCE_SCATTER = 9,
24
+ ALLTOALL_BASE = 10,
25
+ ALLTOALL = 11,
26
+ SEND = 12,
27
+ RECV = 13,
28
+ RECVANYSOURCE = 14,
29
+ BARRIER = 15,
30
+ _REDUCE_SCATTER_BASE = 16,
31
+ COALESCED = 17,
32
+ _ALLREDUCE_SPARSE = 18,
33
+ UNKNOWN = 100,
34
+ };
35
+
36
+ // Converts OpType to human readable string.
37
+ TORCH_API std::string opTypeToString(OpType opType);
38
+
39
+ // Whether or not an op is a p2p op (SEND, RECV, RECVANYSOURCE)
40
+ TORCH_API bool isP2POp(OpType opType, bool batchP2P = false);
41
+
42
+ // Please do not use Work API, it is going away, to be
43
+ // replaced by ivalue::Future.
44
+ // Python binding for this class might change, please do not assume
45
+ // this will be bound using pybind.
46
+ class TORCH_API Work : public torch::CustomClassHolder {
47
+ public:
48
+ Work(
49
+ int rank = -1,
50
+ OpType opType = OpType::UNKNOWN,
51
+ const char* profilingTitle = nullptr,
52
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
53
+ c10::nullopt);
54
+
55
+ ~Work() override;
56
+
57
+ // Checks if request has completed. Non-blocking operation.
58
+ virtual bool isCompleted();
59
+
60
+ // Returns if the work completed successfully.
61
+ // If false, the exception function can be called to get details.
62
+ virtual bool isSuccess() const;
63
+
64
+ // Returns exception if isSuccess() returned false.
65
+ virtual std::exception_ptr exception() const;
66
+
67
+ // Returns the source rank if this object represents a recv-from-any.
68
+ virtual int sourceRank() const;
69
+
70
+ // Returns result tensors, if applicable.
71
+ // If work is not supposed to have result, we return empty list.
72
+ virtual std::vector<at::Tensor> result();
73
+
74
+ // Ensures that operations on the output tensors that are invoked
75
+ // after this function returns are correctly sequenced after the
76
+ // asynchronous completion of this work.
77
+ //
78
+ // For CUDA tensors, it inserts stream synchronization such that
79
+ // the streams of the caller wait for completion of the
80
+ // asynchronous operations on the destination tensors.
81
+ //
82
+ // For CPU tensors, it is currently a nop.
83
+ //
84
+ // This function should only be used if the caller polls for
85
+ // completion through the `isCompleted` function, it has returned
86
+ // true, and the `isSuccess` function also has returned true.
87
+ //
88
+ virtual void synchronize();
89
+
90
+ // Waits until request completes. Blocking operation.
91
+ // Throws if the work completed with an exception.
92
+ // Returns false if the work is aborted.
93
+ // Otherwise, it always returns true, indicating the work is completed.
94
+ //
95
+ // Functionally equivalent to:
96
+ //
97
+ // while (!isCompleted()) { /* nop */ }
98
+ // auto success = isSuccess();
99
+ // if (!success) { std::rethrow_exception(exception()); }
100
+ // return success;
101
+ //
102
+ virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout);
103
+
104
+ virtual void abort();
105
+
106
+ // Returns a Future object that will be associated with the completion of
107
+ // work. Only NCCL backend is currently supported.
108
+ virtual c10::intrusive_ptr<c10::ivalue::Future> getFuture();
109
+
110
+ virtual float getDuration() const;
111
+
112
+ virtual uint64_t getSequencenumber() const;
113
+
114
+ OpType retrieveOpType() const;
115
+
116
+ static c10::intrusive_ptr<Work> create_from_future(
117
+ const c10::intrusive_ptr<c10::ivalue::Future>&);
118
+
119
+ protected:
120
+ // Completes the work object and optionally sets the exception in a
121
+ // thread-safe manner. Notifies all waiting condition variables as well.
122
+ void finish(std::exception_ptr exception = nullptr);
123
+
124
+ // Similar to finish, but throws an exception if one is already set or
125
+ // provided by the user.
126
+ void finishAndThrow(std::exception_ptr exception);
127
+
128
+ mutable std::mutex mutex_;
129
+ std::condition_variable cv_;
130
+ bool completed_ = false;
131
+ std::exception_ptr exception_;
132
+
133
+ // Current rank of the node.
134
+ const int rank_;
135
+
136
+ // Operation type that this work object refers to.
137
+ OpType opType_;
138
+
139
+ // When profiling, the callback to record end of operation event. This
140
+ // callback needs to be called when collective operation is complete.
141
+ std::function<void()> recordFunctionEndCallback_;
142
+ };
143
+
144
+ struct TORCH_API WorkInfo {
145
+ WorkInfo(
146
+ const OpType& opType,
147
+ const std::chrono::time_point<std::chrono::system_clock>& timeStarted,
148
+ const std::chrono::time_point<std::chrono::system_clock>& timeFinished,
149
+ const std::chrono::duration<float>& activeDuration)
150
+ : opType(opType),
151
+ timeStarted(timeStarted),
152
+ timeFinished(timeFinished),
153
+ activeDuration(activeDuration) {}
154
+
155
+ OpType opType;
156
+ std::chrono::time_point<std::chrono::system_clock> timeStarted;
157
+ std::chrono::time_point<std::chrono::system_clock> timeFinished;
158
+ std::chrono::duration<float> activeDuration;
159
+ };
160
+
161
+ } // namespace c10d
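
A short sketch of the two alternative ways a caller can consume a Work handle, matching the comments above: a blocking wait(), or non-blocking isCompleted() polling followed by synchronize(). `pg` and `tensors` are assumed to be an already-constructed process group and a matching tensor list; they are placeholders, not part of this header.

// Sketch only (includes omitted): consume a Work returned by a collective.
c10::intrusive_ptr<c10d::Work> work = pg->allreduce(tensors);

// Option 1: block until the collective finishes (throws on failure).
work->wait();

// Option 2: poll without blocking, then order later ops after the result.
while (!work->isCompleted()) {
  std::this_thread::yield();  // in practice, overlap useful computation here
}
if (work->isSuccess()) {
  work->synchronize();  // make the caller's streams wait on the outputs
} else {
  std::rethrow_exception(work->exception());
}
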
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h ADDED
@@ -0,0 +1,13 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ namespace torch {
6
+ namespace distributed {
7
+ namespace c10d {
8
+
9
+ PyMethodDef* python_functions();
10
+
11
+ } // namespace c10d
12
+ } // namespace distributed
13
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/comm.hpp ADDED
@@ -0,0 +1,140 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
7
+ #include <utility>
8
+
9
+ namespace c10d {
10
+
11
+ // Broadcast many tensors to all processes in the process group.
12
+ TORCH_API void broadcast_coalesced(
13
+ const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
14
+ at::TensorList tensors,
15
+ size_t buffer_size,
16
+ int rank = 0);
17
+
18
+ // This class passes bucket contents tensor to DDP communication hook.
19
+ class TORCH_API GradBucket {
20
+ public:
21
+ explicit GradBucket(
22
+ size_t index,
23
+ size_t bucket_count,
24
+ at::Tensor tensor,
25
+ std::vector<size_t> offsets,
26
+ std::vector<size_t> lengths,
27
+ std::vector<c10::IntArrayRef> sizes_vec,
28
+ std::vector<at::Tensor> parameters,
29
+ c10::optional<at::Tensor> sparse_grad_indices)
30
+ : index_(index),
31
+ bucket_count_(bucket_count),
32
+ buffer_(std::move(tensor)),
33
+ offsets_(std::move(offsets)),
34
+ lengths_(std::move(lengths)),
35
+ sizes_vec_(std::move(sizes_vec)),
36
+ parameters_(std::move(parameters)),
37
+ sparse_grad_indices_(std::move(sparse_grad_indices)) {}
38
+
39
+ // Returns the index of the bucket, which is unique across all the buckets.
40
+ size_t getIndex() const {
41
+ return index_;
42
+ }
43
+
44
+ const at::Tensor& getBuffer() const {
45
+ return buffer_;
46
+ }
47
+
48
+ // Returns a mutable buffer compared with the above method.
49
+ at::Tensor& getBufferRef() {
50
+ return buffer_;
51
+ }
52
+
53
+ // Overwrites the buffer at a specific index.
54
+ void setBuffer(at::Tensor& buffer) {
55
+ buffer_ = buffer;
56
+ }
57
+
58
+ // Each tensor in the list that getGradients corresponds to a
59
+ // parameter.
60
+ std::vector<at::Tensor> getGradients() const;
61
+
62
+ // Returns model parameters belonging to this bucket. They are returned in the
63
+ // same order as gradient tensors via getGradients(). For example,
64
+ // getParameters()[i] will have its gradient stored in
65
+ // getGradients()[i].
66
+ const std::vector<at::Tensor> getParameters() const {
67
+ return parameters_;
68
+ }
69
+
70
+ // Returns whether this bucket is the last bucket to allreduce in an iteration.
71
+ bool isLast() const {
72
+ return index_ == bucket_count_ - 1;
73
+ }
74
+
75
+ c10::optional<at::Tensor>& getSparseGradIndices() {
76
+ return sparse_grad_indices_;
77
+ }
78
+
79
+ private:
80
+ size_t index_;
81
+ size_t bucket_count_;
82
+ at::Tensor buffer_;
83
+
84
+ // Per-variable info in buffer_.
85
+ std::vector<size_t> offsets_;
86
+ std::vector<size_t> lengths_;
87
+ std::vector<c10::IntArrayRef> sizes_vec_;
88
+
89
+ // Model parameters for this bucket.
90
+ const std::vector<at::Tensor> parameters_;
91
+
92
+ // Predefined sparse indices for this bucket (only used for sparse tensors).
93
+ // The gradients will be updated to have indices with these tensor values
94
+ c10::optional<at::Tensor> sparse_grad_indices_;
95
+ };
96
+
97
+ // Base class of both `PythonCommHook` and `CppCommHook`.
98
+ // Requires implementing 1) `runHook` method that communicates gradients
99
+ // asynchronously, and 2) `parseHookResult` method that converts the hook
100
+ // result into a tensor.
101
+ class TORCH_API CommHookInterface {
102
+ public:
103
+ virtual ~CommHookInterface() = default;
104
+
105
+ // Passes the input grad bucket to the registered communication hook.
106
+ // Once the tensor in the bucket are ready, kicks off the hook asynchronously
107
+ // and returns a future that holds the communication results.
108
+ virtual c10::intrusive_ptr<c10::ivalue::Future> runHook(
109
+ GradBucket& bucket) = 0;
110
+
111
+ // Returns the resulting tensor once the communication hook result is
112
+ // ready. The resulting tensor will then be copied to the grads of
113
+ // individual parameters.
114
+ virtual at::Tensor parseHookResult(const c10::IValue& result) = 0;
115
+ };
116
+
117
+ namespace detail {
118
+ // This helper function is called both by CppCommHookInterface below and inside
119
+ // reducer.
120
+ TORCH_API at::Tensor parseCppCommHookResult(const c10::IValue& result);
121
+ } // namespace detail
122
+
123
+ // This CppCommHook interface only requires implementing runHook method that
124
+ // potentially uses a state.
125
+ template <typename T>
126
+ class CppCommHookInterface : public CommHookInterface {
127
+ public:
128
+ explicit CppCommHookInterface(T state) : state_(std::move(state)) {}
129
+
130
+ ~CppCommHookInterface() override = default;
131
+
132
+ at::Tensor parseHookResult(const c10::IValue& result) override {
133
+ return detail::parseCppCommHookResult(result);
134
+ }
135
+
136
+ protected:
137
+ T state_;
138
+ };
139
+
140
+ } // namespace c10d
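
To make the hook interface concrete, here is a hedged sketch of a custom C++ DDP communication hook: it averages the flat bucket buffer before allreducing, similar in spirit to the built-in hooks declared in default_comm_hooks.hpp. The class name and the division step are illustrative only, not part of the library.

// Sketch only: a CppCommHookInterface whose state is the process group.
class ScaleThenAllreduceHook final
    : public c10d::CppCommHookInterface<c10::intrusive_ptr<c10d::ProcessGroup>> {
 public:
  explicit ScaleThenAllreduceHook(c10::intrusive_ptr<c10d::ProcessGroup> pg)
      : c10d::CppCommHookInterface<c10::intrusive_ptr<c10d::ProcessGroup>>(
            std::move(pg)) {}

  c10::intrusive_ptr<c10::ivalue::Future> runHook(
      c10d::GradBucket& bucket) override {
    // Pre-divide the flat buffer so the summing allreduce yields an average.
    at::Tensor& buffer = bucket.getBufferRef();
    buffer.div_(state_->getSize());
    std::vector<at::Tensor> tensors = {buffer};
    // Hand the allreduce future back to the reducer; the reducer later calls
    // parseHookResult() on the future's value to copy results into grads.
    return state_->allreduce(tensors)->getFuture();
  }
};

Such a hook would be installed with Reducer::register_comm_hook(std::make_unique<ScaleThenAllreduceHook>(pg)) before the first backward pass.
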
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h ADDED
@@ -0,0 +1,23 @@
1
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <c10/macros/Macros.h>
10
+
11
+ namespace c10d {
12
+
13
+ enum class DebugLevel { Off = 0, Info = 1, Detail = 2 };
14
+
15
+ TORCH_API void setDebugLevel(DebugLevel level);
16
+
17
+ // Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG`
18
+ // environment variable.
19
+ TORCH_API void setDebugLevelFromEnvironment();
20
+
21
+ TORCH_API DebugLevel debug_level() noexcept;
22
+
23
+ } // namespace c10d
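
A tiny sketch of how the debug-level API above is typically consumed; TORCH_DISTRIBUTED_DEBUG is read once at initialization and then queried cheaply.

// Sketch only: honor TORCH_DISTRIBUTED_DEBUG=OFF|INFO|DETAIL at startup,
// then gate extra work on the cached level elsewhere in the backend.
c10d::setDebugLevelFromEnvironment();
if (c10d::debug_level() >= c10d::DebugLevel::Detail) {
  // e.g. enable per-collective logging or additional consistency checks
}
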
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/default_comm_hooks.hpp ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+ #include <torch/csrc/distributed/c10d/comm.hpp>
5
+
6
+ namespace c10d {
7
+
8
+ enum class BuiltinCommHookType {
9
+ ALLREDUCE = 1,
10
+ FP16_COMPRESS = 2,
11
+ };
12
+
13
+ class AllReduceCommHook
14
+ : public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
15
+ public:
16
+ explicit AllReduceCommHook(const c10::intrusive_ptr<ProcessGroup>& state)
17
+ : CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
18
+
19
+ ~AllReduceCommHook() override = default;
20
+
21
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
22
+ };
23
+
24
+ class FP16CompressCommHook
25
+ : public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
26
+ public:
27
+ explicit FP16CompressCommHook(const c10::intrusive_ptr<ProcessGroup>& state)
28
+ : CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
29
+
30
+ ~FP16CompressCommHook() override = default;
31
+
32
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
33
+ };
34
+
35
+ // Almost same as AllReduceCommHook, but without division inside the hook.
36
+ // This enables the optimization of fusing copy and division and saves one scan
37
+ // over all the input parameters, when no communication hook is provided by the
38
+ // user. Only used internally and not released as a public built-in
39
+ // communication hook.
40
+ class _AllReduceBySumCommHook
41
+ : public CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>> {
42
+ public:
43
+ explicit _AllReduceBySumCommHook(
44
+ const c10::intrusive_ptr<ProcessGroup>& state)
45
+ : CppCommHookInterface<c10::intrusive_ptr<ProcessGroup>>(state) {}
46
+
47
+ ~_AllReduceBySumCommHook() override = default;
48
+
49
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
50
+ };
51
+
52
+ } // namespace c10d
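
A brief sketch of wiring one of these built-in hooks into a reducer from C++; in practice this is usually driven from the Python DDP API. `reducer` and `process_group` are assumed to be an existing c10d::Reducer and process group, and only one of the two registration paths may be used per reducer.

// Sketch only: select FP16 gradient compression for all buckets by enum...
reducer->register_builtin_comm_hook(c10d::BuiltinCommHookType::FP16_COMPRESS);

// ...or, alternatively, install an equivalent C++ hook object directly.
reducer->register_comm_hook(
    std::make_unique<c10d::FP16CompressCommHook>(process_group));
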
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logger.hpp ADDED
@@ -0,0 +1,104 @@
1
+ #include <c10/util/Logging.h>
2
+ #include <torch/csrc/distributed/c10d/reducer.hpp>
3
+
4
+ #include <mutex>
5
+
6
+ namespace c10d {
7
+
8
+ class TORCH_API Logger {
9
+ public:
10
+ explicit Logger(std::shared_ptr<c10d::Reducer> reducer);
11
+ // Set logging data that can be collected during DistributedDataParallel
12
+ // construction time.
13
+ void set_construction_data_and_log(
14
+ const std::string& module_name,
15
+ const std::vector<int>& device_ids,
16
+ int output_device,
17
+ bool broadcast_buffers,
18
+ bool has_sync_bn,
19
+ bool static_graph);
20
+
21
+ void set_static_graph();
22
+
23
+ // An interface for users to get DDPLoggingData and log it
24
+ // in their applications. Explanations of the logging fields are in
25
+ // "struct DDPLoggingData" of "torch/c10/util/Logging.h".
26
+ at::DDPLoggingData get_ddp_logging_data();
27
+
28
+ // Stream insertion operator for logging data to stream under
29
+ // TORCH_DISTRIBUTED_DEBUG.
30
+ friend std::ostream& operator<<(std::ostream& output, const Logger& logger);
31
+
32
+ ~Logger() noexcept(false) {
33
+ // Log if DDP graph is static in Logger dtor instead of Reducer dtor since
34
+ // Logger is deleted before Reducer.
35
+ log_if_graph_static(reducer_->ddp_graph_static());
36
+ }
37
+
38
+ // Set environment variables.
39
+ void set_env_variables();
40
+ // Set parameters stats.
41
+ void set_parameter_stats();
42
+ // Get size of each bucket (Bytes).
43
+ std::vector<int64_t> get_bucket_sizes();
44
+ // Get variable indices for each bucket.
45
+ std::vector<std::vector<size_t>> get_per_bucket_variable_indices();
46
+ // Set comm. hook, if used
47
+ void set_comm_hook(const std::string& hook);
48
+ // Set running with uneven input detection (model.join() context manager)
49
+ void set_uneven_input_join();
50
+
51
+ // Reset performance stats at current iteration
52
+ void reset_performance_stats();
53
+
54
+ // Calculate avg stats using cpu timer and gpu timer
55
+ // that has been recorded in reducer.
56
+ void calculate_avg_time(
57
+ int64_t& avg_time,
58
+ int64_t& time_duration,
59
+ Timer& timer,
60
+ Timer::Event start_event,
61
+ Timer::Event end_event);
62
+
63
+ // Set the absolute time of the event that has been recorded in reducer.
64
+ void set_event_time(int64_t& event_time, Timer& timer, Timer::Event event);
65
+ // Set stats that can be collected only during
66
+ // training loop. It is called at the beginning of forward call
67
+ // to record the run time stats of sampled iterations that previously ran.
68
+ // GPU performance stats are collected only for single process
69
+ // single device program and single device module right now.
70
+ // TODO to support single process multiple devices and multi device modules,
71
+ // events need to be created and recorded on multiple devices.
72
+ void set_runtime_stats_and_log();
73
+
74
+ // Called when DDP/reducer is failing with an error. The
75
+ // logging data structure will have two fields filled: "has_error" indicating
76
+ // that this iteration encountered an error and other fields are not valid,
77
+ // and "error", a string which contains the error message that DDP failed
78
+ // with.
79
+ template <typename... Args>
80
+ void set_error_and_log(const std::string& ddp_error, const Args&... args) {
81
+ ddp_logging_data_->ints_map["has_error"] = 1;
82
+ auto err = c10::str(ddp_error, args...);
83
+ ddp_logging_data_->strs_map["error"] = err;
84
+ // Report the iteration we are erroring at so user knows how many examples
85
+ // successfully processed before this error was hit.
86
+ ddp_logging_data_->ints_map["iteration"] = reducer_->num_iterations_;
87
+ at::LogPyTorchDDPUsage(*ddp_logging_data_);
88
+ }
89
+
90
+ // When running without static graph, called when reducer is destroyed to log
91
+ // if graph was actually static and is a candidate for static graph
92
+ // optimization.
93
+ void log_if_graph_static(bool is_static);
94
+
95
+ private:
96
+ // ddp_logging_data_ is used to hold all the ddp related logging
97
+ // data fields.
98
+ std::unique_ptr<at::DDPLoggingData> ddp_logging_data_;
99
+ std::shared_ptr<c10d::Reducer> reducer_;
100
+ // track the number of iterations when runtime stats are collected so far.
101
+ long num_iterations_stats_recorded_ = 0;
102
+ };
103
+
104
+ } // namespace c10d
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/logging.h ADDED
@@ -0,0 +1,51 @@
1
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <string>
10
+
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Logging.h>
13
+ #include <fmt/format.h>
14
+
15
+ namespace c10d {
16
+ namespace detail {
17
+
18
+ enum class LogLevel { Trace, Debug, Info, Warning, Error };
19
+
20
+ TORCH_API bool isLogLevelEnabled(LogLevel level) noexcept;
21
+
22
+ template <typename... T>
23
+ std::string formatLogMessage(fmt::string_view fmt, T&&... args) {
24
+ return fmt::vformat(fmt, fmt::make_format_args(args...));
25
+ }
26
+
27
+ } // namespace detail
28
+ } // namespace c10d
29
+
30
+ #define C10D_ERROR(...) \
31
+ LOG_IF( \
32
+ ERROR, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Error)) \
33
+ << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
34
+
35
+ #define C10D_WARNING(...) \
36
+ LOG_IF( \
37
+ WARNING, \
38
+ c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Warning)) \
39
+ << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
40
+
41
+ #define C10D_INFO(...) \
42
+ LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Info)) \
43
+ << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
44
+
45
+ #define C10D_DEBUG(...) \
46
+ LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Debug)) \
47
+ << "[c10d - debug] " << c10d::detail::formatLogMessage(__VA_ARGS__)
48
+
49
+ #define C10D_TRACE(...) \
50
+ LOG_IF(INFO, c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Trace)) \
51
+ << "[c10d - trace] " << c10d::detail::formatLogMessage(__VA_ARGS__)
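
A short sketch of the logging macros defined above; the messages and values are arbitrary. Each macro expands to a LOG_IF stream that only emits when the corresponding c10d log level is enabled at runtime, and the message is formatted with fmt-style placeholders.

// Sketch only:
C10D_INFO("store initialized, world size {}", 8);
C10D_WARNING("retrying connection to {}:{}", "127.0.0.1", 29500);
C10D_DEBUG("bucket {} ready ({} bytes)", 3, 1048576);
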
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp ADDED
@@ -0,0 +1,589 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <atomic>
5
+ #include <memory>
6
+ #include <mutex>
7
+ #include <tuple>
8
+ #include <unordered_map>
9
+ #include <vector>
10
+
11
+ #include <ATen/core/ivalue_inl.h>
12
+ #include <c10/macros/Macros.h>
13
+ #include <c10/util/intrusive_ptr.h>
14
+ #include <torch/csrc/autograd/function.h>
15
+ #include <torch/csrc/autograd/profiler.h>
16
+ #include <torch/csrc/autograd/variable.h>
17
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
18
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
19
+ #include <torch/csrc/distributed/c10d/comm.hpp>
20
+ #include <torch/csrc/distributed/c10d/debug.h>
21
+ #include <torch/csrc/distributed/c10d/default_comm_hooks.hpp>
22
+ #include <torch/csrc/distributed/c10d/reducer_timer.hpp>
23
+ #ifndef _WIN32
24
+ #include <torch/csrc/distributed/autograd/context/context.h>
25
+ #endif
26
+
27
+ namespace c10d {
28
+
29
+ constexpr int kDefaultFirstBucketBytes = int(1024 * 1024);
30
+ constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024);
31
+ // Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations.
32
+ constexpr int kDDPRuntimeLoggingSampleRate = 100;
33
+
34
+ // Forward declaration
35
+ class Logger;
36
+
37
+ // Local accumulator type for a single bucket.
38
+ struct BucketAccumulator {
39
+ std::vector<size_t> indices;
40
+ size_t size = 0;
41
+ size_t size_limit = 0;
42
+ };
43
+
44
+ class TORCH_API Reducer {
45
+ public:
46
+ // The constructor takes a list of variables (i.e. parameters) for this
47
+ // process's single model replica (as DDP assumes single-process
48
+ // single-device). The bucket assignment for this reducer, `bucket_indices`,
49
+ // is specified as a list of buckets, each of which is specified as a list of
50
+ // indices into the bucket's `variables` list.
51
+ explicit Reducer(
52
+ std::vector<at::Tensor> params,
53
+ std::vector<std::vector<size_t>> bucket_indices,
54
+ std::vector<size_t> per_bucket_size_limits,
55
+ c10::intrusive_ptr<c10d::ProcessGroup> process_group,
56
+ std::vector<bool> expect_sparse_gradients,
57
+ int64_t bucket_bytes_cap,
58
+ bool find_unused_parameters,
59
+ bool gradient_as_bucket_view,
60
+ std::unordered_map<size_t, std::string> param_names,
61
+ int64_t first_bucket_bytes_cap);
62
+
63
+ ~Reducer() noexcept(false);
64
+
65
+ // To (re-)initialize bucket assignment, pass a list of buckets, each of
66
+ // which is specified by a list of indices in the bucket's `variables` list.
67
+ // This function performs validation that the variables within a bucket
68
+ // all live on the same device and have the same dimensionality.
69
+ void initialize_buckets(std::vector<std::vector<size_t>> bucket_indices);
70
+
71
+ void autograd_hook(size_t index);
72
+
73
+ // This function is called when the forward function has produced an output,
74
+ // and the user wishes to reduce gradients in the backwards pass.
75
+ // If they don't, and wish to accumulate gradients before reducing them,
76
+ // a call to this function can simply be omitted.
77
+ void prepare_for_backward(const std::vector<at::Tensor>& outputs);
78
+
79
+ // Called at the beginning of forward() inside DistributedDataParallel,
80
+ // right now it captures the starting time of forward in each iteration.
81
+ void prepare_for_forward();
82
+
83
+ // Returns the relative time in nanoseconds when gradients were ready,
84
+ // with respect to the time `prepare_for_backward` was called. The
85
+ // vector is for parameters for a single model replica.
86
+ std::vector<int64_t> get_backward_stats() const {
87
+ return backward_stats_;
88
+ }
89
+
90
+ // Registers a hook to the reducer. The hook is `CommHookInterface`
91
+ // type to allow both Python and CPP hooks. This function can only
92
+ // be called once before calling backward.
93
+ // Cannot combine with the call of `register_builtin_comm_hook`.
94
+ void register_comm_hook(std::unique_ptr<CommHookInterface> iface);
95
+
96
+ // Registers a built-in C++ comm hook to the reducer. This function can only
97
+ // be called once before calling backward.
98
+ // Cannot combine with the call of `register_comm_hook`.
99
+ void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type);
100
+
101
+ // Informs the reducer that the optimizer is running in backward, so gradients
102
+ // don't need to be copied out of buckets, as the optimizer step will already
103
+ // have been applied.
104
+ void set_optimizer_in_backward() {
105
+ optim_in_backward_ = true;
106
+ }
107
+
108
+ // Runs allreduce or installed communication hook given GradBucket instance.
109
+ c10::intrusive_ptr<c10::ivalue::Future> run_comm_hook(
110
+ GradBucket& grad_bucket);
111
+
112
+ // Runs default allreduce hook.
113
+ c10::intrusive_ptr<c10::ivalue::Future> run_allreduce_hook(
114
+ GradBucket& grad_bucket);
115
+
116
+ // Returns gradient buckets in sequential order of buckets_. This is the order
117
+ // in which buckets are reduced across processes. If return_zero_tensors=true,
118
+ // will return zero tensors of the same shape instead of the true tensors.
119
+ std::vector<c10d::GradBucket> get_grad_buckets(
120
+ bool return_zero_tensors = true) const;
121
+
122
+ // Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_
123
+ // according to when tensors received grads in the backward pass.
124
+ // TODO: this function makes a broadcast communication call and could be
125
+ // overlapped with the next forward() call, so it could be made async.
126
+ // We will make it async when rebuilding buckets for the
127
+ // find_unused_parameters = true case, since buckets may be rebuilt more than
128
+ // once in that case: subgraphs are trained and the parameter index order
129
+ // may change more frequently.
130
+ // For the find_unused_parameters = false case, buckets are only rebuilt once,
131
+ // so the performance cost is negligible. Returns true if the buckets were
132
+ // rebuilt.
133
+ bool rebuild_buckets();
134
+
135
+ void setSparseMetadata(std::map<std::string, at::Tensor>& metadata);
136
+
137
+ // Install futures that should be awaited at end of backwards. Currently these
138
+ // are only used by user-defined custom buffer reduction hooks, but can be
139
+ // generalized to any user-originating futures that need to be awaited.
140
+ void install_futures(c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs);
141
+
142
+ // Returns true if we should rebuild buckets, else false. We only rebuild
143
+ // buckets once after the first iteration and never rebuild them if
144
+ // find_unused_parameters_ is set, unless the graph is static.
145
+ inline bool should_rebuild_buckets() const {
146
+ return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_;
147
+ }
148
+
149
+ // Pushes all parameters to be rebuilt.
150
+ void push_rebuilt_params_for_all_indices();
151
+
152
+ // Creates and sets ForwardPassWorkHandle given a Work and the
153
+ // corresponding tensor being reduced.
154
+ void set_forward_pass_work_handle(
155
+ c10::intrusive_ptr<c10d::Work> forwardPassWorkHandle,
156
+ bool useStaticWorldSize);
157
+
158
+ // Retrieve the on-device tensor used to track locally unused parameters. It is
159
+ // a tensor where index i = 1 if the Variable with that index has been used.
160
+ at::Tensor get_local_used_map_on_device() const;
161
+
162
+ // A function for users to set the sample_rate for collecting
163
+ // runtime stats. Time stats are recorded for the
164
+ // first 10 iterations; after that, time stats are
165
+ // recorded once every "sample_rate" training iterations.
166
+ void set_ddp_runtime_logging_sample_rate(int sample_rate);
167
+
168
+ // Specify the training graph is static.
169
+ void set_static_graph();
170
+
171
+ // Delay all reduce to be after all gradients' calculation is complete.
172
+ void delay_all_reduce();
173
+
174
+ void set_mixed_precision_param_dtype(c10::ScalarType dtype);
175
+
176
+ // Weak reference to associated DDP logger. The reference is weak to avoid
177
+ // refcycle between reducer and logger.
178
+ void set_logger(std::weak_ptr<c10d::Logger> logger);
179
+
180
+ // When graph is not explicitly set by user as static and has unused
181
+ // parameters, this will return whether the graph has been static until the
182
+ // current iteration, which means unused params set has not changed.
183
+ bool ddp_graph_static();
184
+
185
+ // Removes autograd hooks registered by the Reducer on the model parameters.
186
+ void remove_autograd_hooks();
187
+
188
+ // Checks whether or not the reducer has finalized the current backward
189
+ // iteration.
190
+ void check_finalized();
191
+
192
+ // Updates the underlying process group used by DDP with the new process
193
+ // group.
194
+ void update_process_group(
195
+ c10::intrusive_ptr<c10d::ProcessGroup> new_process_group);
196
+
197
+ // Resets reducer state.
198
+ void reset_state();
199
+
200
+ protected:
201
+ // Forward declaration.
202
+ struct Bucket;
203
+
204
+ void push_rebuilt_params(const size_t& index);
205
+
206
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
207
+ mutable std::mutex mutex_;
208
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
209
+ const std::vector<at::Tensor> params_;
210
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
211
+ c10::intrusive_ptr<::c10d::ProcessGroup> process_group_;
212
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
213
+ std::vector<bool> expect_sparse_gradients_;
214
+
215
+ std::vector<std::shared_ptr<torch::autograd::Node>>
216
+ grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
217
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
218
+ std::unordered_map<torch::autograd::Node*, size_t> gradAccToVariableMap_;
219
+ std::vector<std::pair<uintptr_t, std::shared_ptr<torch::autograd::Node>>>
220
+ hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
221
+
222
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
223
+ bool expect_autograd_hooks_;
224
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
225
+ bool require_finalize_;
226
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
227
+ size_t next_bucket_;
228
+
229
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
230
+ bool has_marked_unused_parameters_;
231
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
232
+ const bool find_unused_parameters_;
233
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
234
+ const bool gradient_as_bucket_view_;
235
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
236
+ std::vector<size_t> unused_parameters_;
237
+ // Previous iteration's unused params, used for checking if unused parameters
238
+ // change between iterations. Only filled during the first backwards call.
239
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
240
+ std::vector<size_t> prev_iteration_unused_parameters_;
241
+ // Whether graph is static or not. When user does not explicitly set static
242
+ // graph, the only possible dynamism is set of unused parameters changing
243
+ // between iterations which is tracked by this flag.
244
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
245
+ bool ddp_graph_static_{true};
246
+ // Locally used parameter maps indicating if parameters are used locally
247
+ // during the current iteration or no_sync session if no_sync is on.
248
+ // Each map is a one-dim int32 tensor with one entry per parameter. These
249
+ // tensors are marked in autograd_hook to indicate that the corresponding param
250
+ // has been used, and are allreduced at the end of the backward pass of the
251
+ // current iteration or no_sync session to find the globally unused parameters.
252
+ //
253
+ // local_used_map_: CPU tensor for bookkeeping locally used params
254
+ // local_used_map_dev_: dev tensor for reducing globally unused params
255
+ at::Tensor local_used_map_;
256
+ at::Tensor local_used_map_dev_;
257
+ // Indicate that reduction is done and D2H copy is done as well.
258
+ bool local_used_map_reduced_;
259
+
260
+ // Weak pointer to associated DDP logger.
261
+ std::weak_ptr<c10d::Logger> logger_;
262
+ // List of futures installed by Reducer::install_futures that should be
263
+ // awaited at the end of backwards pass.
264
+ c10::optional<c10::List<c10::intrusive_ptr<c10::ivalue::Future>>>
265
+ installed_futures_{c10::nullopt};
266
+ // Mixed precision parameter dtype for bucket type checking.
267
+ c10::optional<c10::ScalarType> mixed_precision_param_dtype_{c10::nullopt};
268
+
269
+ // Work handle for allreduce on local_used_map_
270
+ c10::intrusive_ptr<c10d::Work> local_used_work_;
271
+
272
+ void mark_variable_ready_dense(size_t variable_index);
273
+
274
+ void mark_variable_ready_sparse(size_t variable_index);
275
+
276
+ void mark_variable_ready(size_t variable_index);
277
+
278
+ void mark_bucket_ready(size_t bucket_index);
279
+
280
+ void finalize_bucket_dense(Bucket& bucket);
281
+
282
+ void finalize_backward();
283
+
284
+ // Returns list of model parameters corresponding to the given bucket.
285
+ // bucket_index is a key to cache after buckets are rebuilt, after which this
286
+ // mapping never changes.
287
+ std::vector<at::Tensor> get_variables_for_bucket(
288
+ size_t bucket_index,
289
+ const Bucket& bucket) const;
290
+
291
+ // Asserts that the reduction for the previous iteration has finished before
292
+ // rebuilding buckets or kicking off the next one.
293
+ void ensure_prior_reduction_finished();
294
+
295
+ // Broadcast rebuilt buckets from rank 0 to other ranks before initializing
296
+ // the buckets
297
+ void sync_bucket_indices(std::vector<std::vector<size_t>>& bucket_indices);
298
+
299
+ // We'd like to use DistAutogradContext::GradCallback here but dist autograd
300
+ // doesn't exist under Windows. So we just directly use the concrete type but
301
+ // to preserve and enforce our original intent we do a static assert when dist
302
+ // autograd is available.
303
+ using GradCallback = std::function<bool(at::Tensor&)>;
304
+ #ifndef _WIN32
305
+ static_assert(
306
+ std::is_same<
307
+ GradCallback,
308
+ torch::distributed::autograd::DistAutogradContext::GradCallback>::
309
+ value,
310
+ "");
311
+ #endif
312
+ void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);
313
+
314
+ // This function is called inside `initialize_buckets()`. It initializes both
315
+ // `bucket_views_in` and `bucket_views_out` with views for each variable's
316
+ // gradient into the bucket's flattened `gradients` tensor. Views serve as
317
+ // entry points to `copy_()` each grad's data in/out of the flattened
318
+ // `gradients` tensor.
319
+ void initialize_bucket_views(Bucket& bucket);
320
+
321
+ // This function is called inside `finalize_backward`; it runs only if a
322
+ // DDP communication hook was registered, to recreate just bucket_views_out
323
+ // with the result of `future_work`.
324
+ void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor);
325
+
326
+ // If gradient_as_bucket_view_ is false, after allreduce buckets,
327
+ // copy bucket results back to grads.
328
+ void copy_bucket_to_grad(
329
+ at::Tensor& variable,
330
+ Reducer::Bucket& bucket,
331
+ size_t intra_bucket_index,
332
+ bool global_unused);
333
+ // Check layout of grad and bucket_view before copying the grad to bucket.
334
+ void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view);
335
+
336
+ // A bucket contains [1..N] gradients to be reduced, where the gradients
337
+ // have the same dtype and device.
338
+ // Coalescing gradients together before reducing can result in lower overhead
339
+ // and/or faster time to completion. Coalescing requires the constituent
340
+ // gradients to have the same dtype and device, and the resulting flattened
341
+ // tensor uses that common dtype and device. The flattened tensor is filled
342
+ // as the corresponding gradients are computed (triggered by autograd hooks),
343
+ // and the buckets are reduced in a predetermined order consistent across
344
+ // processes.
345
+ struct Bucket {
346
+ // Gradients of the bucket flattened into a 1-dimensional tensor
347
+ at::Tensor gradients;
348
+
349
+ // Views into the `gradients` tensor for each individual gradient
350
+ // Each view is created with layout (size and stride) matching the
351
+ // gradient's expected layout (see the "Gradient Layout Contract" in
352
+ // torch/csrc/autograd/functions/accumulate_grad.h).
353
+ // `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])`
354
+ // provide convenient ways to copy gradient data in/out of `gradients`,
355
+ // respectively.
356
+ // We keep both `bucket_views_in` and `bucket_views_out` because
357
+ // registering a DDP communication hook may re-initialize
358
+ // `bucket_views_out` with the value of the hook's `future_work` but we
359
+ // still need separate views into the bucket's original flattened gradient
360
+ // to copy in gradient data.
361
+ std::vector<at::Tensor> bucket_views_in;
362
+ std::vector<at::Tensor> bucket_views_out;
363
+
364
+ // Variables whose gradients are held in this bucket
365
+ // We use refcounted tensors here so that we can easily unflatten the
366
+ // bucket's flattened `gradients` tensor into the participating variables
367
+ // after reduction has completed.
368
+ std::vector<at::Tensor> variables;
369
+
370
+ // Per-variable offset/length into the flattened `gradients` tensor and
371
+ // the corresponding `GradBucket` instance for communication hooks
372
+ std::vector<size_t> offsets;
373
+ std::vector<size_t> lengths;
374
+
375
+ // Per-variable sizes slicing into the bucket's `gradients` tensor
376
+ std::vector<c10::IntArrayRef> sizes_vec;
377
+
378
+ // Number of gradients left to be computed before the bucket is ready to
379
+ // be reduced
380
+ size_t pending;
381
+
382
+ // Global indices of participating variables in the bucket
383
+ std::vector<size_t> variable_indices;
384
+
385
+ // Future work handle for DDP communication hook
386
+ // If no hook is registered, a temporary vanilla allreduce hook is used.
387
+ c10::intrusive_ptr<at::ivalue::Future> future_work;
388
+
389
+ // If this bucket should expect a single sparse gradient
390
+ // If `true`, then this implies that `bucket.variables.size() == 1`.
391
+ bool expect_sparse_gradient = false;
392
+
393
+ // Sparse indices tensor
394
+ c10::optional<at::Tensor> sparse_tensor_indices = c10::nullopt;
395
+
396
+ // TODO(@pietern)
397
+ // Memory copies from gradient tensors into the bucket are potentially
398
+ // done on different CUDA streams. We record an event for every copy
399
+ // so that we can synchronize with them prior to kicking off the reduction.
400
+ // std::vector<at::cuda::CUDAEvent> events;
401
+ };
402
+
403
+ std::vector<Bucket> buckets_;
404
+
405
+ // A variable locator locates a particular variable in the reducer's buckets
406
+ struct VariableLocator {
407
+ // Index of the bucket containing the variable in the `buckets_` vector
408
+ size_t bucket_index;
409
+ // Index of the variable in the bucket, which may be used consistently
410
+ // across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`,
411
+ // `lengths`, `sizes_vec`, and `variable_indices` in `Bucket`
412
+ size_t intra_bucket_index;
413
+
414
+ VariableLocator() = default;
415
+
416
+ VariableLocator(size_t bucket_index_, size_t intra_bucket_index_)
417
+ : bucket_index(bucket_index_),
418
+ intra_bucket_index(intra_bucket_index_) {}
419
+ };
420
+
421
+ // Map the index of a variable to its location in the bucket structure.
422
+ std::vector<VariableLocator> variable_locators_;
423
+
424
+ // Tracks the number of iterations in which grads have been synchronized so far.
425
+ long num_iterations_;
426
+ // Tracks the number of distinct backward calls. This is distinct from
427
+ // num_iterations_, for example in the case of multiple forward passes before
428
+ // a single backward pass.
429
+ long num_bwd_calls_;
430
+ // whether the first autograd hook for a distinct backward pass has been
431
+ // called.
432
+ bool first_autograd_hook_called_;
433
+ // track the number of buckets that have been ready for
434
+ // communication calls like allReduce or communication hooks.
435
+ int num_buckets_ready_;
436
+
437
+ // Timing information.
438
+ int64_t backward_compute_start_time_ = -1;
439
+ std::unique_ptr<Timer> timer_;
440
+
441
+ // We collect the relative timestamp of every gradient being ready
442
+ // when executing autograd. This can be used to derive a timeline of
443
+ // the point in time buckets were ready, or ideal bucket assignment/ordering.
444
+ std::vector<int64_t> backward_stats_;
445
+
446
+ bool should_collect_runtime_stats();
447
+ void record_forward_compute_start_time();
448
+ void record_backward_compute_start_time();
449
+ void record_backward_compute_end_time();
450
+ void record_backward_comm_start_time();
451
+ void record_backward_comm_end_time();
452
+
453
+ int get_ddp_runtime_logging_sample_rate();
454
+ int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate;
455
+
456
+ bool is_multi_device_module_ = false;
457
+
458
+ // The following variables help build the dynamic bucket order.
459
+ bool has_rebuilt_bucket_;
460
+ std::vector<at::Tensor> rebuilt_params_;
461
+ std::vector<int64_t> rebuilt_param_indices_;
462
+ const int64_t bucket_bytes_cap_;
463
+
464
+ #ifndef _WIN32
465
+ struct RpcContext {
466
+ using ContextPtr = torch::distributed::autograd::ContextPtr;
467
+ // The shared_ptr is to hold the context instance.
468
+ ContextPtr context_ptr_holder;
469
+ std::atomic<ContextPtr::element_type*> context_ptr{nullptr};
470
+
471
+ void set(ContextPtr&& new_context_ptr);
472
+ };
473
+ RpcContext rpc_context_;
474
+ #endif
475
+
476
+ // A struct containing work handle and tensor for allreduce scheduled in
477
+ // forward pass, if applicable.
478
+ struct ForwardPassAllreduceWork {
479
+ c10::intrusive_ptr<c10d::Work> workHandle;
480
+ at::Tensor resultTensor;
481
+ // whether we should divide by the initial world_size or the no. of
482
+ // remaining DDP ranks.
483
+ bool useStaticWorldSize;
484
+ };
485
+
486
+ // Handle for the currently scheduled allreduce in the forward pass, if
487
+ // applicable.
488
+ ForwardPassAllreduceWork forwardPassWorkHandle_;
489
+
490
+ // Division factor for reduction of gradients.
491
+ // Equal to the process group size, with an exception of handling uneven
492
+ // input.
493
+ int div_factor_;
494
+
495
+ bool static_graph_;
496
+
497
+ // Key: size_t (index), Value: the number of times that a variable's
498
+ // autograd_hook() should be triggered before marking this variable's grad as
499
+ // ready for communication. Map will not change after 1st iteration.
500
+ std::unordered_map<size_t, int> numGradHooksTriggeredMap_;
501
+ // Key: size_t (index), Value: the number of times that a variable's
502
+ // autograd_hook() remains to be triggered before marking this variable's
503
+ // grad as ready for communication. Map will change after 1st iteration to
504
+ // track whether a grad is ready for communication or not.
505
+ std::unordered_map<size_t, int> numGradHooksTriggeredMapPerIteration_;
506
+
507
+ private:
508
+ // reset counting for buckets before backward starts
509
+ void reset_bucket_counting();
510
+ // search unused parameters before backward starts
511
+ void search_unused_parameters(
512
+ const std::vector<torch::autograd::Variable>& outputs);
513
+ void set_divide_factor();
514
+ // kick off all reduce for the ready bucket
515
+ void all_reduce_bucket(Bucket& bucket);
516
+ // kick off all reduce to local used map, it can help find global unused
517
+ // parameters
518
+ void all_reduce_local_used_map();
519
+ // initialize locally used parameter maps
520
+ void initialize_local_used_map();
521
+ // get current cuda stream
522
+ const c10::Stream get_current_stream();
523
+ bool dynamic_graph_find_unused();
524
+ bool static_graph_first_iteration();
525
+ bool static_graph_after_first_iteration();
526
+
527
+ // comm_hook_ is used to access the DDP communication hook if registered.
528
+ std::unique_ptr<CommHookInterface> comm_hook_;
529
+
530
+ // Sparse metadata contains the indices that will be used
531
+ // when calling into sparse allreduce.
532
+ // This is only used in the sparse allreduce collective calls
533
+ std::unique_ptr<std::map<std::string, at::Tensor>> sparse_metadata_;
534
+
535
+ // Debug level setting. It is parsed once when Reducer is constructed, and
536
+ // remains the same across a single invocation of DDP training.
537
+ DebugLevel ddp_debug_level_;
538
+ // Mapping of variable index to fully qualified name of model to notify users
539
+ // about errors when certain parameters do not get gradient.
540
+ std::unordered_map<size_t, std::string> param_names_;
541
+ // Variable indices stored sequentially in order of when the gradient is ready
542
+ // for the current backwards pass.
543
+ std::vector<int> grad_ready_order_indices_;
544
+ // Bytes capacity of first bucket, can be configured by user
545
+ int64_t first_bucket_bytes_cap_;
546
+ // Per iteration set of parameter indices that have been marked ready.
547
+ std::unordered_set<size_t> perIterationReadyParams_;
548
+ // Retrieves parameter names that have not been marked as ready as part of
549
+ // previous iteration.
550
+ std::vector<std::string> getUnmarkedParamsForIteration();
551
+ // Retrieves parameter indices that have not been marked as ready as part of
552
+ // previous iteration.
553
+ std::vector<size_t> getUnmarkedParamIndicesForIteration();
554
+ // Raises appropriate error if mark_variable_ready is called on the same
555
+ // variable twice, which is unexpected.
556
+ void checkAndRaiseMarkedTwiceError(size_t curVariableIndex);
557
+ // Retrieves parameter corresponding to the given VariableIndex.
558
+ at::Tensor& get_param_from_index(size_t index);
559
+
560
+ // Cached bucket index to model parameter mapping. Populated after buckets
561
+ // are rebuilt after which this mapping is static.
562
+ mutable std::unordered_map<size_t, std::vector<at::Tensor>>
563
+ cached_variables_for_bucket_;
564
+
565
+ bool optim_in_backward_{false};
566
+ friend class Logger;
567
+ };
568
+
569
+ // This is equivalent to take_tensors but returns indices into the
570
+ // tensor list argument for bucket assignment. Also, it is aware
571
+ // of device placement and will not allow buckets to span devices.
572
+ // The index of tensors[i] assigned to a bucket is tensor_indices[i];
573
+ // when tensor_indices is empty, the index of tensors[i] assigned to
574
+ // a bucket is i.
575
+ TORCH_API std::tuple<std::vector<std::vector<size_t>>, std::vector<size_t>>
576
+ compute_bucket_assignment_by_size(
577
+ const std::vector<at::Tensor>& tensors,
578
+ const std::vector<size_t>& bucket_size,
579
+ const std::vector<bool>& expect_sparse_gradient = {},
580
+ const std::vector<int64_t>& tensor_indices = {},
581
+ const c10::optional<std::weak_ptr<c10d::Logger>>& logger = {});
582
+
583
+ // Verify models across all processes are the same as model on rank 0 with
584
+ // respect to no. of params and matching dtype/size/layout.
585
+ TORCH_API void verify_params_across_processes(
586
+ const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
587
+ const std::vector<at::Tensor>& params,
588
+ const c10::optional<std::weak_ptr<c10d::Logger>>& logger);
589
+ } // namespace c10d
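
A note on the bucketing above: BucketAccumulator, kDefaultFirstBucketBytes, kDefaultBucketBytesCap, and compute_bucket_assignment_by_size together describe how gradients are grouped into size-capped buckets, with a smaller default cap for the first bucket. The standalone sketch below illustrates only that size-capped grouping; assign_buckets, SketchBucketAccumulator, and the byte sizes are hypothetical names for this illustration, and the real compute_bucket_assignment_by_size additionally groups by dtype and device, honors expect_sparse_gradient, and accepts explicit tensor indices and a logger.

#include <cstddef>
#include <iostream>
#include <vector>

// Mirrors the BucketAccumulator above: indices of tensors placed in the
// bucket so far, plus the accumulated byte size.
struct SketchBucketAccumulator {
  std::vector<size_t> indices;
  size_t size = 0;
};

// Greedily packs tensors (represented only by their byte sizes here) into
// buckets, closing a bucket once its accumulated size reaches the cap. The
// first bucket uses a smaller cap, in the spirit of kDefaultFirstBucketBytes
// vs kDefaultBucketBytesCap.
std::vector<std::vector<size_t>> assign_buckets(
    const std::vector<size_t>& tensor_bytes,
    size_t first_bucket_cap,
    size_t bucket_cap) {
  std::vector<std::vector<size_t>> buckets;
  SketchBucketAccumulator current;
  size_t cap = first_bucket_cap;
  for (size_t i = 0; i < tensor_bytes.size(); ++i) {
    current.indices.push_back(i);
    current.size += tensor_bytes[i];
    if (current.size >= cap) {
      buckets.push_back(current.indices);
      current = SketchBucketAccumulator{};
      cap = bucket_cap;
    }
  }
  if (!current.indices.empty()) {
    buckets.push_back(current.indices);
  }
  return buckets;
}

int main() {
  // Hypothetical gradient sizes in bytes.
  std::vector<size_t> sizes = {400000, 800000, 3000000, 2000000, 500000};
  auto buckets = assign_buckets(sizes, 1024 * 1024, 25 * 1024 * 1024);
  for (const auto& bucket : buckets) {
    std::cout << "bucket:";
    for (size_t idx : bucket) {
      std::cout << ' ' << idx;
    }
    std::cout << '\n';
  }
  return 0;
}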
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/sequence_num.hpp ADDED
@@ -0,0 +1,65 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/irange.h>
6
+ #include <vector>
7
+
8
+ namespace c10d {
9
+ const int kUnsetSeqNum = 0;
10
+
11
+ namespace {
12
+ constexpr int kByteOffset = 8;
13
+ }
14
+
15
+ // Converts from int to char vec to write in store
16
+ template <typename T>
17
+ inline std::vector<T> toVec(uint64_t num, int numBytes) {
18
+ std::vector<T> values;
19
+ // Read off bytes from right to left, pushing them into
20
+ // char array.
21
+ for (const auto i : c10::irange(numBytes)) {
22
+ uint8_t x = (num >> (kByteOffset * i)) & 0xff;
23
+ values.push_back(static_cast<T>(x));
24
+ }
25
+ return values;
26
+ }
27
+
28
+ // Converts from char vec (such as from store read) to int.
29
+ template <typename T>
30
+ inline uint64_t fromVec(const std::vector<T>& values) {
31
+ uint64_t num = 0;
32
+ // Set each byte at the correct location in num
33
+ for (const auto i : c10::irange(values.size())) {
34
+ uint8_t x = static_cast<uint8_t>(values[i]);
35
+ num |= (static_cast<uint64_t>(x) << (kByteOffset * i));
36
+ }
37
+ return num;
38
+ }
39
+
40
+ class TORCH_API SequenceNum {
41
+ public:
42
+ SequenceNum();
43
+ explicit SequenceNum(const uint64_t num);
44
+ // Retrieve num_. Will throw if not set.
45
+ uint64_t get() const;
46
+ // Increment num_. Will throw if not set.
47
+ void increment();
48
+ // Increment num_ and return the old value. Will throw if not set.
49
+ uint64_t getAndIncrement();
50
+ // Sets num_
51
+ void set(const uint64_t num);
52
+ // Returns true if this SequenceNum is properly initialized with a value, else
53
+ // false.
54
+ bool isSet() const;
55
+
56
+ SequenceNum& operator=(const SequenceNum& other);
57
+
58
+ SequenceNum(const SequenceNum& other);
59
+
60
+ private:
61
+ c10::optional<uint64_t> num_;
62
+ mutable std::mutex lock_;
63
+ };
64
+
65
+ } // namespace c10d
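
The toVec/fromVec helpers above round-trip a 64-bit sequence number through a byte vector so it can be written to and read back from a store. Below is a small usage sketch; it assumes a build environment where these torch headers are on the include path (the file ships under torch/include in this package, so it is included as <torch/csrc/distributed/c10d/sequence_num.hpp>).

#include <cassert>
#include <cstdint>
#include <vector>

#include <torch/csrc/distributed/c10d/sequence_num.hpp>

int main() {
  const uint64_t seq = 0x0102030405060708ULL;
  // Encode into 8 bytes, least-significant byte first, as they would be
  // written to the store.
  std::vector<uint8_t> encoded = c10d::toVec<uint8_t>(seq, 8);
  assert(encoded.size() == 8);
  assert(encoded[0] == 0x08 && encoded[7] == 0x01);
  // Decode the byte vector back into the original value.
  const uint64_t decoded = c10d::fromVec(encoded);
  assert(decoded == seq);
  return 0;
}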