applied-ai-018 committed
Commit ea5af34 · verified · Parent: b292467

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp +12 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp +473 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp +139 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp +721 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +438 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +271 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp +181 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp +101 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp +180 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp +187 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp +27 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h +56 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h +33 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h +34 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp +81 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h +93 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h +11 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h +12 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h +144 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h +47 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h +59 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h +238 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h +52 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h +298 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h +32 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h +49 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h +47 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h +286 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h +14 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h +41 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h +43 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h +80 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h +124 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h +259 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h +62 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h +78 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h +79 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h +56 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h +15 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h +7 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h +85 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h +71 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h +17 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h +24 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h +52 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h +27 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h +152 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h +106 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h +17 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h +19 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp ADDED
@@ -0,0 +1,12 @@
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>

namespace c10d_functional {

void register_process_group(
    const std::string& tag,
    c10::intrusive_ptr<c10d::ProcessGroup> pg);

c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
    const std::string& tag);

} // namespace c10d_functional
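Usage note (not part of the commit): Functional.hpp only declares a tag-keyed registry of process groups used by functional collectives. A minimal sketch of how the two declarations above might be driven, assuming an already-initialized group `pg`; the tag string "default" and the helper name are illustrative:

#include <torch/csrc/distributed/c10d/Functional.hpp>

// Hypothetical helper: publish an existing group under a tag, then look it up.
void registerAndResolveExample(c10::intrusive_ptr<c10d::ProcessGroup> pg) {
  // Make the group reachable by tag.
  c10d_functional::register_process_group("default", pg);

  // Later callers can recover the same group from the tag alone.
  c10::intrusive_ptr<c10d::ProcessGroup> resolved =
      c10d_functional::resolve_process_group("default");
  TORCH_CHECK(resolved.get() == pg.get(), "tag should resolve to the same group");
}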
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp ADDED
@@ -0,0 +1,473 @@
#pragma once

#ifdef USE_C10D_NCCL

#include <stdio.h>
#include <stdlib.h>

#include <memory>
#include <mutex>

#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <nccl.h>

#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 14)
#define NCCL_HAS_COMM_NONBLOCKING
#endif

#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 18)
#define NCCL_HAS_COMM_SPLIT
#endif

// ncclGetLastError() is enabled only for NCCL versions 2.13+
// ncclRemoteError only exists in NCCL versions 2.13+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 13)
#define ENABLE_NCCL_GET_LAST_ERROR
#define NCCL_REMOTE_ERROR
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
#define ENABLE_NCCL_GET_LAST_ERROR
#define NCCL_REMOTE_ERROR
#endif

// Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort()
// and ncclCommGetAsyncError() are not supported in earlier versions.
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 4)
#define ENABLE_NCCL_ERROR_CHECKING
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
#define ENABLE_NCCL_ERROR_CHECKING
#endif

// P2P is enabled only for NCCL versions 2.7+ since ncclSend()
// and ncclRecv() are not supported in earlier versions.
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 7)
#define ENABLE_NCCL_P2P_SUPPORT
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
#define ENABLE_NCCL_P2P_SUPPORT
#endif

#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 11)
#define ENABLE_NCCL_PREMUL_SUM_SUPPORT
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
#define ENABLE_NCCL_PREMUL_SUM_SUPPORT
#endif

#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
    (NCCL_MINOR >= 17)
#define NCCL_HAS_COMM_CTA_CGA
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
#define NCCL_HAS_COMM_CTA_CGA
#endif

#if defined(NCCL_REGISTRATION_SUPPORTED) || \
    ((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
      (NCCL_MINOR >= 19)))
#define NCCL_HAS_COMM_REGISTER
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
#define NCCL_HAS_COMM_REGISTER
#endif

// Macro to throw on a non-successful NCCL return value.
#define C10D_NCCL_CHECK(cmd, failureReason) \
  do { \
    ncclResult_t result = cmd; \
    if (result != ncclSuccess) { \
      std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
          std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
          "\n" + getNcclErrorDetailStr(result, failureReason); \
      TORCH_CHECK_WITH(DistBackendError, false, err); \
    } \
  } while (0)

// Macro to throw on a non-successful NCCL return value, non-blocking.
#define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \
  ncclResult_t result = cmd; \
  auto startTimepoint = std::chrono::steady_clock::now(); \
  while (result == ncclInProgress) { \
    if (nccl_nonblocking_timeout() > 0) { \
      auto currentTimepoint = std::chrono::steady_clock::now(); \
      auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
                             currentTimepoint - startTimepoint) \
                             .count(); \
      if (timeElapsed > nccl_nonblocking_timeout()) { \
        std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \
            std::to_string(__LINE__) + ", " + \
            ncclGetErrorWithVersion(result) + "\n" + \
            getNcclErrorDetailStr(result, failureReason); \
        TORCH_CHECK_WITH(DistBackendError, false, err); \
      } \
    } \
    ncclCommGetAsyncError(comm, &result); \
  } \
  if (result != ncclSuccess) { \
    std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
        std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
        "\n" + getNcclErrorDetailStr(result, failureReason); \
    TORCH_CHECK_WITH(DistBackendError, false, err); \
  }

#define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comms_, failureReason) \
  ncclResult_t state = cmd; \
  auto startTimepoint = std::chrono::steady_clock::now(); \
  if (state == ncclInProgress) { \
    for (const auto i : c10::irange(comms_.size())) { \
      do { \
        if (nccl_nonblocking_timeout() > 0) { \
          auto currentTimepoint = std::chrono::steady_clock::now(); \
          auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
                                 currentTimepoint - startTimepoint) \
                                 .count(); \
          if (timeElapsed > nccl_nonblocking_timeout()) { \
            std::string err = "NCCL timeout in: " + std::string(__FILE__) + \
                ":" + std::to_string(__LINE__) + ", " + \
                ncclGetErrorWithVersion(state) + "\n" + \
                getNcclErrorDetailStr(state, failureReason); \
            TORCH_CHECK_WITH(DistBackendError, false, err); \
          } \
        } \
        ncclCommGetAsyncError(comms_[i]->getNcclComm(), &state); \
      } while (state == ncclInProgress); \
      if (state != ncclSuccess) { \
        break; /* fall through to failed case */ \
      } \
    } \
  } \
  if (state != ncclSuccess) { \
    std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
        std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \
        "\n" + getNcclErrorDetailStr(state, failureReason); \
    TORCH_CHECK_WITH(DistBackendError, false, err); \
  }

// Macro to print and abort on a non-successful NCCL return value.
#define C10D_NCCL_ASSERT(cmd) \
  do { \
    ncclResult_t result = cmd; \
    if (result != ncclSuccess) { \
      std::string err = ncclGetErrorWithVersion(result); \
      fprintf( \
          stderr, \
          "NCCL error in: %s:%d, %s\n", \
          __FILE__, \
          __LINE__, \
          err.c_str()); \
      abort(); \
    } \
  } while (0)

namespace c10d {

std::string getNcclVersion();
std::string ncclGetErrorWithVersion(ncclResult_t error);
bool nccl_use_nonblocking();
int nccl_nonblocking_timeout();

// Provides additional detail into NCCL error codes based on when these are
// thrown in the NCCL codebase.
std::string getNcclErrorDetailStr(
    ncclResult_t error,
    c10::optional<std::string> processGroupFailureReason = c10::nullopt);

// Write NCCL debug info to local disk or any storage users define.
class TORCH_API DebugInfoWriter {
 public:
  DebugInfoWriter(int rank);
  virtual ~DebugInfoWriter();
  virtual void write(const std::string& ncclTrace);

 protected:
  std::string filename_;
};

// RAII wrapper for NCCL communicator
class NCCLComm {
 public:
  explicit NCCLComm(ncclComm_t ncclComm)
      : ncclComm_(ncclComm),
        aborted_(false),
        ncclAsyncErr_(ncclSuccess),
        commFailureReason_(c10::nullopt) {}

  NCCLComm() : NCCLComm(nullptr) {}

  ~NCCLComm() noexcept {
    // Add lock in this destructor, as aborted_ needs to be read after memory
    // barrier here.
    std::unique_lock<std::mutex> lock(mutex_);
    if (ncclComm_ && !aborted_) {
#ifdef ENABLE_NCCL_ERROR_CHECKING
      // Use ncclCommAbort instead of ncclCommDestroy here since
      // ncclCommDestroy could block forever waiting for work to complete on
      // the communicator.
      C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_));
#else
      C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_));
#endif
    }
  }

  static std::shared_ptr<NCCLComm> create(
      int numRanks,
      int rank,
      ncclUniqueId commId) {
    auto comm = std::make_shared<NCCLComm>();
    C10D_NCCL_CHECK(
        ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank),
        c10::nullopt);
    comm->ncclId_ = commId;
    comm->rank_ = rank;
    return comm;
  }

#ifdef NCCL_HAS_COMM_NONBLOCKING
  static std::shared_ptr<NCCLComm> create(
      int numRanks,
      int rank,
      ncclUniqueId commId,
      ncclConfig_t& config) {
    auto comm = std::make_shared<NCCLComm>();
    if (nccl_use_nonblocking()) {
      config.blocking = 0;
      C10D_NCCL_CHECK_TIMEOUT(
          ncclCommInitRankConfig(
              &(comm->ncclComm_), numRanks, commId, rank, &config),
          comm->ncclComm_,
          c10::nullopt);
    } else {
      C10D_NCCL_CHECK(
          ncclCommInitRankConfig(
              &(comm->ncclComm_), numRanks, commId, rank, &config),
          c10::nullopt);
    }
    comm->ncclId_ = commId;
    comm->rank_ = rank;
    return comm;
  }
#endif

#ifdef NCCL_HAS_COMM_SPLIT
  static std::shared_ptr<NCCLComm> split(
      NCCLComm* source,
      int color_id,
      int rank,
      ncclConfig_t& config) {
    auto comm = std::make_shared<NCCLComm>();
    C10D_NCCL_CHECK(
        ncclCommSplit(
            source->ncclComm_, color_id, rank, &(comm->ncclComm_), &config),
        c10::nullopt);
    ++source->ncclCommSplitCounter_;
    return comm;
  }
#endif

  ncclUniqueId getNcclId() {
    return ncclId_;
  }

  // Must not be copyable
  NCCLComm(const NCCLComm&) = delete;
  NCCLComm& operator=(const NCCLComm&) = delete;

  // Do not support move assignment as there is no valid use case
  NCCLComm& operator=(NCCLComm&& other) = delete;

  // Move constructable
  NCCLComm(NCCLComm&& other) {
    // Using other's lock, as it reads other's states
    // Can not use this.mutex_, as this object is being constructed.
    std::unique_lock<std::mutex> lock(other.mutex_);
    std::swap(ncclComm_, other.ncclComm_);
    std::swap(aborted_, other.aborted_);
    std::swap(ncclAsyncErr_, other.ncclAsyncErr_);
  }

  ncclComm_t getNcclComm();

  c10::optional<std::string> getNcclCommFailureReason() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return commFailureReason_;
  }

  void ncclCommAbort(
      c10::optional<std::string> commFailureReason = c10::nullopt) {
    std::unique_lock<std::mutex> lock(mutex_);
#ifdef ENABLE_NCCL_ERROR_CHECKING
    if (aborted_) {
      // Should not abort twice.
      return;
    }

#ifdef NCCL_HAS_COMM_REGISTER
    // Deregister all registered segments before aborting.
    for (auto& it : registeredSegmentHandles_) {
      void* handle = it.second;
      C10D_NCCL_CHECK(
          ::ncclCommDeregister(ncclComm_, handle),
          c10::str(
              "Failed to deregister segment handle ",
              handle,
              " on ncclComm_ ",
              ncclComm_));
    }
    registeredSegmentHandles_.clear();
#endif

    // Set true failure reason if provided by ProcessGroupNCCL (e.g. work
    // timeout)
    commFailureReason_ = commFailureReason;
#ifndef NCCL_HAS_COMM_NONBLOCKING
    C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_);
#else
    C10D_NCCL_CHECK_TIMEOUT(
        ::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_);
#endif
    aborted_ = true;
    ncclComm_ = nullptr;

    // Set an appropriate error so that we avoid using the communicator.
    if (ncclAsyncErr_ == ncclSuccess) {
      ncclAsyncErr_ = ncclSystemError;
    }
#else
    // This is a NOOP, if error checks are disabled.
    return;
#endif
  }

  bool isAborted() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return aborted_;
  }

  uint64_t getCommSplitCounter() const {
    return ncclCommSplitCounter_;
  }

  ncclResult_t checkForNcclError() {
    std::unique_lock<std::mutex> lock(mutex_);
#ifdef ENABLE_NCCL_ERROR_CHECKING
    if (ncclAsyncErr_ != ncclSuccess) {
      return ncclAsyncErr_;
    }
    C10D_NCCL_CHECK(
        ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_);
    return ncclAsyncErr_;
#else
    // Always return success, if error checks are disabled.
    return ncclSuccess;
#endif
  }

  ncclResult_t registerSegment(void* ptr, size_t size) {
    std::unique_lock<std::mutex> lock(mutex_);
#ifdef NCCL_HAS_COMM_REGISTER
    // We register only segments from cache allocator
    // which are guaranteed to be with disjoint addr ranges. Thus, a ptr always
    // maps to a unique handle and should not be registered before the current
    // ptr is deregistered and freed.
    TORCH_CHECK(
        registeredSegmentHandles_.count(ptr) == 0,
        "Segment with ptr ",
        ptr,
        " has already been registered on ncclComm_ ",
        ncclComm_);

    void* handle;
    C10D_NCCL_CHECK(
        ncclCommRegister(ncclComm_, ptr, size, &handle),
        c10::str(
            "Failed to register segment with ptr ",
            ptr,
            ", size ",
            size,
            " on ncclComm_ ",
            ncclComm_));
    registeredSegmentHandles_[ptr] = handle;
    return ncclSuccess;
#else
    return ncclInvalidUsage;
#endif
  }

  ncclResult_t deregisterSegment(void* ptr) {
    std::unique_lock<std::mutex> lock(mutex_);
#ifdef NCCL_HAS_COMM_REGISTER
    TORCH_CHECK(
        registeredSegmentHandles_.count(ptr) == 1,
        "Segment with ptr ",
        ptr,
        " is not registered on ncclComm_ ",
        ncclComm_);

    void* handle = registeredSegmentHandles_[ptr];
    C10D_NCCL_CHECK(
        ncclCommDeregister(ncclComm_, handle),
        c10::str(
            "Failed to deregister segment handle ",
            handle,
            " on ncclComm_ ",
            ncclComm_));
    registeredSegmentHandles_.erase(ptr);
    return ncclSuccess;
#else
    return ncclInvalidUsage;
#endif
  }

 protected:
  ncclComm_t ncclComm_;
  // Unique nccl_id for this communicator.
  ncclUniqueId ncclId_;
  bool aborted_;
  uint64_t ncclCommSplitCounter_{0};
  ncclResult_t ncclAsyncErr_;
  mutable std::mutex mutex_;
  // Rank that this communicator corresponds to.
  int rank_;
  // Optional reason for communicator failure, provided by ProcessGroupNCCL for
  // better error messaging.
  c10::optional<std::string> commFailureReason_;
#ifdef NCCL_HAS_COMM_REGISTER
  // Stores handlers for tensors registered by NCCL
  std::unordered_map<void*, void*> registeredSegmentHandles_;
#endif
};

// Helper that automatically cleans up premul sums.
struct ncclRedOpRAII {
  ncclRedOpRAII() = default;
  ncclRedOpRAII(ncclRedOp_t op) : op_(op) {}
  ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm)
      : op_(op), comm_(comm), premul_sum_(true) {}
  ncclRedOpRAII(const ncclRedOpRAII&) = delete;
  ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete;
  ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() {
    std::swap(tmp.op_, this->op_);
    std::swap(tmp.comm_, this->comm_);
    std::swap(tmp.premul_sum_, this->premul_sum_);
  }
#if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT)
  ~ncclRedOpRAII() {
    if (premul_sum_) {
      ncclRedOpDestroy(op_, comm_);
    }
  }
#endif
  operator ncclRedOp_t() const {
    return op_;
  }
  ncclRedOp_t op_;
  ncclComm_t comm_;
  bool premul_sum_ = false;
};

} // namespace c10d

#endif // USE_C10D_NCCL
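Usage note (not part of the commit): a minimal sketch of how the NCCLComm RAII wrapper above is typically driven, assuming a build with USE_C10D_NCCL and an ncclUniqueId that has already been exchanged across ranks out of band (for example through the c10d Store); the helper name is illustrative:

#include <torch/csrc/distributed/c10d/NCCLUtils.hpp>

// Hypothetical helper: create a communicator for this rank and poll it.
std::shared_ptr<c10d::NCCLComm> makeComm(
    int worldSize, int rank, const ncclUniqueId& id) {
  // create() wraps ncclCommInitRank and throws via C10D_NCCL_CHECK on failure.
  auto comm = c10d::NCCLComm::create(worldSize, rank, id);

  // Polling the communicator for asynchronous errors is what a watchdog
  // (e.g. ProcessGroupNCCL's) reacts to; abort on a non-success result.
  if (comm->checkForNcclError() != ncclSuccess) {
    comm->ncclCommAbort(std::string("async NCCL error detected"));
  }
  return comm;
}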
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ParamCommsUtils.hpp ADDED
@@ -0,0 +1,139 @@
#pragma once

#include <ATen/core/ivalue.h>
#include <ATen/record_function.h>
#include <c10/macros/Macros.h>
#include <c10/util/ThreadLocalDebugInfo.h>
#include <string>
#include <vector>

namespace torch {

class TORCH_API ParamCommsDebugInfo : public c10::DebugInfoBase {
 public:
  ParamCommsDebugInfo() = default;
  ParamCommsDebugInfo(
      int rank,
      std::string&& colName,
      int inNelems,
      int outNelems,
      at::ScalarType dType,
      std::vector<int64_t> inSplitSizes,
      std::vector<int64_t> outSplitSizes,
      int worldSize);

  ~ParamCommsDebugInfo() override = default;

  int getRank() const {
    return rank_;
  }

  int getWorldSize() const {
    return worldSize_;
  }

  const std::string getColumnName() const {
    return columnName_;
  }

  int getInMessageNelems() const {
    return inMessageNelems_;
  }

  int getOutMessageNelems() const {
    return outMessageNelems_;
  }

  at::ScalarType getDType() const {
    return dType_;
  }

  const std::vector<int64_t>& getInputSplitSizes() const {
    return inputSplitSizes_;
  }

  const std::vector<int64_t>& getOutputSplitSizes() const {
    return outputSplitSizes_;
  }

 private:
  int rank_{};
  int worldSize_{};
  std::string columnName_;
  int inMessageNelems_{};
  int outMessageNelems_{};
  at::ScalarType dType_ = at::kByte;
  std::vector<int64_t> inputSplitSizes_;
  std::vector<int64_t> outputSplitSizes_;
};

#define RECORD_PARAM_COMMS( \
    seq, \
    pg_ptr, \
    rank, \
    colName, \
    inNelems, \
    outNelems, \
    dType, \
    inSplitSizes, \
    outSplitSizes, \
    worldSize) \
  auto paramCommsInfo = std::make_shared<torch::ParamCommsDebugInfo>( \
      rank, \
      colName, \
      inNelems, \
      outNelems, \
      dType, \
      inSplitSizes, \
      outSplitSizes, \
      worldSize); \
  c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
  std::initializer_list<const c10::IValue> paramList = { \
      c10::IValue(seq), \
      c10::IValue(pg_ptr), \
      rank, \
      colName, \
      inSplitSizes, \
      outSplitSizes, \
      worldSize}; \
  c10::ArrayRef<const c10::IValue> paramInputs(paramList); \
  RECORD_FUNCTION(at::kParamCommsCallName, paramInputs);

#define RECORD_PARAM_COMMS_DATA( \
    seq, \
    pg_ptr, \
    InputTensors, \
    OutputTensors, \
    rank, \
    colName, \
    inNelems, \
    outNelems, \
    dType, \
    inSplitSizes, \
    outSplitSizes, \
    worldSize) \
  auto paramCommsInfo = std::make_shared<torch::ParamCommsDebugInfo>( \
      rank, \
      colName, \
      inNelems, \
      outNelems, \
      dType, \
      inSplitSizes, \
      outSplitSizes, \
      worldSize); \
  c10::DebugInfoGuard g(c10::DebugInfoKind::PARAM_COMMS_INFO, paramCommsInfo); \
  std::initializer_list<const c10::IValue> paramList = { \
      c10::IValue(InputTensors), \
      c10::IValue(seq), \
      c10::IValue(pg_ptr), \
      rank, \
      colName, \
      inSplitSizes, \
      outSplitSizes, \
      worldSize}; \
  c10::ArrayRef<const c10::IValue> paramInputs(paramList); \
  RECORD_FUNCTION_WITH_INPUTS_OUTPUTS( \
      at::kParamCommsCallName, \
      paramInputs, \
      std::vector<c10::IValue>(1, c10::IValue(OutputTensors)));
} // namespace torch
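Usage note (not part of the commit): a sketch of how the RECORD_PARAM_COMMS macro above might be invoked at the top of a collective implementation, loosely modeled on how backend code calls it; the sequence number, group id, "allreduce" label and helper name are all illustrative:

#include <torch/csrc/distributed/c10d/ParamCommsUtils.hpp>

// Hypothetical helper: attach allreduce metadata to the profiler trace.
void recordAllreduceMetadata(
    int seq,
    int64_t pgId, // e.g. an opaque group id such as ProcessGroup::getID()
    int rank,
    int64_t numel,
    int worldSize) {
  // Expands into a ParamCommsDebugInfo, a DebugInfoGuard and a RECORD_FUNCTION
  // call, so profiler traces carry the collective name and message sizes.
  RECORD_PARAM_COMMS(
      seq, // seq
      pgId, // pg_ptr
      rank, // rank
      "allreduce", // colName
      numel, // inNelems
      numel, // outNelems
      at::kFloat, // dType
      std::vector<int64_t>(), // inSplitSizes
      std::vector<int64_t>(), // outSplitSizes
      worldSize); // worldSize
}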
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp ADDED
@@ -0,0 +1,721 @@
#pragma once

#include <torch/csrc/distributed/c10d/Backend.hpp>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <unordered_map>
#include <utility>
#include <vector>

#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <c10/macros/Macros.h>

#include <torch/csrc/distributed/c10d/Work.hpp>
// *************************************************************************
// PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN
// versions 1.7 and 1.8.
// PLEASE DO NOT ADD ANY DEPENDENCIES.
// SEE RFC: https://github.com/pytorch/pytorch/issues/39662
// *************************************************************************

constexpr auto kProcessGroupDefaultTimeout =
    std::chrono::milliseconds(30 * 60 * 1000);

namespace c10d {

// ProcessGroup is a base class that captures collective and point to
// point communication in a fixed set of processes.
//
// The functions specified in the class below describe the API alone;
// implementations are provided in subclasses.
//
// Every function that performs I/O is executed asynchronously by a
// thread pool owned by the ProcessGroup (by default). They return an
// object that can be used to wait for completion or error.
//
// The ProcessGroup can instantiate subgroups with fewer or an equal
// number of members. Implementations must take care that multiple
// process groups can be used in parallel and synchronize accordingly.
//
// The ProcessGroup assumes a fixed set of processes. If the set
// changes, existing instances must be destructed and instantiation
// and initialization must start from scratch. For members of the
// process group to find each other (referred to as rendezvous from
// hereon)
//
class TORCH_API ProcessGroup : public torch::CustomClassHolder {
 public:
  // ProcessGroup Options is a base struct that defines the basic options
  // when constructing a ProcessGroup. Each ProcessGroup subclass should
  // extend this struct and define its options if it wants to provide more
  // config options (beyond basic ones defined here) to end user.
  struct TORCH_API Options : torch::CustomClassHolder {
    explicit Options(
        std::string backend,
        std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout)
        : timeout(timeout), backend(std::move(backend)) {}
    ~Options() override = default;

    std::chrono::milliseconds timeout;

    // backend name
    const std::string backend;
  };

  enum BackendType {
    UNDEFINED = 0,
    GLOO = 1,
    NCCL = 2,
    UCC = 3,
    MPI = 4,
    CUSTOM = 5,
  };

  // Not used, set for backwards compatibility and only used for TypeDef in
  // Ops.cpp
  explicit ProcessGroup(int rank, int size);

  explicit ProcessGroup(
      const c10::intrusive_ptr<::c10d::Store>& store,
      int rank,
      int size,
      c10::intrusive_ptr<Options> options);
  ~ProcessGroup() override;

  int getRank() const {
    return rank_;
  }

  int getSize() const {
    return size_;
  }

  // Returns an unique opaque ID of this process group object.
  int64_t getID() const {
    return reinterpret_cast<std::intptr_t>(this);
  }

  // Returns an unique opaque ID of a backend for the specific backend type
  // that can correlate with this process group's collectives.
  int64_t getBackendID(BackendType backend_type) const {
    return reinterpret_cast<std::intptr_t>(getBackend(backend_type).get());
  }

  virtual const std::string getBackendName() const {
    return options_->backend;
  };

  BackendType getBackendType() const {
    return backendType_;
  };

  virtual void startCoalescing(c10::DeviceType deviceType) {
    // only nccl has implemented startCoalescing so only execute for nccl
    // backends
    auto backend = getBackend(deviceType);
    backend->startCoalescing();
  }

  virtual c10::intrusive_ptr<Work> endCoalescing(c10::DeviceType deviceType) {
    // only nccl has implemented endCoalescing so only execute for nccl
    // backends
    auto backend = getBackend(deviceType);
    auto work = backend->endCoalescing();
    return work;
  }

  virtual c10::intrusive_ptr<Work> broadcast(
      std::vector<at::Tensor>& tensors,
      const BroadcastOptions& opts = BroadcastOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::broadcast_", "")
            .typed<
                std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
                    at::TensorList,
                    const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                    int64_t,
                    int64_t,
                    bool,
                    int64_t)>();
    // It's awakward to unbox the opts here and box them again in the custom C++
    // op. But it's also complicated to make opts as a CustomClassHolder. Leave
    // it as it is now.
    return std::get<1>(op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.rootRank,
        opts.rootTensor,
        opts.asyncOp,
        opts.timeout.count()));
  }

  virtual c10::intrusive_ptr<Work> allreduce(
      std::vector<at::Tensor>& tensors,
      const AllreduceOptions& opts = AllreduceOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::allreduce_", "")
            .typed<
                std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
                    at::TensorList,
                    const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                    const c10::intrusive_ptr<::c10d::ReduceOp>&,
                    const c10::optional<at::Tensor>& sparse_indices,
                    int64_t)>();

    return std::get<1>(op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        c10::make_intrusive<ReduceOp>(opts.reduceOp),
        opts.sparseIndices,
        opts.timeout.count()));
  }

  virtual c10::intrusive_ptr<Work> allreduce_coalesced(
      std::vector<at::Tensor>& tensors,
      const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::allreduce_coalesced_", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::TensorList,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             const c10::intrusive_ptr<::c10d::ReduceOp>&,
                             int64_t)>();

    return op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        c10::make_intrusive<ReduceOp>(opts.reduceOp),
        opts.timeout.count());
  }

  virtual c10::intrusive_ptr<Work> reduce(
      std::vector<at::Tensor>& tensors,
      const ReduceOptions& opts = ReduceOptions()) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::reduce_", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::TensorList,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             const c10::intrusive_ptr<::c10d::ReduceOp>&,
                             int64_t,
                             int64_t,
                             int64_t)>();
    return op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        c10::make_intrusive<ReduceOp>(opts.reduceOp),
        opts.rootRank,
        opts.rootTensor,
        opts.timeout.count());
  }

  virtual c10::intrusive_ptr<Work> allgather(
      std::vector<std::vector<at::Tensor>>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::allgather_", "")
                         .typed<std::tuple<
                             std::vector<std::vector<at::Tensor>>,
                             c10::intrusive_ptr<Work>>(
                             const std::vector<std::vector<at::Tensor>>&,
                             at::TensorList,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             int64_t)>();

    return std::get<1>(op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.timeout.count()));
  }

  // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
  // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
  // For implementers of ProcessGroup API and advanced users only.
  // Note: this function will be deprecated in near future.
  virtual c10::intrusive_ptr<Work> _allgather_base(
      at::Tensor& outputBuffer,
      at::Tensor& inputBuffer,
      const AllgatherOptions& opts = AllgatherOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::_allgather_base_", "")
            .typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
                at::Tensor&,
                at::Tensor&,
                const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                bool,
                int64_t)>();

    return std::get<1>(op.call(
        outputBuffer,
        inputBuffer,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.asyncOp,
        opts.timeout.count()));
  }

  // This function is deprecated and will be moved out of ProcessGroup to comms:
  // * do not add dependencies on this function,
  // * do not implement it in your ProcessGroup, implement _allgather_base
  //   instead.
  virtual c10::intrusive_ptr<Work> allgather_coalesced(
      std::vector<std::vector<at::Tensor>>& outputTensorLists,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::allgather_coalesced_", "")
            .typed<c10::intrusive_ptr<Work>(
                const std::vector<std::vector<at::Tensor>>&,
                const at::TensorList&,
                const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();

    return op.call(
        outputTensorLists,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
  }

  // This function is a coalesced version of `allgather_into_tensor` (currently
  // still named as `_allgather_base`). Each tensor in the vector corresponds to
  // an input/output of one `allgather_into_tensor` operation.
  virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "")
            .typed<c10::intrusive_ptr<Work>(
                const at::TensorList,
                const at::TensorList,
                const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();

    return op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
  }

  virtual c10::intrusive_ptr<Work> gather(
      std::vector<std::vector<at::Tensor>>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const GatherOptions& opts = GatherOptions()) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::gather_", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             const std::vector<std::vector<at::Tensor>>&,
                             const at::TensorList&,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             int64_t,
                             int64_t)>();
    return op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.rootRank,
        opts.timeout.count());
  }

  virtual c10::intrusive_ptr<Work> scatter(
      std::vector<at::Tensor>& outputTensors,
      std::vector<std::vector<at::Tensor>>& inputTensors,
      const ScatterOptions& opts = ScatterOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::scatter_", "")
            .typed<
                std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
                    const at::TensorList&,
                    const std::vector<std::vector<at::Tensor>>&,
                    const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                    int64_t,
                    bool,
                    int64_t)>();
    return std::get<1>(op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.rootRank,
        opts.asyncOp,
        opts.timeout.count()));
  }

  virtual c10::intrusive_ptr<Work> reduce_scatter(
      std::vector<at::Tensor>& outputTensors,
      std::vector<std::vector<at::Tensor>>& inputTensors,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::reduce_scatter_", "")
            .typed<
                std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
                    const at::TensorList&,
                    const std::vector<std::vector<at::Tensor>>&,
                    const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                    const c10::intrusive_ptr<::c10d::ReduceOp>&,
                    int64_t)>();
    return std::get<1>(op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
        opts.timeout.count()));
  }

  virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
      at::Tensor& outputBuffer,
      at::Tensor& inputBuffer,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::_reduce_scatter_base_", "")
            .typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
                at::Tensor&,
                at::Tensor&,
                const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                const c10::intrusive_ptr<::c10d::ReduceOp>&,
                bool,
                int64_t)>();
    return std::get<1>(op.call(
        outputBuffer,
        inputBuffer,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
        opts.asyncOp,
        opts.timeout.count()));
  }

  // This function is a coalesced version of `reduce_scatter_tensor` (currently
  // still named as `_reduce_scatter_base`). Each tensor in the vector
  // corresponds to an input/output of one `reduce_scatter_tensor` operation.
  virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "")
            .typed<c10::intrusive_ptr<Work>(
                const at::TensorList,
                const at::TensorList,
                const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                const c10::intrusive_ptr<::c10d::ReduceOp>&,
                int64_t)>();

    return op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
        opts.timeout.count());
  }

  virtual c10::intrusive_ptr<Work> alltoall_base(
      at::Tensor& outputBuffer,
      at::Tensor& inputBuffer,
      std::vector<int64_t>& outputSplitSizes,
      std::vector<int64_t>& inputSplitSizes,
      const AllToAllOptions& opts = AllToAllOptions()) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::alltoall_base_", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::Tensor&,
                             at::Tensor&,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             std::vector<int64_t>,
                             std::vector<int64_t>,
                             int64_t)>();
    return op.call(
        outputBuffer,
        inputBuffer,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        outputSplitSizes,
        inputSplitSizes,
        opts.timeout.count());
  }

  virtual c10::intrusive_ptr<Work> alltoall(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllToAllOptions& opts = AllToAllOptions()) {
    static auto op =
        c10::Dispatcher::singleton()
            .findSchemaOrThrow("c10d::alltoall_", "")
            .typed<
                std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
                    const at::TensorList&,
                    const at::TensorList&,
                    const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                    int64_t)>();
    return std::get<1>(op.call(
        outputTensors,
        inputTensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.timeout.count()));
  }

  virtual void monitoredBarrier(
      const BarrierOptions& opts,
      bool wait_all_ranks = false) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::monitored_barrier_", "")
                         .typed<void(
                             at::Tensor,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             const std::vector<int64_t>&,
                             int64_t,
                             bool)>();
    // Default to using cpu implementation, monitored barrier is only for GLOO
    at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU));
    op.call(
        tensor,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.device_ids,
        opts.timeout.count(),
        wait_all_ranks);
  }

  // Agrees on an initial sequence number for the whole group by having rank 0
  // create it and broadcast it to other ranks using the store. Only implemented
  // for GLOO and NCCL backends currently.
  virtual void setSequenceNumberForGroup() {
    auto backendType = getBackendType();
    // TODO: HACK for backend name to get sequence number for that backend.
    if (backendType == ProcessGroup::BackendType::GLOO ||
        backendType == ProcessGroup::BackendType::NCCL ||
        backendType == ProcessGroup::BackendType::UCC) {
      getDefaultBackend()->setSequenceNumberForGroup();
    } else {
      TORCH_CHECK(
          false,
          c10::str(
              "ProcessGroup ",
              getBackendName(),
              " does not yet support sequence numbers."));
    }
  }

  // Retrieves the current sequence number for the whole group, which should be
  // in sync. If the returned number is not consistent across the group, it
  // may indicate that there is some sort of collective desynchronization.
  virtual uint64_t getSequenceNumberForGroup() {
    auto backendType = getBackendType();

    // TODO: HACK for backend name to get sequence number for that backend.
    if (backendType == ProcessGroup::BackendType::GLOO ||
        backendType == ProcessGroup::BackendType::NCCL ||
        backendType == ProcessGroup::BackendType::UCC) {
      return getDefaultBackend()->getSequenceNumberForGroup();
    } else {
      TORCH_CHECK(
          false,
          c10::str(
              "ProcessGroup ",
              getBackendName(),
              " does not yet support sequence numbers."));
    }
  }

  virtual c10::intrusive_ptr<Work> send(
      std::vector<at::Tensor>& tensors,
      int dstRank,
      int tag) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::send", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::TensorList,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             int64_t,
                             int64_t)>();
    return op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        dstRank,
        tag);
  }

  virtual c10::intrusive_ptr<Work> recv(
      std::vector<at::Tensor>& tensors,
      int srcRank,
      int tag) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::recv_", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::TensorList,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             int64_t,
                             int64_t)>();
    return op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        srcRank,
        tag);
  }

  virtual c10::intrusive_ptr<Work> recvAnysource(
      std::vector<at::Tensor>& tensors,
      int tag) {
    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::recv_any_source_", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::TensorList,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             int64_t)>();
    return op.call(
        tensors,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        tag);
  }

  virtual c10::intrusive_ptr<Work> barrier(
      const BarrierOptions& opts = BarrierOptions()) {
    static at::Tensor tensor;
    // TODO: if nccl was specified then use it
    auto device = opts.device;
    if (device.has_value()) {
      // set device tensor from argument
      tensor = at::empty(
          {1}, at::TensorOptions().device(device.value()).dtype(at::kByte));
    } else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) {
      // set cuda tensor
      tensor = at::empty(
          {1},
          at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte));
    } else {
      // Default to using cpu implementation
      tensor = at::empty(
          {1},
          at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte));
    }

    static auto op = c10::Dispatcher::singleton()
                         .findSchemaOrThrow("c10d::barrier", "")
                         .typed<c10::intrusive_ptr<::c10d::Work>(
                             at::Tensor,
                             const c10::intrusive_ptr<::c10d::ProcessGroup>&,
                             const std::vector<int64_t>&,
                             int64_t)>();

    return op.call(
        tensor,
        c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
        opts.device_ids,
        opts.timeout.count());
  }

  c10::intrusive_ptr<Options> getOptions() {
    return options_;
  }

  bool hasBackends() {
    return !deviceTypeToBackendType_.empty();
  }

  void setBackend(
      c10::DeviceType deviceType,
      BackendType backendType,
      const c10::optional<c10::intrusive_ptr<Backend>>& backend) {
    // TODO: should we add these entries after the backend setting succeeds?
    deviceTypeToBackendType_[deviceType] = backendType;
    deviceTypes_.insert(deviceType);
    // if the backendType is already set then reuse it for this device
    if (backendTypeToBackend_.find(backendType) !=
        backendTypeToBackend_.end()) {
      auto existingBackend = backendTypeToBackend_.at(backendType);
      deviceTypeToBackend_[deviceType] = existingBackend;
    } else {
      // check if backend has value
      if (backend.has_value()) {
        deviceTypeToBackend_[deviceType] = backend.value();
        backendTypeToBackend_[backendType] = backend.value();
      }
    }
  }

  c10::intrusive_ptr<Backend> getDefaultBackend() const {
    TORCH_CHECK(
        backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(),
        "Could not find the default backend type ",
        backendType_,
        " for Process Group with name ",
        getBackendName(),
        ".");
    return backendTypeToBackend_.at(backendType_);
  }

  c10::intrusive_ptr<Backend> getBackend(c10::DeviceType deviceType);

  c10::intrusive_ptr<Backend> getBackend(BackendType backendType) const {
    TORCH_CHECK(
        backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(),
        "Could not find backend type ",
        backendType,
        ".");
    return backendTypeToBackend_.at(backendType);
  }

  // Return device types supported by this ProcessGroup.
  // Note: the return type is `Device` rather than `DeviceType` for the purpose
  // of easy comparison at Python level. The `Device` will have default index
  // (-1).
  std::vector<c10::Device> getDeviceTypes() const {
    std::vector<c10::Device> devices;
    devices.reserve(deviceTypes_.size());
    for (auto& dt : deviceTypes_) {
      devices.push_back(c10::Device(dt));
    }
    return devices;
  }

  void registerOnCompletionHook(
      std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
    getDefaultBackend()->registerOnCompletionHook(std::move(hook));
  }

  void waitForPendingWorks() {
    getDefaultBackend()->waitForPendingWorks();
  }

  bool hasHooks() const {
    return getDefaultBackend()->hasHooks();
  }

  const std::string& getGroupName() const;
  void setGroupName(const std::string& name);
  void enableCollectivesTiming();

  void release_resources() override;

 protected:
  // Implementations of this interface need to call this to setup
  // appropriate logging etc.
  void init();

  c10::intrusive_ptr<c10d::Store> store_;
  const int rank_;
  const int size_;
  const c10::intrusive_ptr<Options> options_;
  const BackendType backendType_;

  // Debug level setting. It is parsed once when ProcessGroup is constructed and
  // remains the same across use of this process group.
  DebugLevel dist_debug_level_;

  // Backend classes for this ProcessGroup
  std::unordered_set<c10::DeviceType> deviceTypes_;
  std::unordered_map<c10::DeviceType, BackendType> deviceTypeToBackendType_;
  std::unordered_map<c10::DeviceType, c10::intrusive_ptr<Backend>>
      deviceTypeToBackend_;
  std::unordered_map<BackendType, c10::intrusive_ptr<Backend>>
      backendTypeToBackend_;
};

} // namespace c10d
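Usage note (not part of the commit): the dispatcher-backed front-end above can be exercised directly from C++. A minimal sketch, assuming an already-constructed ProcessGroup whose backend supports the tensor's device; the helper name is illustrative:

#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>

// Hypothetical helper: sum a tensor across all ranks and block until done.
at::Tensor allreduceSum(
    const c10::intrusive_ptr<c10d::ProcessGroup>& pg,
    at::Tensor t) {
  std::vector<at::Tensor> tensors{t};

  c10d::AllreduceOptions opts;
  opts.reduceOp = c10d::ReduceOp::SUM;

  // allreduce() boxes the call through the c10d::allreduce_ dispatcher op and
  // returns a Work handle; wait() blocks (or synchronizes the stream) until
  // the collective completes or the timeout expires.
  c10::intrusive_ptr<c10d::Work> work = pg->allreduce(tensors, opts);
  work->wait();
  return tensors[0];
}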
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp ADDED
@@ -0,0 +1,438 @@
#pragma once

#ifdef USE_C10D_GLOO

#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

#include <gloo/algorithm.h>
#include <gloo/common/error.h>
#include <gloo/context.h>
#include <gloo/rendezvous/store.h>
#include <gloo/transport/device.h>

#include <c10/util/hash.h>

#include <torch/csrc/distributed/c10d/Backend.hpp>
#include <torch/csrc/distributed/c10d/Store.hpp>
#include <torch/csrc/distributed/c10d/Types.hpp>
#include <torch/csrc/distributed/c10d/Utils.hpp>

namespace c10d {

constexpr const char* GLOO_BACKEND_NAME = "gloo";

// ProcessGroupGloo implements Gloo bindings for c10d.
//
// All functions on this class are expected to be called in the same
// order across processes in the group. This is the only way that we
// can guarantee to match up the same calls across processes. For
// multi-threaded usage of process groups, you can use consider using
// multiple process group instances.
//
// The Gloo algorithms that this class calls into are cached by their
// signature (see description of AlgorithmKey above). This cache works
// as follows: every function call instantiates an AlgorithmKey and
// looks in the cache for existing entries. If there is one, it is
// removed from the cache and returned to the caller. If there are
// none, a new entry is created and returned. If an entry was created
// before, but is still in use, the call will block and wait until the
// entry is returned to the cache.
//
// In the future, we hope to extend this to allow multiple entries per
// key, to enable parallelism for a single key. The number of entries
// per key must always be identical for all processes. This maximum
// number can be automatically tuned, but only if we let a single
// process take charge, and have it broadcast the limits.
//
class TORCH_API ProcessGroupGloo : public Backend {
 public:
  // AsyncWork is the Gloo specific superclass for asynchronous work items.
  // We can split asynchronous work into 3 phases:
  // 1) Sanity checks and prepare input (e.g. memcpy)
  // 2) Run operation on background thread
  // 3) Synchronize with completion on foreground thread
  //
  // There is state to be shared between these 3 phases and all of this state
  // is captured in the AsyncWork class and its derivatives.
  //
  // Note: while we are porting operations to use new style collectives, there
  // is a split between operations using the existing caching approach and
  // operations using the new AsyncWork base class. Over time we will port
  // all operations and perform needed cleanup.
  //
  // FIXME: This probably should be called WorkGloo since the work is executed
  // in sync mode by a background thread.
  class TORCH_API AsyncWork : public Work {
   public:
    explicit AsyncWork(
        std::vector<std::vector<at::Tensor>> outputTensors,
        OpType opType,
        uint64_t seq,
        const char* profilingTitle = nullptr,
        const c10::optional<std::vector<at::Tensor>>& inputTensors =
            c10::nullopt);

    ~AsyncWork() override = default;

    static void execute(c10::intrusive_ptr<AsyncWork> work);

    virtual void run() = 0;

    std::vector<at::Tensor> result() override;

    c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
    uint64_t getSequencenumber() const override;

   protected:
    friend class ProcessGroupGloo;

   private:
    void finishWorkGloo();
    void finishWorkGlooError(std::exception_ptr eptr);
    inline void recordAsyncWorkProfilingInfo(
        const char* profilingTitle,
        const c10::optional<std::vector<at::Tensor>>& inputTensors);

    const std::vector<std::vector<at::Tensor>> outputTensors_;
    c10::intrusive_ptr<at::ivalue::Future> future_;
    std::function<void()> recordFunctionBeforeCallback_;
    const uint64_t seq_;
  };

  // Wrap c10d store as Gloo store
  class TORCH_API GlooStore : public ::gloo::rendezvous::Store {
   public:
    GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {}

    void setUint(const std::string& key, const std::vector<uint8_t>& value) {
      store_->set(key, value);
    }

    void set(const std::string& key, const std::vector<char>& value) override {
      std::vector<uint8_t> tmp(value.begin(), value.end());
      store_->set(key, tmp);
    }

    std::vector<uint8_t> getUint(const std::string& key) {
      auto value = store_->get(key);
      return value;
    }

    std::vector<char> get(const std::string& key) override {
      auto value = store_->get(key);
      return std::vector<char>(value.begin(), value.end());
    }

    void wait(const std::vector<std::string>& keys) override {
132
+ store_->wait(keys, ::c10d::Store::kDefaultTimeout);
133
+ }
134
+
135
+ void wait(
136
+ const std::vector<std::string>& keys,
137
+ const std::chrono::milliseconds& timeout) override {
138
+ store_->wait(keys, timeout);
139
+ }
140
+
141
+ #ifdef GLOO_STORE_HAS_STORE_V2
142
+ bool has_v2_support() override {
143
+ return store_->hasExtendedApi();
144
+ }
145
+
146
+ std::vector<std::vector<char>> multi_get(
147
+ const std::vector<std::string>& keys) override {
148
+ std::vector<std::vector<char>> res;
149
+ for (auto& value : store_->multiGet(keys)) {
150
+ res.emplace_back(std::vector<char>(value.begin(), value.end()));
151
+ }
152
+ return res;
153
+ }
154
+
155
+ void multi_set(
156
+ const std::vector<std::string>& keys,
157
+ const std::vector<std::vector<char>>& values) override {
158
+ std::vector<std::vector<uint8_t>> u_values;
159
+ for (auto& value : values) {
160
+ u_values.emplace_back(std::vector<uint8_t>(value.begin(), value.end()));
161
+ }
162
+ store_->multiSet(keys, u_values);
163
+ }
164
+
165
+ void append(const std::string& key, const std::vector<char>& value)
166
+ override {
167
+ std::vector<uint8_t> tmp(value.begin(), value.end());
168
+ return store_->append(key, tmp);
169
+ }
170
+
171
+ int64_t add(const std::string& key, int64_t value) override {
172
+ return store_->add(key, value);
173
+ }
174
+ #endif
175
+
176
+ protected:
177
+ c10::intrusive_ptr<::c10d::Store> store_;
178
+ };
179
+
180
+ // For send and recv operations there is no need to pass them to the
181
+ // thread pool as they are entirely completed by the device thread.
182
+ // This work object is used to synchronize completion of the send or
183
+ // recv operation. It keeps a reference to the tensor it is
184
+ // operating on to prevent it from being deallocated while the
185
+ // operation is still in flight.
186
+ class TORCH_API SendWork : public Work {
187
+ public:
188
+ explicit SendWork(
189
+ at::Tensor& tensor,
190
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
191
+ uint64_t seq);
192
+
193
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
194
+
195
+ void abort() override;
196
+
197
+ uint64_t getSequencenumber() const override;
198
+
199
+ protected:
200
+ at::Tensor tensor_;
201
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
202
+ const uint64_t seq_;
203
+ };
204
+
205
+ class TORCH_API RecvWork : public Work {
206
+ public:
207
+ explicit RecvWork(
208
+ at::Tensor& tensor,
209
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
210
+ OpType opType,
211
+ uint64_t seq,
212
+ const char* profilingTitle = nullptr);
213
+
214
+ int sourceRank() const override;
215
+
216
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
217
+
218
+ void abort() override;
219
+
220
+ uint64_t getSequencenumber() const override;
221
+
222
+ protected:
223
+ at::Tensor tensor_;
224
+ std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
225
+ int srcRank_;
226
+ const uint64_t seq_;
227
+ };
228
+
229
+ struct TORCH_API Options : public Backend::Options {
230
+ explicit Options(
231
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout);
232
+
233
+ // Return an intrusive_ptr to a newly constructed Options object.
234
+ static c10::intrusive_ptr<Options> create(
235
+ std::chrono::milliseconds timeout = kBackendDefaultTimeout) {
236
+ return c10::make_intrusive<Options>(timeout);
237
+ }
238
+
239
+ std::vector<std::shared_ptr<::gloo::transport::Device>> devices;
240
+ int threads;
241
+ };
242
+
243
+ const std::string getBackendName() const override {
244
+ return std::string(GLOO_BACKEND_NAME);
245
+ }
246
+
247
+ // Helper functions to create a new device object.
248
+ // They are static functions on this class to keep them logically
249
+ // separate from the rest of the code base (e.g. torch/csrc/distributed).
250
+
251
+ // Create new device instance for specific interface.
252
+ static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
253
+ const std::string& interface);
254
+
255
+ // Create new device instance for specific hostname or address.
256
+ static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
257
+ const std::string& hostname);
258
+
259
+ // Create new device instance.
260
+ // It tries to resolve this machine's hostname and bind to that address.
261
+ // If that fails (i.e. the hostname doesn't resolve to an address), it
262
+ // falls back to binding to the loopback address.
263
+ static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();
264
+
265
+ // Create ProcessGroupGloo instance.
266
+ static c10::intrusive_ptr<ProcessGroupGloo> createProcessGroupGloo(
267
+ const c10::intrusive_ptr<Store>& store,
268
+ int rank,
269
+ int size,
270
+ std::chrono::milliseconds timeout);
271
+
272
+ explicit ProcessGroupGloo(
273
+ const c10::intrusive_ptr<Store>& store,
274
+ int rank,
275
+ int size,
276
+ c10::intrusive_ptr<Options> options = Options::create());
277
+
278
+ ~ProcessGroupGloo() override;
279
+
280
+ c10::intrusive_ptr<Options> getOptions() {
281
+ return options_;
282
+ }
283
+
284
+ c10::intrusive_ptr<Work> broadcast(
285
+ std::vector<at::Tensor>& tensors,
286
+ const BroadcastOptions& opts = BroadcastOptions()) override;
287
+
288
+ c10::intrusive_ptr<Work> allreduce(
289
+ std::vector<at::Tensor>& tensors,
290
+ const AllreduceOptions& opts = AllreduceOptions()) override;
291
+
292
+ c10::intrusive_ptr<Work> allreduce_sparse(
293
+ std::vector<at::Tensor>& tensors,
294
+ const AllreduceOptions& opts = AllreduceOptions()) override;
295
+
296
+ c10::intrusive_ptr<Work> allreduce_coalesced(
297
+ std::vector<at::Tensor>& tensors,
298
+ const AllreduceCoalescedOptions& opts =
299
+ AllreduceCoalescedOptions()) override;
300
+
301
+ c10::intrusive_ptr<Work> reduce(
302
+ std::vector<at::Tensor>& tensors,
303
+ const ReduceOptions& opts = ReduceOptions()) override;
304
+
305
+ c10::intrusive_ptr<Work> _reduce_scatter_base(
306
+ at::Tensor& outputTensor,
307
+ at::Tensor& inputTensor,
308
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
309
+
310
+ c10::intrusive_ptr<Work> _allgather_base(
311
+ at::Tensor& output_tensor,
312
+ at::Tensor& input_tensor,
313
+ const AllgatherOptions& opts = AllgatherOptions()) override;
314
+
315
+ c10::intrusive_ptr<Work> allgather(
316
+ std::vector<std::vector<at::Tensor>>& outputs,
317
+ std::vector<at::Tensor>& inputs,
318
+ const AllgatherOptions& opts = AllgatherOptions()) override;
319
+
320
+ c10::intrusive_ptr<Work> allgather_coalesced(
321
+ std::vector<std::vector<at::Tensor>>& output_lists,
322
+ std::vector<at::Tensor>& input_list,
323
+ const AllgatherOptions& opts = AllgatherOptions()) override;
324
+
325
+ c10::intrusive_ptr<Work> gather(
326
+ std::vector<std::vector<at::Tensor>>& outputs,
327
+ std::vector<at::Tensor>& inputs,
328
+ const GatherOptions& opts = GatherOptions()) override;
329
+
330
+ c10::intrusive_ptr<Work> scatter(
331
+ std::vector<at::Tensor>& outputs,
332
+ std::vector<std::vector<at::Tensor>>& inputs,
333
+ const ScatterOptions& opts = ScatterOptions()) override;
334
+
335
+ c10::intrusive_ptr<Work> reduce_scatter(
336
+ std::vector<at::Tensor>& outputs,
337
+ std::vector<std::vector<at::Tensor>>& inputs,
338
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
339
+
340
+ c10::intrusive_ptr<Work> alltoall_base(
341
+ at::Tensor& outputTensor,
342
+ at::Tensor& inputTensor,
343
+ std::vector<int64_t>& outputCounts,
344
+ std::vector<int64_t>& inputCounts,
345
+ const AllToAllOptions& opts = AllToAllOptions()) override;
346
+
347
+ c10::intrusive_ptr<Work> send(
348
+ std::vector<at::Tensor>& tensors,
349
+ int dstRank,
350
+ int tag) override;
351
+
352
+ c10::intrusive_ptr<Work> recv(
353
+ std::vector<at::Tensor>& tensors,
354
+ int srcRank,
355
+ int tag) override;
356
+
357
+ c10::intrusive_ptr<Work> recvAnysource(
358
+ std::vector<at::Tensor>& tensors,
359
+ int tag) override;
360
+
361
+ c10::intrusive_ptr<Work> barrier(
362
+ const BarrierOptions& opts = BarrierOptions()) override;
363
+
364
+ void enableCollectivesTiming() override;
365
+
366
+ const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const {
367
+ return store_;
368
+ }
369
+
370
+ // Similar to barrier(), but blocks rank 0 until all other ranks have
371
+ // acknowledged that they are alive (through send/recv from rank 0). Rank 0
372
+ // is able to report all failed ranks if waitAllRanks = true, otherwise
373
+ // reports the first rank it detected as failed.
374
+ void monitoredBarrier(
375
+ const BarrierOptions& opts = BarrierOptions(),
376
+ bool waitAllRanks = false) override;
377
+
378
+ // Agrees on an initial sequence number for the whole group by having rank 0
379
+ // create it and broadcast it to other ranks using the store.
380
+ void setSequenceNumberForGroup() override;
381
+
382
+ // Retrieves the current sequence number for the whole group, which should be
383
+ // in sync. If the returned number is not consistent across the group, it
384
+ // may indicate that there is some sort of collective desynchronization.
385
+ uint64_t getSequenceNumberForGroup() override;
386
+
387
+ int getNumThreads() {
388
+ return options_->threads;
389
+ }
390
+
391
+ protected:
392
+ std::unique_ptr<::gloo::rendezvous::Store> store_;
393
+ const c10::intrusive_ptr<Options> options_;
394
+
395
+ // Every Gloo context represents a set of connections to its peers.
396
+ // In order to use more than one device (or allow for parallelism on
397
+ // a single device), you need multiple contexts.
398
+ std::vector<std::shared_ptr<::gloo::Context>> contexts_;
399
+ std::vector<std::thread> threads_;
400
+ bool stop_;
401
+
402
+ // Incremented for every collective we kick off.
403
+ // The value is used as tag for collective operations. Collectives are kicked
404
+ // off in identical order across processes. Therefore the tag can be used
405
+ // to match up operations during concurrent execution.
406
+ uint32_t collectiveCounter_;
407
+
408
+ // Returns next collective tag to use (uses collectiveCounter_).
409
+ uint32_t nextTag();
410
+
411
+ // Returns the context to use for the specified tag.
412
+ // With `nextTag` returning an increasing number, this should lead
413
+ // to contexts being used in a round-robin fashion.
414
+ std::shared_ptr<::gloo::Context> getContext(uint32_t tag);
415
+
416
+ // Entrypoint for worker threads.
417
+ void runLoop(int workerIndex);
418
+
419
+ // Queue work to run on worker thread.
420
+ void enqueue(c10::intrusive_ptr<AsyncWork> work);
421
+
422
+ // Keep both a queue of pending work, and a vector with in progress work.
423
+ // Both of these can only be mutated when holding the queue lock.
424
+ // We keep both around instead of just the queue, so we can grab a weak_ptr
425
+ // to all in progress and pending work when executing a barrier.
426
+ // When executing a barrier, we need to ensure that all prior work
427
+ // has completed before the barrier itself completes.
428
+ std::deque<c10::intrusive_ptr<AsyncWork>> workQueue_;
429
+ std::vector<c10::intrusive_ptr<AsyncWork>> workInProgress_;
430
+ std::mutex workMutex_;
431
+ std::condition_variable workProduceCV_;
432
+ std::condition_variable workConsumeCV_;
433
+ uint64_t seq_{0};
434
+ };
435
+
436
+ } // namespace c10d
437
+
438
+ #endif // USE_C10D_GLOO
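To make the pieces of ProcessGroupGloo.hpp concrete, here is a hedged sketch of building a group from a Store and running one allreduce. It only uses declarations visible in the header above; the thread count and tensor contents are illustrative, not prescribed by the diff.

#ifdef USE_C10D_GLOO
#include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>

c10::intrusive_ptr<c10d::ProcessGroupGloo> makeGlooGroup(
    const c10::intrusive_ptr<c10d::Store>& store,
    int rank,
    int size) {
  auto opts = c10d::ProcessGroupGloo::Options::create();
  // Bind to this host (falling back to loopback) and use two worker threads
  // to drain the AsyncWork queue.
  opts->devices.push_back(c10d::ProcessGroupGloo::createDefaultDevice());
  opts->threads = 2;
  return c10::make_intrusive<c10d::ProcessGroupGloo>(store, rank, size, opts);
}

void allreduceOnce(c10d::ProcessGroupGloo& pg, std::vector<at::Tensor>& tensors) {
  auto work = pg.allreduce(tensors);  // enqueued onto a background worker
  work->wait();                       // foreground synchronization (phase 3)
}
#endif

For the default-device case, the static createProcessGroupGloo(store, rank, size, timeout) factory declared in the header covers the same ground without touching Options.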
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp ADDED
@@ -0,0 +1,271 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_MPI
4
+
5
+ #include <condition_variable>
6
+ #include <deque>
7
+ #include <exception>
8
+ #include <memory>
9
+ #include <mutex>
10
+ #include <thread>
11
+ #include <vector>
12
+
13
+ #include <ATen/core/ivalue.h>
14
+ #include <ATen/core/ivalue_inl.h>
15
+
16
+ #include <torch/csrc/distributed/c10d/Backend.hpp>
17
+ #include <torch/csrc/distributed/c10d/Types.hpp>
18
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
19
+
20
+ #include <c10/util/CallOnce.h>
21
+
22
+ #include <mpi.h>
23
+
24
+ namespace c10d {
25
+
26
+ constexpr const char* MPI_BACKEND_NAME = "mpi";
27
+
28
+ // WorkEntry is the state associated with a single MPI run instance.
29
+ // It includes the source Tensor list and destination Tensor list, as well as
30
+ // the actual run function that will operate either on src or dst or both.
31
+ struct WorkEntry {
32
+ explicit WorkEntry(
33
+ std::vector<at::Tensor>* srcPtr,
34
+ std::vector<at::Tensor>* dstPtr,
35
+ std::function<void(std::unique_ptr<WorkEntry>&)> run)
36
+ : dst(dstPtr ? *dstPtr : std::vector<at::Tensor>()), run(std::move(run)) {
37
+ if (srcPtr) {
38
+ src = *srcPtr;
39
+ }
40
+ }
41
+
42
+ // Not copyable
43
+ WorkEntry(const WorkEntry&) = delete;
44
+ // Not copy assignable
45
+ WorkEntry& operator=(const WorkEntry&) = delete;
46
+
47
+ // For input and output tensors (in-place), we will always use src
48
+ std::vector<at::Tensor> src;
49
+
50
+ // Copy of user provided outputs.
51
+ const std::vector<at::Tensor> dst;
52
+
53
+ // src rank returned, for recv only
54
+ int* srcRank = nullptr;
55
+ std::function<void(std::unique_ptr<WorkEntry>&)> run;
56
+ };
57
+
58
+ // ProcessGroupMPI implements MPI bindings for c10d.
59
+ //
60
+ // All functions on this class are expected to be called in the same
61
+ // order across processes in the group. This is the only way that we
62
+ // can guarantee to match up the same calls across processes.
63
+ //
64
+ // All MPI functions provided by this class is asynchronously scheduled on a
65
+ // Worker thread. Therefore, ProcessGroupMPI requires the MPI implementation
66
+ // that is used to have a minimum thread support value of MPI_THREAD_SERIALIZED.
67
+ // That is, the process may be multi-threaded, and multiple threads may make
68
+ // MPI calls, but only one at a time: MPI calls are not made concurrently from
69
+ // two distinct threads (all MPI calls are serialized). However, with
70
+ // MPI_THREAD_SERIALIZED, ProcessGroupMPI will only support a single process
71
+ // group. In other words, no more than 1 process group can be created globally.
72
+ //
73
+ // If you would like to use multiple ProcessGroupMPI, it requires your MPI
74
+ // implementation to have a thread support value of MPI_THREAD_MULTIPLE, that
75
+ // is, multiple threads may call MPI, with no restriction.
76
+ //
77
+ // Also note that ProcessGroupMPI only supports a single Tensor operation. In
78
+ // other words, the size of the input Tensor vector should always be 1.
79
+ //
80
+ // CUDA tensors are supported if the MPI implementation is CUDA-aware, and
81
+ // ProcessGroupMPI will automatically detect this support.
82
+ class TORCH_API ProcessGroupMPI : public Backend {
83
+ public:
84
+ class WorkMPI : public Work {
85
+ public:
86
+ explicit WorkMPI(
87
+ std::vector<at::Tensor> outputTensors,
88
+ const char* profilingTitle = nullptr,
89
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
90
+ c10::nullopt)
91
+ : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors),
92
+ outputTensors_(std::move(outputTensors)),
93
+ future_(c10::make_intrusive<at::ivalue::Future>(
94
+ c10::ListType::create(c10::TensorType::get()))) {}
95
+
96
+ std::vector<at::Tensor> result() override;
97
+
98
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
99
+
100
+ protected:
101
+ friend class ProcessGroupMPI;
102
+
103
+ private:
104
+ void finishWorkMPI();
105
+ void finishWorkMPIError(std::exception_ptr eptr);
106
+
107
+ std::vector<at::Tensor> outputTensors_;
108
+ c10::intrusive_ptr<at::ivalue::Future> future_;
109
+ };
110
+
111
+ class AsyncWork : public Work {
112
+ public:
113
+ AsyncWork(
114
+ MPI_Request request,
115
+ std::vector<at::Tensor> outputTensors,
116
+ const char* profilingTitle = nullptr,
117
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
118
+ c10::nullopt);
119
+
120
+ ~AsyncWork() override;
121
+
122
+ bool isCompleted() override;
123
+
124
+ bool isSuccess() const override;
125
+
126
+ int sourceRank() const override;
127
+
128
+ bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
129
+
130
+ void abort() override;
131
+
132
+ std::vector<at::Tensor> result() override;
133
+
134
+ protected:
135
+ void populateException();
136
+
137
+ private:
138
+ const std::vector<at::Tensor> outputTensors_;
139
+ MPI_Request request_;
140
+ MPI_Status status_;
141
+ };
142
+
143
+ // Constructor will spawn up the worker thread loop
144
+ explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm);
145
+
146
+ ~ProcessGroupMPI() override;
147
+
148
+ // Abort the MPI program; needs to be called when an exception is detected
149
+ void abort();
150
+
151
+ const std::string getBackendName() const override {
152
+ return std::string(MPI_BACKEND_NAME);
153
+ }
154
+
155
+ c10::intrusive_ptr<Work> broadcast(
156
+ std::vector<at::Tensor>& data,
157
+ const BroadcastOptions& opts = BroadcastOptions()) override;
158
+
159
+ c10::intrusive_ptr<Work> allreduce(
160
+ std::vector<at::Tensor>& tensors,
161
+ const AllreduceOptions& opts = AllreduceOptions()) override;
162
+
163
+ c10::intrusive_ptr<Work> allreduce_coalesced(
164
+ std::vector<at::Tensor>& tensors,
165
+ const AllreduceCoalescedOptions& opts =
166
+ AllreduceCoalescedOptions()) override;
167
+
168
+ c10::intrusive_ptr<Work> reduce(
169
+ std::vector<at::Tensor>& tensors,
170
+ const ReduceOptions& opts = ReduceOptions()) override;
171
+
172
+ c10::intrusive_ptr<Work> allgather(
173
+ std::vector<std::vector<at::Tensor>>& outputTensors,
174
+ std::vector<at::Tensor>& inputTensors,
175
+ const AllgatherOptions& opts = AllgatherOptions()) override;
176
+
177
+ c10::intrusive_ptr<Work> _allgather_base(
178
+ at::Tensor& outputbuffer,
179
+ at::Tensor& inputbuffer,
180
+ const AllgatherOptions& opts = AllgatherOptions()) override;
181
+
182
+ c10::intrusive_ptr<Work> allgather_coalesced(
183
+ std::vector<std::vector<at::Tensor>>& outputTensorLists,
184
+ std::vector<at::Tensor>& inputTensors,
185
+ const AllgatherOptions& opts = AllgatherOptions()) override;
186
+
187
+ c10::intrusive_ptr<Work> gather(
188
+ std::vector<std::vector<at::Tensor>>& outputTensors,
189
+ std::vector<at::Tensor>& inputTensors,
190
+ const GatherOptions& opts = GatherOptions()) override;
191
+
192
+ c10::intrusive_ptr<Work> scatter(
193
+ std::vector<at::Tensor>& outputTensors,
194
+ std::vector<std::vector<at::Tensor>>& inputTensors,
195
+ const ScatterOptions& opts = ScatterOptions()) override;
196
+
197
+ c10::intrusive_ptr<Work> reduce_scatter(
198
+ std::vector<at::Tensor>& outputTensors,
199
+ std::vector<std::vector<at::Tensor>>& inputTensors,
200
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
201
+
202
+ c10::intrusive_ptr<Work> alltoall_base(
203
+ at::Tensor& outputTensor,
204
+ at::Tensor& inputTensor,
205
+ std::vector<int64_t>& outputSplitSizes,
206
+ std::vector<int64_t>& inputSplitSizes,
207
+ const AllToAllOptions& opts = AllToAllOptions()) override;
208
+
209
+ c10::intrusive_ptr<Work> alltoall(
210
+ std::vector<at::Tensor>& outputTensors,
211
+ std::vector<at::Tensor>& inputTensors,
212
+ const AllToAllOptions& opts = AllToAllOptions()) override;
213
+
214
+ c10::intrusive_ptr<Work> send(
215
+ std::vector<at::Tensor>& tensors,
216
+ int dstRank,
217
+ int tag) override;
218
+
219
+ c10::intrusive_ptr<Work> recv(
220
+ std::vector<at::Tensor>& tensors,
221
+ int srcRank,
222
+ int tag) override;
223
+
224
+ c10::intrusive_ptr<Work> recvAnysource(
225
+ std::vector<at::Tensor>& tensor,
226
+ int tag) override;
227
+
228
+ c10::intrusive_ptr<Work> barrier(
229
+ const BarrierOptions& opts = BarrierOptions()) override;
230
+
231
+ // Creating a new ProcessGroupMPI will initialize MPI if it is not already initialized
232
+ static c10::intrusive_ptr<ProcessGroupMPI> createProcessGroupMPI(
233
+ std::vector<int> ranks = {});
234
+
235
+ protected:
236
+ using WorkType =
237
+ std::tuple<std::unique_ptr<WorkEntry>, c10::intrusive_ptr<WorkMPI>>;
238
+ // Worker thread loop
239
+ void runLoop();
240
+ // Helper function that is called by the destructor
241
+ void destroy();
242
+
243
+ c10::intrusive_ptr<Work> enqueue(
244
+ std::unique_ptr<WorkEntry> entry,
245
+ const char* profilingTitle = nullptr,
246
+ const c10::optional<std::vector<at::Tensor>>& inputTensors =
247
+ c10::nullopt);
248
+
249
+ bool stop_;
250
+
251
+ std::mutex pgMutex_;
252
+ std::thread workerThread_;
253
+
254
+ std::deque<WorkType> queue_;
255
+ std::condition_variable queueProduceCV_;
256
+ std::condition_variable queueConsumeCV_;
257
+
258
+ // Global states
259
+ static void initMPIOnce();
260
+ static void mpiExit();
261
+ static c10::once_flag onceFlagInitMPI;
262
+
263
+ static std::mutex pgGlobalMutex_;
264
+ static int mpiThreadSupport_;
265
+
266
+ MPI_Comm pgComm_;
267
+ };
268
+
269
+ } // namespace c10d
270
+
271
+ #endif // USE_C10D_MPI
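A similarly hedged sketch for ProcessGroupMPI: per the comments above, the factory returns the single allowed group and each collective takes exactly one tensor. The tensor shape is illustrative.

#ifdef USE_C10D_MPI
#include <torch/csrc/distributed/c10d/ProcessGroupMPI.hpp>
#include <ATen/ATen.h>

void mpiAllreduceOnce() {
  // Initializes MPI on first use and spawns the worker thread loop.
  auto pg = c10d::ProcessGroupMPI::createProcessGroupMPI();
  std::vector<at::Tensor> tensors{at::ones({4})};  // exactly one tensor
  pg->allreduce(tensors)->wait();                  // ReduceOp::SUM by default
}
#endif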
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp ADDED
@@ -0,0 +1,181 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
4
+ #include <torch/csrc/jit/python/pybind_utils.h>
5
+ #include <torch/csrc/utils/pybind.h>
6
+
7
+ namespace c10d {
8
+
9
+ // PyProcessGroup is a pybind11 trampoline class to allow a Python
10
+ // class to inherit from torch.distributed.ProcessGroup
11
+ class PyProcessGroup : public ProcessGroup {
12
+ public:
13
+ // PyWork is a pybind11 trampoline class to allow a Python
14
+ // class to inherit from torch.distributed.Work
15
+ class PyWork : public Work {
16
+ public:
17
+ PyWork() = default;
18
+
19
+ bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
20
+ PYBIND11_OVERRIDE(
21
+ bool, /* Return type */
22
+ Work, /* Parent class */
23
+ wait, /* Name of function in C++ */
24
+ timeout);
25
+ }
26
+
27
+ c10::intrusive_ptr<c10::ivalue::Future> getFuture() override {
28
+ // We cannot use PYBIND11_OVERRIDE because:
29
+ // 1. We have to >MANUALLY< unwrap the PyFutureWrapper and
30
+ // 2. The python name is get_future
31
+ pybind11::gil_scoped_acquire gil;
32
+ auto override =
33
+ pybind11::get_override(static_cast<const Work*>(this), "get_future");
34
+
35
+ if (override) {
36
+ py::object o = override();
37
+ auto futWrapper =
38
+ o.cast<std::shared_ptr<torch::jit::PythonFutureWrapper>>();
39
+ return futWrapper->fut;
40
+ }
41
+
42
+ return Work::getFuture();
43
+ }
44
+ };
45
+
46
+ using ProcessGroup::ProcessGroup;
47
+
48
+ const std::string getBackendName() const override {
49
+ PYBIND11_OVERRIDE_PURE(
50
+ std::string, /* Return type */
51
+ ProcessGroup, /* Parent class */
52
+ getBackendName, /* Name of function in C++ */
53
+ );
54
+ }
55
+
56
+ c10::intrusive_ptr<Work> allgather(
57
+ std::vector<std::vector<at::Tensor>>& outputTensors,
58
+ std::vector<at::Tensor>& inputTensors,
59
+ const AllgatherOptions& opts = AllgatherOptions()) override {
60
+ PYBIND11_OVERRIDE(
61
+ c10::intrusive_ptr<Work>, /* Return type */
62
+ ProcessGroup, /* Parent class */
63
+ allgather, /* Name of function in C++ */
64
+ outputTensors,
65
+ inputTensors,
66
+ opts);
67
+ }
68
+
69
+ c10::intrusive_ptr<Work> allreduce(
70
+ std::vector<at::Tensor>& tensors,
71
+ const AllreduceOptions& opts = AllreduceOptions()) override {
72
+ PYBIND11_OVERRIDE(
73
+ c10::intrusive_ptr<Work>, /* Return type */
74
+ ProcessGroup, /* Parent class */
75
+ allreduce, /* Name of function in C++ */
76
+ tensors,
77
+ opts);
78
+ }
79
+
80
+ c10::intrusive_ptr<Work> barrier(
81
+ const BarrierOptions& opts = BarrierOptions()) override {
82
+ PYBIND11_OVERRIDE(
83
+ c10::intrusive_ptr<Work>, /* Return type */
84
+ ProcessGroup, /* Parent class */
85
+ barrier, /* Name of function in C++ */
86
+ opts);
87
+ }
88
+
89
+ c10::intrusive_ptr<Work> broadcast(
90
+ std::vector<at::Tensor>& tensors,
91
+ const BroadcastOptions& opts = BroadcastOptions()) override {
92
+ PYBIND11_OVERRIDE(
93
+ c10::intrusive_ptr<Work>, /* Return type */
94
+ ProcessGroup, /* Parent class */
95
+ broadcast, /* Name of function in C++ */
96
+ tensors,
97
+ opts);
98
+ }
99
+
100
+ c10::intrusive_ptr<Work> reduce_scatter(
101
+ std::vector<at::Tensor>& outputTensors,
102
+ std::vector<std::vector<at::Tensor>>& inputTensors,
103
+ const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
104
+ PYBIND11_OVERRIDE(
105
+ c10::intrusive_ptr<Work>, /* Return type */
106
+ ProcessGroup, /* Parent class */
107
+ reduce_scatter, /* Name of function in C++ */
108
+ outputTensors,
109
+ inputTensors,
110
+ opts);
111
+ }
112
+
113
+ c10::intrusive_ptr<Work> send(
114
+ std::vector<at::Tensor>& tensors,
115
+ int dstRank,
116
+ int tag) override {
117
+ PYBIND11_OVERRIDE(
118
+ c10::intrusive_ptr<Work>, /* Return type */
119
+ ProcessGroup, /* Parent class */
120
+ send, /* Name of function in C++ */
121
+ tensors,
122
+ dstRank,
123
+ tag);
124
+ }
125
+
126
+ c10::intrusive_ptr<Work> recv(
127
+ std::vector<at::Tensor>& tensors,
128
+ int srcRank,
129
+ int tag) override {
130
+ PYBIND11_OVERRIDE(
131
+ c10::intrusive_ptr<Work>, /* Return type */
132
+ ProcessGroup, /* Parent class */
133
+ recv, /* Name of function in C++ */
134
+ tensors,
135
+ srcRank,
136
+ tag);
137
+ }
138
+ };
139
+
140
+ class TORCH_PYTHON_API PythonOnCompletionHook {
141
+ public:
142
+ // Wraps a py::object hook and acquires Python GIL in dtor before
143
+ // destructing the hook object.
144
+ PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {}
145
+
146
+ ~PythonOnCompletionHook() {
147
+ py::gil_scoped_acquire ag;
148
+ hook_.dec_ref();
149
+ // Explicitly set hook_ to nullptr to prevent py::object's dtor
150
+ // from decref'ing the PyObject again.
151
+ // See Note [Destructing py::object] in python_ivalue.h
152
+ hook_.ptr() = nullptr;
153
+ }
154
+
155
+ void operator()(std::shared_ptr<WorkInfo> workInfo) const {
156
+ std::exception_ptr eptr;
157
+ {
158
+ py::gil_scoped_acquire acquire;
159
+ try {
160
+ hook_(workInfo);
161
+ } catch (py::error_already_set& e) {
162
+ // py::error_already_set requires GIL to destruct, take
163
+ // special care.
164
+ eptr = std::make_exception_ptr(std::runtime_error(e.what()));
165
+ e.restore();
166
+ PyErr_Clear();
167
+ } catch (std::exception& e) {
168
+ eptr = std::current_exception();
169
+ }
170
+ }
171
+ // No more Python-related stuff at this point, i.e., this
172
+ // exception can be captured and handled by PG backend.
173
+ if (eptr)
174
+ std::rethrow_exception(eptr);
175
+ }
176
+
177
+ private:
178
+ py::object hook_;
179
+ };
180
+
181
+ } // namespace c10d
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp ADDED
@@ -0,0 +1,101 @@
1
+ #pragma once
2
+
3
+ #include <chrono>
4
+ #include <cstdint>
5
+ #include <stdexcept>
6
+ #include <string>
7
+ #include <vector>
8
+
9
+ #include <c10/macros/Macros.h>
10
+ #include <torch/custom_class.h>
11
+
12
+ namespace c10d {
13
+
14
+ // callback function will be given arguments (optional<string> oldValue,
15
+ // optional<string> newValue)
16
+ using WatchKeyCallback =
17
+ std::function<void(c10::optional<std::string>, c10::optional<std::string>)>;
18
+
19
+ class TORCH_API Store : public torch::CustomClassHolder {
20
+ public:
21
+ static constexpr std::chrono::milliseconds kDefaultTimeout =
22
+ std::chrono::seconds(300);
23
+ static constexpr std::chrono::milliseconds kNoTimeout =
24
+ std::chrono::milliseconds::zero();
25
+
26
+ Store() : timeout_(kDefaultTimeout) {}
27
+
28
+ explicit Store(const std::chrono::milliseconds& timeout)
29
+ : timeout_(timeout) {}
30
+
31
+ Store(const Store&) = default;
32
+ Store(Store&&) noexcept = default;
33
+
34
+ ~Store() override = default;
35
+
36
+ void set(const std::string& key, const std::string& value);
37
+
38
+ virtual void set(
39
+ const std::string& key,
40
+ const std::vector<uint8_t>& value) = 0;
41
+
42
+ std::string compareSet(
43
+ const std::string& key,
44
+ const std::string& currentValue,
45
+ const std::string& newValue);
46
+
47
+ virtual std::vector<uint8_t> compareSet(
48
+ const std::string& key,
49
+ const std::vector<uint8_t>& currentValue,
50
+ const std::vector<uint8_t>& newValue) {
51
+ TORCH_INTERNAL_ASSERT(false, "Not implemented.");
52
+ }
53
+
54
+ std::string get_to_str(const std::string& key);
55
+
56
+ virtual std::vector<uint8_t> get(const std::string& key) = 0;
57
+
58
+ virtual int64_t add(const std::string& key, int64_t value) = 0;
59
+
60
+ virtual bool deleteKey(const std::string& key) = 0;
61
+
62
+ virtual bool check(const std::vector<std::string>& keys) = 0;
63
+
64
+ virtual int64_t getNumKeys() = 0;
65
+
66
+ virtual void wait(const std::vector<std::string>& keys) = 0;
67
+
68
+ virtual void wait(
69
+ const std::vector<std::string>& keys,
70
+ const std::chrono::milliseconds& timeout) = 0;
71
+
72
+ virtual const std::chrono::milliseconds& getTimeout() const noexcept;
73
+
74
+ virtual void setTimeout(const std::chrono::milliseconds& timeout);
75
+
76
+ // watchKey() is deprecated and no longer supported.
77
+ virtual void watchKey(
78
+ const std::string& /* unused */,
79
+ WatchKeyCallback /* unused */) {
80
+ TORCH_CHECK(false, "watchKey is deprecated, no implementation support it.");
81
+ }
82
+
83
+ virtual void append(
84
+ const std::string& key,
85
+ const std::vector<uint8_t>& value);
86
+
87
+ virtual std::vector<std::vector<uint8_t>> multiGet(
88
+ const std::vector<std::string>& keys);
89
+
90
+ virtual void multiSet(
91
+ const std::vector<std::string>& keys,
92
+ const std::vector<std::vector<uint8_t>>& values);
93
+
94
+ // Returns true if this store supports append, multiGet and multiSet
95
+ virtual bool hasExtendedApi() const;
96
+
97
+ protected:
98
+ std::chrono::milliseconds timeout_;
99
+ };
100
+
101
+ } // namespace c10d
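Store.hpp above is mostly a pure-virtual interface. The following hedged sketch shows the smallest subclass that satisfies it: a single-process, in-memory store whose wait() simply checks key presence. Everything about it (class name, counter encoding) is illustrative rather than part of the diff.

#include <torch/csrc/distributed/c10d/Store.hpp>

#include <map>
#include <string>

class InMemoryStore : public c10d::Store {
 public:
  void set(const std::string& key, const std::vector<uint8_t>& value) override {
    data_[key] = value;
  }
  std::vector<uint8_t> get(const std::string& key) override {
    return data_.at(key);
  }
  int64_t add(const std::string& key, int64_t value) override {
    // Store the counter as decimal text; good enough for a sketch.
    int64_t current = 0;
    auto it = data_.find(key);
    if (it != data_.end()) {
      current = std::stoll(std::string(it->second.begin(), it->second.end()));
    }
    current += value;
    const std::string s = std::to_string(current);
    data_[key] = std::vector<uint8_t>(s.begin(), s.end());
    return current;
  }
  bool deleteKey(const std::string& key) override {
    return data_.erase(key) > 0;
  }
  bool check(const std::vector<std::string>& keys) override {
    for (const auto& k : keys) {
      if (data_.find(k) == data_.end()) {
        return false;
      }
    }
    return true;
  }
  int64_t getNumKeys() override {
    return static_cast<int64_t>(data_.size());
  }
  void wait(const std::vector<std::string>& keys) override {
    wait(keys, timeout_);
  }
  void wait(
      const std::vector<std::string>& keys,
      const std::chrono::milliseconds& /*timeout*/) override {
    // A real distributed store would block here; this one just validates.
    TORCH_CHECK(check(keys), "InMemoryStore: requested key(s) not present");
  }

 private:
  std::map<std::string, std::vector<uint8_t>> data_;
};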
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Types.hpp ADDED
@@ -0,0 +1,180 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Store.hpp>
4
+
5
+ #include <chrono>
6
+ #include <cstdint>
7
+
8
+ #include <ATen/core/Tensor.h>
9
+ #include <ATen/core/ivalue.h>
10
+
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/intrusive_ptr.h>
13
+
14
+ namespace c10d {
15
+
16
+ // Base class for supplementary data potentially needed by ReduceOps
17
+ struct TORCH_API _SupplementBase : torch::CustomClassHolder {
18
+ ~_SupplementBase() override = default;
19
+ };
20
+
21
+ // Supplementary data specific to NCCL PREMUL_SUM
22
+ // The point of use in ProcessGroupNCCL knows how to unpack it.
23
+ struct NCCLPreMulSumSupplement : _SupplementBase {
24
+ double double_factor{0.0};
25
+ at::Tensor tensor_factor;
26
+ NCCLPreMulSumSupplement(double f) : double_factor{f} {}
27
+ NCCLPreMulSumSupplement(at::Tensor t) : tensor_factor{std::move(t)} {
28
+ TORCH_CHECK_EQ(tensor_factor.numel(), 1);
29
+ }
30
+ };
31
+
32
+ // Other ReduceOps that need different supplementary data can also
33
+ // derive from _SupplementBase.
34
+ struct TORCH_API ReduceOp : torch::CustomClassHolder {
35
+ // note(crcrpar): RedOpType could be defined outside of `ReduceOp`
36
+ enum RedOpType : uint8_t {
37
+ SUM = 0,
38
+ AVG = 1,
39
+ PRODUCT = 2,
40
+ MIN = 3,
41
+ MAX = 4,
42
+ BAND = 5, // Bitwise AND
43
+ BOR = 6, // Bitwise OR
44
+ BXOR = 7, // Bitwise XOR
45
+ PREMUL_SUM = 8, // Multiply by a user-supplied constant before summing.
46
+ UNUSED = 9
47
+ };
48
+
49
+ ReduceOp() = default;
50
+
51
+ ReduceOp(RedOpType op) : op_(op) {
52
+ TORCH_INTERNAL_ASSERT(
53
+ op_ != PREMUL_SUM,
54
+ "Use `torch.distributed._make_nccl_premul_sum` to create an instance of ReduceOp with PREMUL_SUM");
55
+ }
56
+
57
+ ReduceOp(
58
+ RedOpType op,
59
+ c10::intrusive_ptr<_SupplementBase> optional_supplement) {
60
+ if (optional_supplement.get()) {
61
+ op_ = op;
62
+ } else {
63
+ supplement_ = optional_supplement;
64
+ }
65
+ }
66
+
67
+ // The heap resource supplement_, if it exists, is managed by a
68
+ // c10::intrusive_ptr, so constructors and operator= can be simple
69
+ ReduceOp(const ReduceOp& other)
70
+ : op_(other.op_), supplement_(other.supplement_) {}
71
+
72
+ const ReduceOp& operator=(const ReduceOp& other) {
73
+ op_ = other.op_;
74
+ supplement_ = other.supplement_;
75
+ return *this;
76
+ }
77
+
78
+ operator RedOpType() const {
79
+ return op_;
80
+ }
81
+
82
+ bool operator==(const std::uint8_t other) {
83
+ TORCH_INTERNAL_ASSERT(other < 9, "Invalid other op value");
84
+ return other == op_;
85
+ }
86
+
87
+ bool operator==(const ReduceOp::RedOpType other) {
88
+ return *this == static_cast<std::uint8_t>(other);
89
+ }
90
+
91
+ // todo(crcrpar): Handle `RedOpType::PREMUL_SUM` with its scaling factor.
92
+ bool operator==(const ReduceOp& other) {
93
+ return *this == other.op_;
94
+ }
95
+
96
+ RedOpType op_ = SUM;
97
+ // supplement_ is "type-erased" storage for optional supplementary
98
+ // data the op might need.
99
+ // The point of use will know which derived type supplement_ really is,
100
+ // and downcast its pointer to extract the data as the needed type(s).
101
+ // Right now, only PREMUL_SUM needs supplementary data, but the same
102
+ // mechanism could extend to support other nontrivial reduce ops with
103
+ // different supplementary payloads.
104
+ c10::intrusive_ptr<_SupplementBase> supplement_;
105
+ };
106
+
107
+ template <typename T>
108
+ ReduceOp makeNCCLPreMulSum(const T& factor) {
109
+ ReduceOp rop;
110
+ rop.op_ = ReduceOp::PREMUL_SUM;
111
+ rop.supplement_ = c10::make_intrusive<NCCLPreMulSumSupplement>(factor);
112
+ return rop;
113
+ }
114
+
115
+ constexpr auto kUnsetTimeout = std::chrono::milliseconds(-1);
116
+
117
+ struct BroadcastOptions {
118
+ int64_t rootRank = 0;
119
+ int64_t rootTensor = 0;
120
+ std::chrono::milliseconds timeout = kUnsetTimeout;
121
+ bool asyncOp = true;
122
+ };
123
+
124
+ struct AllreduceOptions {
125
+ ReduceOp reduceOp = ReduceOp::SUM;
126
+ std::chrono::milliseconds timeout = kUnsetTimeout;
127
+ c10::optional<at::Tensor> sparseIndices = c10::nullopt;
128
+ };
129
+
130
+ struct AllreduceCoalescedOptions : AllreduceOptions {};
131
+
132
+ struct ReduceOptions {
133
+ ReduceOp reduceOp = ReduceOp::SUM;
134
+ int64_t rootRank = 0;
135
+ int64_t rootTensor = 0;
136
+ std::chrono::milliseconds timeout = kUnsetTimeout;
137
+ };
138
+
139
+ struct AllgatherOptions {
140
+ std::chrono::milliseconds timeout = kUnsetTimeout;
141
+ bool asyncOp = true;
142
+ };
143
+
144
+ struct GatherOptions {
145
+ int64_t rootRank = 0;
146
+ std::chrono::milliseconds timeout = kUnsetTimeout;
147
+ };
148
+
149
+ struct ScatterOptions {
150
+ int64_t rootRank = 0;
151
+ std::chrono::milliseconds timeout = kUnsetTimeout;
152
+ bool asyncOp = true;
153
+ };
154
+
155
+ struct ReduceScatterOptions {
156
+ ReduceOp reduceOp = ReduceOp::SUM;
157
+ std::chrono::milliseconds timeout = kUnsetTimeout;
158
+ bool asyncOp = true;
159
+ };
160
+
161
+ struct AllToAllOptions {
162
+ std::chrono::milliseconds timeout = kUnsetTimeout;
163
+ };
164
+
165
+ struct BarrierOptions {
166
+ std::vector<int64_t> device_ids;
167
+ std::chrono::milliseconds timeout = kUnsetTimeout;
168
+ c10::optional<at::Device> device;
169
+ };
170
+
171
+ struct DistributedBackendOptions {
172
+ c10::intrusive_ptr<::c10d::Store> store;
173
+ int group_rank;
174
+ int group_size;
175
+ std::chrono::duration<float> timeout;
176
+ std::string group_id;
177
+ std::vector<int64_t> global_ranks_in_group;
178
+ };
179
+
180
+ } // namespace c10d
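A hedged sketch of filling in the option structs and ReduceOp defined above; the particular timeout and scaling factor are illustrative.

#include <torch/csrc/distributed/c10d/Types.hpp>

c10d::AllreduceOptions makeAvgAllreduceOptions() {
  c10d::AllreduceOptions opts;
  opts.reduceOp = c10d::ReduceOp::AVG;              // implicit RedOpType ctor
  opts.timeout = std::chrono::milliseconds(60000);  // replaces kUnsetTimeout
  return opts;
}

// PREMUL_SUM cannot be built through the plain RedOpType constructor (it
// asserts); the factory stores the scaling factor in the supplement instead.
c10d::ReduceOp premulSumByTwo() {
  return c10d::makeNCCLPreMulSum(2.0);
}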
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCUtils.hpp ADDED
@@ -0,0 +1,187 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_C10D_UCC
4
+
5
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
6
+ #include <torch/csrc/distributed/c10d/Store.hpp>
7
+ #include <ucc/api/ucc.h>
8
+
9
+ namespace c10d {
10
+
11
+ // Macro to generate the error message on a non-successful UCC return value.
12
+ #define TORCH_UCC_GET_ERROR_MSG(_err, _error_msg, _result) \
13
+ do { \
14
+ _err = c10::str( \
15
+ "[", \
16
+ std::string(__FILE__), \
17
+ ":", \
18
+ std::to_string(__LINE__), \
19
+ "] ", \
20
+ logger->getLogPrefix(), \
21
+ _error_msg, \
22
+ ", error code ", \
23
+ _result, \
24
+ ": ", \
25
+ ucc_status_string(_result), \
26
+ ", system error code ", \
27
+ errno); \
28
+ } while (0)
29
+
30
+ // Macro to throw on a non-successful UCC return value.
31
+ #define TORCH_UCC_CHECK(_cmd, _error_msg) \
32
+ do { \
33
+ ucc_status_t result = _cmd; \
34
+ if (result != UCC_OK) { \
35
+ std::string err; \
36
+ TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \
37
+ TORCH_CHECK(false, err); \
38
+ } \
39
+ } while (0)
40
+
41
+ // Macro to throw on a non-successful UCC return value and free its request.
42
+ #define TORCH_UCC_CHECK_REQUEST(_request, _cmd, _error_msg) \
43
+ do { \
44
+ ucc_status_t result = _cmd; \
45
+ if (result != UCC_OK) { \
46
+ std::string err; \
47
+ TORCH_UCC_GET_ERROR_MSG(err, _error_msg, result); \
48
+ if (_request != nullptr) { \
49
+ ucc_collective_finalize(_request); \
50
+ } \
51
+ TORCH_CHECK(false, err); \
52
+ } \
53
+ } while (0)
54
+
55
+ // Macros to print logs with unified format
56
+ #define TORCH_UCC_LOG_ERROR(_phase, _msg) \
57
+ LOG(ERROR) << logger->getLogPrefix(_phase) << "[ERROR] " << _msg;
58
+ #define TORCH_UCC_LOG_INFO(_phase, _msg) \
59
+ LOG(INFO) << logger->getLogPrefix(_phase) << "[INFO] " << _msg;
60
+ #define TORCH_UCC_LOG_DEBUG(_phase, _msg) \
61
+ VLOG(1) << logger->getLogPrefix(_phase) << "[DEBUG] " << _msg;
62
+
63
+ enum torch_ucc_phase_t {
64
+ TORCH_UCC_UNKNOWN = -1,
65
+ TORCH_UCC_INIT,
66
+ TORCH_UCC_HEALTH_CHECK,
67
+ TORCH_UCC_READY,
68
+ TORCH_UCC_COLL_POST,
69
+ TORCH_UCC_COLL_PROGRESS,
70
+ TORCH_UCC_FINALIZE,
71
+ };
72
+
73
+ const std::map<torch_ucc_phase_t, std::string> ucc_phase_map = {
74
+ {TORCH_UCC_UNKNOWN, "UNKNOWN"},
75
+ {TORCH_UCC_INIT, "INIT"},
76
+ {TORCH_UCC_HEALTH_CHECK, "HEALTH_CHECK"},
77
+ {TORCH_UCC_READY, "READY"},
78
+ {TORCH_UCC_COLL_POST, "COLL_POST"},
79
+ {TORCH_UCC_COLL_PROGRESS, "COLL_PROGRESS"},
80
+ {TORCH_UCC_FINALIZE, "FINALIZE"},
81
+ };
82
+
83
+ class CommTraceLogger;
84
+
85
+ class TORCH_API ProcessGroupUCCLogger : public torch::CustomClassHolder {
86
+ public:
87
+ ProcessGroupUCCLogger();
88
+ ProcessGroupUCCLogger(std::string log_prefix, torch_ucc_phase_t phase);
89
+
90
+ std::string getLogPrefix(torch_ucc_phase_t phase = TORCH_UCC_UNKNOWN);
91
+ void setLogPrefix(std::string log_prefix);
92
+ inline void setPhase(torch_ucc_phase_t phase) {
93
+ local_phase = phase;
94
+ }
95
+
96
+ void initCommsTracer();
97
+ void flushComms(int rank, int world_size);
98
+ std::shared_ptr<CommTraceLogger> trace_generator = nullptr;
99
+
100
+ protected:
101
+ std::string log_prefix;
102
+ torch_ucc_phase_t local_phase = TORCH_UCC_UNKNOWN;
103
+ bool initialized_CommTraceLogger = false;
104
+ };
105
+
106
+ struct torch_ucc_oob_coll_info_t {
107
+ c10::intrusive_ptr<Store> store;
108
+ uint32_t comm_id;
109
+ int rank;
110
+ int size;
111
+ void* rbuf;
112
+ size_t msglen;
113
+ std::string getKey(std::string key) {
114
+ return std::to_string(comm_id) + key;
115
+ }
116
+ };
117
+
118
+ class CommBase {
119
+ public:
120
+ CommBase(const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger_)
121
+ : logger(logger_) {}
122
+ virtual void progress() = 0;
123
+ virtual void free_request(ucc_coll_req_h request) = 0;
124
+ virtual ~CommBase() {}
125
+ c10::intrusive_ptr<ProcessGroupUCCLogger> logger;
126
+ };
127
+ class CommUCC : public CommBase {
128
+ public:
129
+ ucc_lib_h lib{nullptr};
130
+ ucc_context_h context{nullptr};
131
+
132
+ public:
133
+ void progress() override;
134
+ CommUCC(
135
+ std::shared_ptr<torch_ucc_oob_coll_info_t> oob,
136
+ const c10::intrusive_ptr<ProcessGroupUCCLogger>& logger);
137
+ void free_request(ucc_coll_req_h request) override;
138
+ ~CommUCC();
139
+ };
140
+
141
+ ucc_status_t oob_allgather(
142
+ void* sbuf,
143
+ void* rbuf,
144
+ size_t msglen,
145
+ void* coll_info,
146
+ void** req);
147
+
148
+ ucc_status_t oob_allgather_test(void* req);
149
+
150
+ ucc_status_t oob_allgather_free(void* req);
151
+
152
+ // trim: remove spaces before and after the string view
153
+ // implementation borrowed from https://stackoverflow.com/a/17976541
154
+ inline c10::string_view trim(c10::string_view s) {
155
+ auto wsfront = std::find_if_not(
156
+ s.begin(), s.end(), [](int c) { return std::isspace(c); });
157
+ auto wsback = std::find_if_not(s.rbegin(), s.rend(), [](int c) {
158
+ return std::isspace(c);
159
+ }).base();
160
+ return (
161
+ wsback <= wsfront ? "" : s.substr(wsfront - s.begin(), wsback - wsfront));
162
+ }
163
+
164
+ inline std::string tolower(c10::string_view s) {
165
+ std::string result;
166
+ result.reserve(s.size());
167
+ for (auto c : s) {
168
+ result.push_back(std::tolower(c));
169
+ }
170
+ return result;
171
+ }
172
+
173
+ inline std::vector<std::string> parse_list(std::string list) {
174
+ std::vector<std::string> result;
175
+ list = tolower(trim(list));
176
+ while (!list.empty()) {
177
+ const auto end_pos = list.find_first_of(',');
178
+ const auto token = trim(list.substr(0, end_pos));
179
+ result.push_back(std::string(token));
180
+ list = (end_pos != c10::string_view::npos) ? list.substr(end_pos + 1) : "";
181
+ }
182
+ return result;
183
+ }
184
+
185
+ } // namespace c10d
186
+
187
+ #endif // USE_C10D_UCC
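The string helpers at the bottom of UCCUtils.hpp are used to parse comma-separated configuration values. A hedged, illustrative example:

#ifdef USE_C10D_UCC
#include <torch/csrc/distributed/c10d/UCCUtils.hpp>

std::vector<std::string> exampleTransportList() {
  // trim() and tolower() are applied per token, so this yields {"tcp", "cuda"}.
  return c10d::parse_list(" TCP , CUDA ");
}
#endif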
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/Utils.hpp>
4
+
5
+ namespace c10d {
6
+ namespace tcputil {
7
+
8
+ #define CONNECT_SOCKET_OFFSET 1
9
+
10
+ inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) {
11
+ return WSAPoll(fdArray, fds, timeout);
12
+ }
13
+
14
+ inline void addPollfd(
15
+ std::vector<struct pollfd>& fds,
16
+ int socket,
17
+ short events) {
18
+ fds.push_back({(SOCKET)socket, events});
19
+ }
20
+
21
+ inline struct ::pollfd getPollfd(int socket, short events) {
22
+ struct ::pollfd res = {(SOCKET)socket, events};
23
+ return res;
24
+ }
25
+
26
+ } // namespace tcputil
27
+ } // namespace c10d
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h ADDED
@@ -0,0 +1,56 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <cstring>
10
+ #include <system_error>
11
+
12
+ #include <fmt/format.h>
13
+
14
+ namespace fmt {
15
+
16
+ template <>
17
+ struct formatter<std::error_category> {
18
+ constexpr decltype(auto) parse(format_parse_context& ctx) const {
19
+ return ctx.begin();
20
+ }
21
+
22
+ template <typename FormatContext>
23
+ decltype(auto) format(const std::error_category& cat, FormatContext& ctx)
24
+ const {
25
+ if (std::strcmp(cat.name(), "generic") == 0) {
26
+ return fmt::format_to(ctx.out(), "errno");
27
+ } else {
28
+ return fmt::format_to(ctx.out(), "{} error", cat.name());
29
+ }
30
+ }
31
+ };
32
+
33
+ template <>
34
+ struct formatter<std::error_code> {
35
+ constexpr decltype(auto) parse(format_parse_context& ctx) const {
36
+ return ctx.begin();
37
+ }
38
+
39
+ template <typename FormatContext>
40
+ decltype(auto) format(const std::error_code& err, FormatContext& ctx) const {
41
+ return fmt::format_to(
42
+ ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message());
43
+ }
44
+ };
45
+
46
+ } // namespace fmt
47
+
48
+ namespace c10d {
49
+ namespace detail {
50
+
51
+ inline std::error_code lastError() noexcept {
52
+ return std::error_code{errno, std::generic_category()};
53
+ }
54
+
55
+ } // namespace detail
56
+ } // namespace c10d
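The fmt specializations above let a std::error_code be interpolated directly into messages. A hedged sketch around a failing POSIX call (the call itself is illustrative):

#include <torch/csrc/distributed/c10d/error.h>

#include <string>
#include <unistd.h>

std::string describeCloseFailure(int fd) {
  if (::close(fd) != 0) {
    // With the generic category this renders as e.g.
    // "close failed (errno: 9 - Bad file descriptor)".
    return fmt::format("close failed {}", c10d::detail::lastError());
  }
  return "ok";
}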
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h ADDED
@@ -0,0 +1,33 @@
1
+ // Copyright (c) Facebook, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <stdexcept>
10
+
11
+ #include <c10/macros/Macros.h>
12
+ #include <c10/util/Exception.h>
13
+
14
+ // Utility macro similar to C10_THROW_ERROR, the major difference is that this
15
+ // macro handles exception types defined in the c10d namespace, whereas
16
+ // C10_THROW_ERROR requires an exception to be defined in the c10 namespace.
17
+ #define C10D_THROW_ERROR(err_type, msg) \
18
+ throw ::c10d::err_type( \
19
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
20
+
21
+ namespace c10d {
22
+
23
+ using c10::DistNetworkError;
24
+
25
+ class TORCH_API SocketError : public DistNetworkError {
26
+ using DistNetworkError::DistNetworkError;
27
+ };
28
+
29
+ class TORCH_API TimeoutError : public DistNetworkError {
30
+ using DistNetworkError::DistNetworkError;
31
+ };
32
+
33
+ } // namespace c10d
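C10D_THROW_ERROR pairs with the exception types declared above; a hedged, illustrative use:

#include <torch/csrc/distributed/c10d/exception.h>

void ensureConnected(bool connected) {
  if (!connected) {
    // Expands to: throw ::c10d::SocketError({__func__, __FILE__, __LINE__}, msg)
    C10D_THROW_ERROR(SocketError, "peer is not connected");
  }
}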
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/python_comm_hook.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/distributed/c10d/comm.hpp>
4
+
5
+ #include <ATen/ATen.h>
6
+ #include <ATen/core/ivalue.h>
7
+ #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
8
+ #include <torch/csrc/utils/pybind.h>
9
+
10
+ namespace c10d {
11
+
12
+ class TORCH_PYTHON_API PythonCommHook : public CommHookInterface {
13
+ public:
14
+ // Takes a state and a callable hook. The inputs are Python objects.
15
+ // The state is passed to the hook in runHook method, and it can be used to
16
+ // maintain and update any state information during the execution of the hook.
17
+ // The hook performs user-specified processing and returns a future indicating
18
+ // asychronous communication of gradients.
19
+ PythonCommHook(py::object state, py::object hook)
20
+ : state_(std::move(state)), hook_(std::move(hook)) {}
21
+
22
+ ~PythonCommHook() override;
23
+
24
+ c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
25
+
26
+ at::Tensor parseHookResult(const c10::IValue& result) override;
27
+
28
+ private:
29
+ // Only needed for stateful communication.
30
+ py::object state_;
31
+ py::object hook_;
32
+ };
33
+
34
+ } // namespace c10d
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer_timer.hpp ADDED
@@ -0,0 +1,81 @@
1
+ #pragma once
2
+ #include <c10/util/ApproximateClock.h>
3
+ #include <torch/csrc/autograd/profiler.h>
4
+
5
+ namespace c10d {
6
+ constexpr int kUnsetTime = -1;
7
+
8
+ inline int64_t current_time_in_nanos() {
9
+ return c10::getTime();
10
+ }
11
+
12
+ class TORCH_API Timer {
13
+ private:
14
+ // The timestamp of forward call start time in each iteration.
15
+ int64_t forward_start_time = kUnsetTime;
16
+ // The timestamp of backward computation start and end time in each
17
+ // iteration.
18
+ int64_t backward_compute_start_time = kUnsetTime;
19
+ int64_t backward_compute_end_time = kUnsetTime;
20
+ // The timestamp of first communication call start time in each iteration.
21
+ int64_t backward_comm_start_time = kUnsetTime;
22
+ // The timestamp of last communication call end time in each iteration.
23
+ int64_t backward_comm_end_time = kUnsetTime;
24
+
25
+ public:
26
+ enum class Event {
27
+ kForwardStart,
28
+ kBackwardComputeStart,
29
+ kBackwardComputeEnd,
30
+ kBackwardCommStart,
31
+ kBackwardCommEnd,
32
+ };
33
+
34
+ // Record the current event, i.e., mark it as having occurred now. Default
35
+ // CPU implementation.
36
+ virtual void record(Event event) {
37
+ getTimeRef(event) = current_time_in_nanos();
38
+ }
39
+
40
+ // Return the difference between when two events occurred, in nanoseconds.
41
+ // Or nullopt if one of them hasn't been recorded.
42
+ virtual c10::optional<int64_t> measureDifference(Event start, Event end) = 0;
43
+
44
+ virtual ~Timer() = default;
45
+
46
+ // Return host-side timestamp, or nullopt if it has not yet been recorded.
47
+ c10::optional<int64_t> getTimestamp(Event event) {
48
+ auto time = getTimeRef(event);
49
+ if (time == kUnsetTime) {
50
+ return c10::nullopt;
51
+ } else {
52
+ return time;
53
+ }
54
+ }
55
+
56
+ // Return host-side time member variable corresponding to the given event.
57
+ int64_t& getTimeRef(Event event) {
58
+ switch (event) {
59
+ case Event::kForwardStart:
60
+ return forward_start_time;
61
+ case Event::kBackwardComputeStart:
62
+ return backward_compute_start_time;
63
+ case Event::kBackwardComputeEnd:
64
+ return backward_compute_end_time;
65
+ case Event::kBackwardCommStart:
66
+ return backward_comm_start_time;
67
+ case Event::kBackwardCommEnd:
68
+ return backward_comm_end_time;
69
+ default:
70
+ TORCH_INTERNAL_ASSERT(false);
71
+ }
72
+ }
73
+ };
74
+
75
+ TORCH_DECLARE_TYPED_REGISTRY(
76
+ TimerRegistry,
77
+ c10::DeviceType,
78
+ Timer,
79
+ std::unique_ptr,
80
+ c10::Device);
81
+ } // namespace c10d
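Timer above leaves only measureDifference() pure virtual. Below is a hedged sketch of a host-only implementation built on the record()/getTimestamp() helpers the base class already provides; the class name is illustrative.

#include <torch/csrc/distributed/c10d/reducer_timer.hpp>

class CpuOnlyTimer : public c10d::Timer {
 public:
  c10::optional<int64_t> measureDifference(Event start, Event end) override {
    auto start_ns = getTimestamp(start);
    auto end_ns = getTimestamp(end);
    if (!start_ns || !end_ns || *end_ns < *start_ns) {
      return c10::nullopt;  // an endpoint was never recorded, or is stale
    }
    return *end_ns - *start_ns;
  }
};

// Usage: call record(Event::kForwardStart) when forward begins, then later
// measureDifference(Event::kForwardStart, Event::kBackwardComputeStart).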
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h ADDED
@@ -0,0 +1,93 @@
1
+ // Copyright (c) Meta Platforms, Inc. and its affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #pragma once
8
+
9
+ #include <chrono>
10
+ #include <cstdint>
11
+ #include <memory>
12
+ #include <string>
13
+
14
+ #include <c10/macros/Macros.h>
15
+ #include <c10/util/Exception.h>
16
+ #include <torch/csrc/distributed/c10d/exception.h>
17
+
18
+ namespace c10d {
19
+ namespace detail {
20
+
21
+ class SocketOptions {
22
+ public:
23
+ SocketOptions& prefer_ipv6(bool value) noexcept {
24
+ prefer_ipv6_ = value;
25
+
26
+ return *this;
27
+ }
28
+
29
+ bool prefer_ipv6() const noexcept {
30
+ return prefer_ipv6_;
31
+ }
32
+
33
+ SocketOptions& connect_timeout(std::chrono::seconds value) noexcept {
34
+ connect_timeout_ = value;
35
+
36
+ return *this;
37
+ }
38
+
39
+ std::chrono::seconds connect_timeout() const noexcept {
40
+ return connect_timeout_;
41
+ }
42
+
43
+ private:
44
+ bool prefer_ipv6_ = true;
45
+ std::chrono::seconds connect_timeout_{30};
46
+ };
47
+
48
+ class SocketImpl;
49
+
50
+ class Socket {
51
+ public:
52
+ // This function initializes the underlying socket library and must be called
53
+ // before any other socket function.
54
+ static void initialize();
55
+
56
+ static Socket listen(std::uint16_t port, const SocketOptions& opts = {});
57
+
58
+ static Socket listenFromFd(int fd, std::uint16_t expected_port);
59
+
60
+ static Socket connect(
61
+ const std::string& host,
62
+ std::uint16_t port,
63
+ const SocketOptions& opts = {});
64
+
65
+ Socket() noexcept = default;
66
+
67
+ Socket(const Socket& other) = delete;
68
+
69
+ Socket& operator=(const Socket& other) = delete;
70
+
71
+ Socket(Socket&& other) noexcept;
72
+
73
+ Socket& operator=(Socket&& other) noexcept;
74
+
75
+ ~Socket();
76
+
77
+ Socket accept() const;
78
+
79
+ int handle() const noexcept;
80
+
81
+ std::uint16_t port() const;
82
+
83
+ bool waitForInput(std::chrono::milliseconds timeout);
84
+
85
+ private:
86
+ explicit Socket(std::unique_ptr<SocketImpl>&& impl) noexcept;
87
+
88
+ std::unique_ptr<SocketImpl> impl_;
89
+ };
90
+
91
+ } // namespace detail
92
+
93
+ } // namespace c10d
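A minimal sketch (not part of the commit) showing how the `detail::Socket` API above composes. The port and host are placeholders, and error handling through the `c10d` exception types is omitted.

    #include <torch/csrc/distributed/c10d/socket.h>

    #include <chrono>

    void socket_example() {
      using namespace c10d::detail;

      Socket::initialize();  // must be called before any other socket function

      SocketOptions opts;
      opts.prefer_ipv6(false).connect_timeout(std::chrono::seconds(10));

      // Server side: bind and wait for a peer.
      Socket listener = Socket::listen(/*port=*/29500, opts);
      Socket peer = listener.accept();

      if (peer.waitForInput(std::chrono::milliseconds(500))) {
        int fd = peer.handle();  // raw descriptor for your own read/write loop
        (void)fd;
      }
    }

    // The matching client (normally running in another process) would call:
    //   Socket s = Socket::connect("127.0.0.1", 29500, opts);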
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void AnnotateWarns(const std::shared_ptr<Graph>& graph);
9
+
10
+ } // namespace jit
11
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h ADDED
@@ -0,0 +1,12 @@
1
+
2
+ #pragma once
3
+
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void CheckStrictFusion(std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h ADDED
@@ -0,0 +1,144 @@
1
+ /**
2
+ * Cache utils in this file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/cache.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <functional>
9
+ #include <list>
10
+ #include <memory>
11
+ #include <mutex>
12
+ #include <unordered_map>
13
+ #include <utility>
14
+
15
+ namespace torch {
16
+ namespace lazy {
17
+
18
+ // Generic key and object cache with LRU expiration policy. The objects of type
19
+ // T will be stored as std::shared_ptr<T> and taken and returned as such, by the
20
+ // cache API.
21
+ template <
22
+ typename K,
23
+ typename T,
24
+ typename H = std::hash<K>,
25
+ typename E = std::equal_to<K>>
26
+ class Cache {
27
+ public:
28
+ using TypePtr = std::shared_ptr<T>;
29
+ using Element = std::pair<K, TypePtr>;
30
+
31
+ explicit Cache(size_t max_size) : max_size_(max_size) {}
32
+
33
+ // Adds an object to the cache, unless it already exists. If the cache grows
34
+ // beyond the limit set during construction, the oldest used object will be
35
+ // removed from the cache.
36
+ TypePtr Add(K key, TypePtr object) {
37
+ if (!max_size_) {
38
+ return object;
39
+ }
40
+ std::lock_guard<std::mutex> slock(lock_);
41
+ element_list_.emplace_front(Element(std::move(key), std::move(object)));
42
+ auto it = element_list_.begin();
43
+ auto emplace_result = element_map_.emplace(&it->first, it);
44
+ if (!emplace_result.second) {
45
+ element_list_.erase(it);
46
+ DoLRU(emplace_result.first->second);
47
+ } else if (element_list_.size() > max_size_) {
48
+ Element* last = &element_list_.back();
49
+ element_map_.erase(&last->first);
50
+ element_list_.pop_back();
51
+ }
52
+ return emplace_result.first->second->second;
53
+ }
54
+
55
+ // Retrieves the existing object if it exists. If it does, its position in
56
+ // the LRU list gets moved to the head of the list.
57
+ // Returns nullptr if no object with the specified key is found within the
58
+ // cache.
59
+ TypePtr Get(const K& key) {
60
+ if (!max_size_) {
61
+ return nullptr;
62
+ }
63
+ std::lock_guard<std::mutex> slock(lock_);
64
+ auto it = element_map_.find(&key);
65
+ if (it == element_map_.end()) {
66
+ return nullptr;
67
+ }
68
+ DoLRU(it->second);
69
+ return it->second->second;
70
+ }
71
+
72
+ TypePtr GetLatest() {
73
+ std::lock_guard<std::mutex> g(lock_);
74
+ TORCH_CHECK(!element_list_.empty());
75
+ return element_list_.front().second;
76
+ }
77
+
78
+ bool Erase(const K& key) {
79
+ if (!max_size_) {
80
+ return false;
81
+ }
82
+ std::lock_guard<std::mutex> slock(lock_);
83
+ auto it = element_map_.find(&key);
84
+ if (it == element_map_.end()) {
85
+ return false;
86
+ }
87
+ auto lit = it->second;
88
+ element_map_.erase(it);
89
+ element_list_.erase(lit);
90
+ return true;
91
+ }
92
+
93
+ void Clear() {
94
+ if (!max_size_) {
95
+ return;
96
+ }
97
+ std::lock_guard<std::mutex> slock(lock_);
98
+ element_map_.clear();
99
+ element_list_.clear();
100
+ }
101
+
102
+ int Numel() const {
103
+ if (!max_size_) {
104
+ return 0;
105
+ }
106
+ std::lock_guard<std::mutex> g(lock_);
107
+ TORCH_CHECK(element_map_.size() == element_list_.size());
108
+ return element_map_.size();
109
+ }
110
+
111
+ private:
112
+ using ElementList = std::list<Element>;
113
+
114
+ struct Hasher {
115
+ size_t operator()(const K* key) const {
116
+ return hasher(*key);
117
+ }
118
+
119
+ H hasher;
120
+ };
121
+
122
+ struct Equaler {
123
+ bool operator()(const K* k1, const K* k2) const {
124
+ return equaler(*k1, *k2);
125
+ }
126
+
127
+ E equaler;
128
+ };
129
+
130
+ using ElementMap = std::
131
+ unordered_map<const K*, typename ElementList::iterator, Hasher, Equaler>;
132
+
133
+ void DoLRU(typename ElementList::iterator it) {
134
+ element_list_.splice(element_list_.begin(), element_list_, it);
135
+ }
136
+
137
+ mutable std::mutex lock_;
138
+ const size_t max_size_ = 0;
139
+ ElementList element_list_;
140
+ ElementMap element_map_;
141
+ };
142
+
143
+ } // namespace lazy
144
+ } // namespace torch
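A minimal sketch (not part of the commit) exercising the LRU `Cache` above; the key/value types and capacity are arbitrary, since the class is a self-contained template.

    #include <torch/csrc/lazy/core/cache.h>

    #include <cassert>
    #include <memory>
    #include <string>

    void cache_example() {
      torch::lazy::Cache<std::string, int> cache(/*max_size=*/2);

      cache.Add("a", std::make_shared<int>(1));
      cache.Add("b", std::make_shared<int>(2));
      cache.Add("c", std::make_shared<int>(3));  // evicts "a", the least recently used

      assert(cache.Get("a") == nullptr);
      assert(*cache.Get("b") == 2);              // also moves "b" to the front of the LRU list
      assert(cache.Numel() == 2);

      cache.Erase("c");
      cache.Clear();
    }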
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/lazy/core/tensor.h>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ TORCH_API std::function<std::vector<SourceLocation>()>&
12
+ GetPythonFramesFunction();
13
+
14
+ TORCH_API std::string GetFirstUserFrameInPython();
15
+
16
+ class TORCH_API DebugUtil {
17
+ public:
18
+ enum GraphFormat {
19
+ kText,
20
+ kDot,
21
+ kBackend,
22
+ };
23
+
24
+ static GraphFormat GetDefaultGraphFormat();
25
+
26
+ // Dumps the current Python frame and the IR Graph whose roots are the IR
27
+ // values held at the tensors. If indices is not nullptr, it selects the
28
+ // indices of the tensors whose graph will be emitted.
29
+ static std::string GetTensorsGraphInfo(
30
+ c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
31
+ const std::vector<size_t>* indices,
32
+ GraphFormat format = GetDefaultGraphFormat());
33
+
34
+ // If the environment variable LTC_SAVE_TENSORS_FILE is set to the proper
35
+ // output path, an instance of the report returned by GetTensorsGraphInfo() is
36
+ // saved.
37
+ static void SaveTensorsGraphInfo(
38
+ const char* name,
39
+ c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
40
+ const std::vector<size_t>* indices,
41
+ GraphFormat format = GetDefaultGraphFormat());
42
+
43
+ static bool ExperimentEnabled(const std::string& name);
44
+ };
45
+
46
+ } // namespace lazy
47
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/dynamic_ir.h ADDED
@@ -0,0 +1,59 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/Flags.h>
16
+ #include <torch/csrc/lazy/core/hash.h>
17
+ #include <torch/csrc/lazy/core/ir.h>
18
+ #include <torch/csrc/lazy/core/ir_metadata.h>
19
+ #include <torch/csrc/lazy/ts_backend/ts_node.h>
20
+
21
+ namespace torch {
22
+ namespace lazy {
23
+
24
+ /**
25
+ * The goal of "dynamic" Nodes is to patch a hole in our tracing.
26
+ * Previously, if a user called `sizes` on a Tensor, it would leak out
27
+ * of our tracing system, as `sizes` returns a torch.Size or an int. To
28
+ * prevent this from happening, we introduce DimensionNode, a new type
29
+ * of Node that abstracts the operation of getting the dimensions of a
30
+ * Tensor.
31
+ *
32
+ * Consider the following example:
33
+ * ```
34
+ * numel = x.shape()[0] * x.shape()[1]
35
+ * ```
36
+ *
37
+ * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
38
+ * and the multiplication of the two SizeNodes will be represented by
39
+ * a SizeMul (also a subclass of DimensionNode). Through this, we can
40
+ * prevent `numel` from being represented as a Python int and thus
41
+ * burned into the Graph.
42
+ */
43
+
44
+ class TORCH_API DimensionNode {
45
+ public:
46
+ virtual bool isSymbolic() const {
47
+ return false;
48
+ };
49
+ virtual int64_t getDynamicValue() const {
50
+ TORCH_CHECK(false, "NYI");
51
+ };
52
+ virtual int64_t getStaticValue() const {
53
+ TORCH_CHECK(false, "NYI");
54
+ };
55
+ virtual ~DimensionNode() = default;
56
+ };
57
+
58
+ } // namespace lazy
59
+ } // namespace torch
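A minimal sketch (not part of the commit): a hypothetical `DimensionNode` that wraps a statically known size, roughly the shape a backend-specific `SizeNode` might take. It only overrides the virtuals declared above; the class name is illustrative.

    #include <torch/csrc/lazy/core/dynamic_ir.h>

    namespace example {

    class StaticDim : public torch::lazy::DimensionNode {
     public:
      explicit StaticDim(int64_t value) : value_(value) {}

      bool isSymbolic() const override { return false; }       // size is fully known
      int64_t getStaticValue() const override { return value_; }
      int64_t getDynamicValue() const override { return value_; }

     private:
      int64_t value_;
    };

    } // namespace example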
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/hash.h ADDED
@@ -0,0 +1,238 @@
1
+ /**
2
+ * Hash utils in this file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/e0e5f937a0ba8d904f9608137dc8c51ba439df2d/third_party/xla_client/util.h
4
+ */
5
+ #pragma once
6
+
7
+ #include <ATen/Tensor.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <c10/util/int128.h>
10
+ #include <torch/csrc/Export.h>
11
+ #include <cstring>
12
+ #include <set>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+
19
+ using size_t = std::size_t;
20
+
21
+ class TORCH_API hash_t : public c10::uint128 {
22
+ public:
23
+ // Switch from typedef hash_t = uint128 to provide explicit casters
24
+ hash_t(int8_t val) : uint128(static_cast<uint32_t>(val)) {}
25
+ hash_t(int16_t val) : uint128(static_cast<uint32_t>(val)) {}
26
+ hash_t(int32_t val) : uint128(static_cast<uint32_t>(val)) {}
27
+ hash_t(int64_t val) : uint128(static_cast<uint64_t>(val)) {}
28
+ hash_t(uint32_t val) : uint128(val) {}
29
+ hash_t(uint64_t val) : uint128(val) {}
30
+ hash_t(uint128 val) : uint128(val) {}
31
+ hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
32
+ hash_t() : uint128() {}
33
+ };
34
+
35
+ // Std* functions use 64-bit hash
36
+ size_t TORCH_API StdDataHash(const void* data, size_t size);
37
+
38
+ size_t TORCH_API StdHashCombine(uintmax_t a, uintmax_t b);
39
+
40
+ // Other functions are all 128-bit
41
+ hash_t TORCH_API HashBlock(const void* data, size_t n, const hash_t& seed);
42
+
43
+ hash_t TORCH_API DataHash(const void* data, size_t size);
44
+
45
+ hash_t TORCH_API HashCombine(const hash_t& a, const hash_t& b);
46
+
47
+ size_t TORCH_API HashReduce(const hash_t& a);
48
+
49
+ // Returns a string representation of a hash
50
+ std::string TORCH_API HashToString(const hash_t& a);
51
+
52
+ struct HashReducer {
53
+ size_t operator()(const hash_t& value) const {
54
+ return HashReduce(value);
55
+ }
56
+ };
57
+
58
+ static inline hash_t StringHash(const char* data) {
59
+ return DataHash(data, std::strlen(data));
60
+ }
61
+
62
+ // Automatic templated implementation for 'arithmetic' types
63
+ template <
64
+ typename T,
65
+ typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr>
66
+ hash_t Hash(const T& value) {
67
+ return DataHash(&value, sizeof(value));
68
+ }
69
+
70
+ // added because on macos builds the vector<bool> specialization
71
+ // breaks falling through to the templated arithmetic types above
72
+ hash_t TORCH_API Hash(const std::vector<bool>& value);
73
+
74
+ // Specialized implementations for proprietary types
75
+ static inline hash_t Hash(const c10::ScalarType& value) {
76
+ return DataHash(&value, sizeof(value));
77
+ }
78
+
79
+ static inline hash_t Hash(const c10::MemoryFormat& value) {
80
+ return DataHash(&value, sizeof(value));
81
+ }
82
+
83
+ static inline hash_t Hash(const c10::DeviceType& value) {
84
+ return DataHash(&value, sizeof(value));
85
+ }
86
+
87
+ static inline hash_t Hash(const c10::Device& value) {
88
+ return HashCombine(Hash(value.type()), Hash(value.index()));
89
+ }
90
+
91
+ static inline hash_t Hash(const c10::Layout& value) {
92
+ return DataHash(&value, sizeof(value));
93
+ }
94
+
95
+ static inline hash_t Hash(const c10::Scalar& value) {
96
+ switch (value.type()) {
97
+ case c10::ScalarType::ComplexDouble:
98
+ return Hash(value.toComplexDouble());
99
+ case c10::ScalarType::Double:
100
+ return Hash(value.toDouble());
101
+ case c10::ScalarType::Long:
102
+ return Hash(value.toLong());
103
+ case c10::ScalarType::Bool:
104
+ return Hash(value.toBool());
105
+ default:
106
+ TORCH_INTERNAL_ASSERT(false, "Unknown scalar type.", value.type());
107
+ }
108
+ }
109
+
110
+ static inline hash_t TensorHash(const at::Tensor& tensor) {
111
+ at::Tensor ctensor = tensor.contiguous();
112
+ int64_t size = ctensor.numel() * ctensor.element_size();
113
+ switch (ctensor.scalar_type()) {
114
+ case at::ScalarType::Bool:
115
+ return DataHash(ctensor.const_data_ptr<bool>(), size);
116
+ case at::ScalarType::Byte:
117
+ return DataHash(ctensor.const_data_ptr<uint8_t>(), size);
118
+ case at::ScalarType::Char:
119
+ return DataHash(ctensor.const_data_ptr<int8_t>(), size);
120
+ case at::ScalarType::Short:
121
+ return DataHash(ctensor.const_data_ptr<int16_t>(), size);
122
+ case at::ScalarType::Int:
123
+ return DataHash(ctensor.const_data_ptr<int32_t>(), size);
124
+ case at::ScalarType::Long:
125
+ return DataHash(ctensor.const_data_ptr<int64_t>(), size);
126
+ case at::ScalarType::Float:
127
+ return DataHash(ctensor.const_data_ptr<float>(), size);
128
+ case at::ScalarType::Double:
129
+ return DataHash(ctensor.const_data_ptr<double>(), size);
130
+ case at::ScalarType::BFloat16:
131
+ return DataHash(ctensor.const_data_ptr<at::BFloat16>(), size);
132
+ case at::ScalarType::Half:
133
+ return DataHash(ctensor.const_data_ptr<at::Half>(), size);
134
+ case at::ScalarType::ComplexFloat:
135
+ return DataHash(ctensor.const_data_ptr<c10::complex<float>>(), size);
136
+ case at::ScalarType::ComplexDouble:
137
+ return DataHash(ctensor.const_data_ptr<c10::complex<double>>(), size);
138
+ default:
139
+ TORCH_INTERNAL_ASSERT(
140
+ false, "Unsupported scalar type:", ctensor.scalar_type());
141
+ }
142
+ }
143
+
144
+ static inline hash_t Hash(const std::string& value) {
145
+ return DataHash(value.data(), value.size());
146
+ }
147
+
148
+ static inline hash_t Hash(const c10::string_view& value) {
149
+ return DataHash(value.data(), value.size());
150
+ }
151
+
152
+ static inline hash_t Hash(const at::Generator& value) {
153
+ return TensorHash(value.get_state());
154
+ }
155
+
156
+ // Taken from glibc's implementation of hashing optionals,
157
+ // we want to include a contribution to the hash to distinguish
158
+ // cases where one or another option was null, but we hope it doesn't
159
+ // collide with an actually scalar value.
160
+ //
161
+ // Use an arbitrary randomly-selected 64-bit integer rather than a
162
+ // small constant that we then hash at runtime so we don't have to
163
+ // repeatedly hash a constant at runtime.
164
+ static const int64_t kNullOpt = 0x8655d738f3678dda;
165
+
166
+ // Hashing for c10::optional types contributes to hash
167
+ // for optionals with null value, important to distinguish
168
+ // between <nullopt, non-nullopt> and <non-nullopt, nullopt> cases
169
+ template <typename T>
170
+ hash_t Hash(const c10::optional<T>& value) {
171
+ if (value.has_value()) {
172
+ return Hash(value.value());
173
+ } else {
174
+ return kNullOpt;
175
+ }
176
+ }
177
+
178
+ // Hashing of containers
179
+ // Forward declare to allow hashes of vectors of vectors to work.
180
+ template <typename T>
181
+ hash_t ContainerHash(const T& values);
182
+
183
+ template <typename T>
184
+ hash_t Hash(const std::vector<T>& values) {
185
+ return ContainerHash(values);
186
+ }
187
+
188
+ // Need a special case for optional<container>?
189
+ template <typename T>
190
+ hash_t Hash(const c10::optional<std::vector<T>>& value) {
191
+ if (value.has_value()) {
192
+ return ContainerHash(value.value());
193
+ } else {
194
+ return kNullOpt;
195
+ }
196
+ }
197
+
198
+ template <typename T>
199
+ hash_t Hash(const std::set<T>& values) {
200
+ return ContainerHash(values);
201
+ }
202
+
203
+ template <typename T, typename S>
204
+ hash_t Hash(const std::pair<T, S>& values) {
205
+ return HashCombine(Hash(values.first), Hash(values.second));
206
+ }
207
+
208
+ static inline hash_t Hash(const hash_t& value) {
209
+ return value;
210
+ }
211
+
212
+ template <typename T>
213
+ hash_t Hash(c10::ArrayRef<T> values) {
214
+ return ContainerHash(values);
215
+ }
216
+
217
+ template <typename T>
218
+ hash_t ContainerHash(const T& values) {
219
+ hash_t h(static_cast<uint64_t>(0x85ebca77c2b2ae63));
220
+ for (const auto& value : values) {
221
+ h = HashCombine(h, Hash(value));
222
+ }
223
+ return h;
224
+ }
225
+
226
+ // Varargs hashing
227
+ template <typename T = void>
228
+ hash_t MHash() {
229
+ return hash_t(static_cast<uint64_t>(0x165667b19e3779f9));
230
+ }
231
+
232
+ template <typename T, typename... Targs>
233
+ hash_t MHash(T value, Targs... Fargs) {
234
+ return HashCombine(Hash(value), MHash(Fargs...));
235
+ }
236
+
237
+ } // namespace lazy
238
+ } // namespace torch
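A minimal sketch (not part of the commit) combining the hashing helpers above; the values being hashed are arbitrary.

    #include <torch/csrc/lazy/core/hash.h>

    #include <string>
    #include <vector>

    torch::lazy::hash_t example_hash() {
      using namespace torch::lazy;

      hash_t h_name = Hash(std::string("relu"));            // hashes the bytes of the string
      hash_t h_dims = Hash(std::vector<int64_t>{2, 3, 4});  // ContainerHash over the elements

      c10::optional<double> alpha;                          // nullopt -> kNullOpt sentinel
      hash_t h_alpha = Hash(alpha);

      // MHash folds an arbitrary argument list into a single 128-bit hash.
      return MHash(h_name, h_dims, h_alpha, c10::ScalarType::Float);
    }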
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/internal_ops/ltc_ops.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/ir.h>
4
+
5
+ #include <c10/util/CallOnce.h>
6
+
7
+ #include <mutex>
8
+ #include <string>
9
+
10
+ namespace torch {
11
+ namespace lazy {
12
+
13
+ class TORCH_API OpKindWrapper {
14
+ public:
15
+ explicit OpKindWrapper(const char* name) : name_(name) {}
16
+
17
+ const OpKind& operator*() const {
18
+ return get();
19
+ }
20
+
21
+ operator OpKind() const {
22
+ return get();
23
+ }
24
+
25
+ private:
26
+ const OpKind& get() const {
27
+ c10::call_once(once_, [this]() { op_kind_ = OpKind::Get(name_); });
28
+ return op_kind_;
29
+ }
30
+
31
+ const char* name_;
32
+ mutable OpKind op_kind_;
33
+ mutable c10::once_flag once_;
34
+ };
35
+
36
+ const OpKindWrapper ltc_all_to_all("lazy_tensors::all_to_all");
37
+ const OpKindWrapper ltc_cast("lazy_tensors::cast");
38
+ const OpKindWrapper ltc_collective_permute("lazy_tensors::collective_permute");
39
+ const OpKindWrapper ltc_cross_replica_sum("lazy_tensors::cross_replica_sum");
40
+ const OpKindWrapper ltc_device_data("lazy_tensors::device_data");
41
+ const OpKindWrapper ltc_get_dimensions_size(
42
+ "lazy_tensors::ltc_get_dimensions_size");
43
+ const OpKindWrapper ltc_moving_average("lazy_tensors::moving_average");
44
+ const OpKindWrapper ltc_nms("lazy_tensors::nms");
45
+ const OpKindWrapper ltc_not_supported("lazy_tensors::not_supported");
46
+ const OpKindWrapper ltc_replication_pad("lazy_tensors::replication_pad");
47
+ const OpKindWrapper ltc_replication_pad_backward(
48
+ "lazy_tensors::replication_pad_backward");
49
+ const OpKindWrapper ltc_tensor_data("lazy_tensors::tensor_data");
50
+
51
+ } // namespace lazy
52
+ } // namespace torch
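A minimal sketch (not part of the commit): each `OpKindWrapper` above resolves its `c10::Symbol` lazily on first use and converts implicitly to `OpKind`, so it can be compared directly against a node's op.

    #include <torch/csrc/lazy/core/internal_ops/ltc_ops.h>

    bool is_device_data(const torch::lazy::Node& node) {
      // operator OpKind() triggers the one-time OpKind::Get("lazy_tensors::device_data") lookup.
      return node.op() == torch::lazy::ltc_device_data;
    }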
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/ArrayRef.h>
16
+ #include <c10/util/Flags.h>
17
+ #include <torch/csrc/lazy/core/hash.h>
18
+ #include <torch/csrc/lazy/core/ir_metadata.h>
19
+ #include <torch/csrc/lazy/core/shape.h>
20
+
21
+ C10_DECLARE_bool(ltc_enable_dynamic_shapes);
22
+
23
+ namespace torch {
24
+ namespace lazy {
25
+
26
+ static const hash_t kHashSeed(static_cast<uint32_t>(0x5a2d296e9));
27
+
28
+ class Node;
29
+ struct Output;
30
+ struct Value;
31
+
32
+ using NodePtr = std::shared_ptr<Node>;
33
+
34
+ // The Kind of operation a Node can be associated to.
35
+ struct TORCH_API OpKind {
36
+ OpKind() = default;
37
+ explicit OpKind(c10::Symbol op) : op(op) {}
38
+
39
+ bool operator==(const OpKind& rhs) const {
40
+ return op == rhs.op;
41
+ }
42
+ bool operator!=(const OpKind& rhs) const {
43
+ return !operator==(rhs);
44
+ }
45
+ bool operator<(const OpKind& rhs) const {
46
+ return c10::unique_t(op) < c10::unique_t(rhs.op);
47
+ }
48
+
49
+ hash_t hash() const;
50
+
51
+ std::string ToString() const {
52
+ return op.toQualString();
53
+ }
54
+
55
+ // Retrieves an existing operation object, or creates a new one. Operations
56
+ // that are specific to lazy tensors should live within the 'lazy_tensors::'
57
+ // namespace.
58
+ static OpKind Get(const std::string& name);
59
+
60
+ c10::Symbol op;
61
+ };
62
+
63
+ inline std::ostream& operator<<(std::ostream& stream, const OpKind& op) {
64
+ stream << op.ToString();
65
+ return stream;
66
+ }
67
+
68
+ using OpList = c10::ArrayRef<Value>;
69
+
70
+ hash_t OperandHashes(
71
+ const OpList& operands,
72
+ const hash_t& seed,
73
+ bool bakeInSizes);
74
+ // A node in the graph. Nodes for operations which require extra data to be
75
+ // stored for lowering should inherit from this class and add an operation
76
+ // specific member there. For example, a constant might create a new
77
+ // NodeConstant class (inheriting from Node) with an extra lazy_tensors::Literal
78
+ // field, or a tensor value might create a new NodeTensor with a computation
79
+ // client data handle in it.
80
+ class TORCH_API Node {
81
+ public:
82
+ static bool enableDynamicShape();
83
+
84
+ // Creates a new node with the given op name. The op is a unique identifier
85
+ // for the operation. The num_outputs tells how many outputs a given operation
86
+ // generates.
87
+ //
88
+ // A non-leaf node's node_hash does not always contain shape information.
89
+ // So we pass in the hash value rather than a function.
90
+ Node(OpKind op, size_t num_outputs);
91
+
92
+ // Construct node with operands and shapes
93
+ Node(
94
+ OpKind op,
95
+ OpList operands,
96
+ std::vector<Shape>&& shapes,
97
+ size_t num_outputs = 1);
98
+
99
+ // Construct node with operands and shape generated from a function
100
+ Node(
101
+ OpKind op,
102
+ OpList operands,
103
+ const std::function<Shape()>& shape_fn,
104
+ size_t num_outputs = 1);
105
+
106
+ // Construct node with operands and no shape
107
+ Node(OpKind op, OpList operands, size_t num_outputs = 1);
108
+
109
+ // Construct node with shape and no operands
110
+ Node(OpKind op, Shape shape, size_t num_outputs = 1);
111
+
112
+ virtual ~Node();
113
+
114
+ const OpKind& op() const {
115
+ return op_;
116
+ }
117
+
118
+ size_t num_outputs() const {
119
+ return num_outputs_;
120
+ }
121
+
122
+ // Retrieves the full shape of the IR Node.
123
+ virtual c10::ArrayRef<Shape> shapes() const;
124
+
125
+ virtual const Shape& shape(size_t output_index = 0) const;
126
+
127
+ // Add the shape computed by the shape_fn
128
+ void addComputedShape(const std::function<Shape()>& shape_fn);
129
+
130
+ // Compute the shape using the provided shape_fn if not previously cached
131
+ Shape computeShape(const std::function<Shape()>& shape_fn);
132
+
133
+ virtual const std::vector<Output>& operands() const;
134
+
135
+ virtual const Output& operand(size_t i) const;
136
+
137
+ // Gets operand at index i if index is valid, or kNullOutput otherwise.
138
+ virtual const Output& nullable_operand(size_t i) const;
139
+
140
+ // Returns the hash of the dag used to look up the compiled graph
141
+ virtual hash_t hash() const = 0;
142
+
143
+ // Returns the hash of the dag used to for shape caching
144
+ virtual hash_t shapeHash() const = 0;
145
+
146
+ const MetaData& metadata() const {
147
+ return metadata_;
148
+ }
149
+
150
+ UserMetaData* user_metadata() const {
151
+ return user_metadata_.get();
152
+ }
153
+
154
+ std::shared_ptr<UserMetaData> SetUserMetadata(
155
+ std::shared_ptr<UserMetaData> user_meta) {
156
+ std::swap(user_metadata_, user_meta);
157
+ return user_meta;
158
+ }
159
+
160
+ virtual std::string ToString() const;
161
+
162
+ private:
163
+ // The ID of the operation captured by this node.
164
+ OpKind op_;
165
+ size_t num_outputs_ = 1;
166
+
167
+ // The IR specific metadata attached to the IR node.
168
+ MetaData metadata_;
169
+ // The IR framework user can attach a user defined metadata object deriving
170
+ // from UserMetaData.
171
+ std::shared_ptr<UserMetaData> user_metadata_;
172
+
173
+ protected:
174
+ // Adds node's index output number as operand.
175
+ void AddOperand(NodePtr node, size_t index = 0);
176
+
177
+ std::vector<Shape> shapes_;
178
+ // A node holds a real reference to its operands.
179
+ std::vector<NodePtr> operands_;
180
+ // Outputs do not hold references on the nodes, and neither do the uses, since
181
+ // otherwise we get into circular reference counting.
182
+ std::vector<Output> operands_as_outputs_;
183
+ };
184
+
185
+ inline std::ostream& operator<<(std::ostream& stream, const Node& node) {
186
+ stream << node.ToString();
187
+ return stream;
188
+ }
189
+
190
+ // Note: Keep this version of NodeCast for smooth PyTorch/XLA migration, and
191
+ // clean up once the migration is done.
192
+ template <typename T>
193
+ const T* NodeCast(const Node* node, OpKind op) {
194
+ if (op != node->op()) {
195
+ return nullptr;
196
+ }
197
+ #ifdef NDEBUG
198
+ return static_cast<const T*>(node);
199
+ #else
200
+ return &dynamic_cast<const T&>(*node);
201
+ #endif
202
+ }
203
+
204
+ template <typename T>
205
+ const T* NodeCast(const Node* node) {
206
+ if (T::ClassOpKind() != node->op()) {
207
+ return nullptr;
208
+ }
209
+ // TODO: Some IR classes share the same opkind, such as Mean and MeanDim, so
210
+ // static_cast is not safe here. Unless we have opkind unique for each class,
211
+ // we have to use dynamic_cast here.
212
+ return dynamic_cast<const T*>(node);
213
+ }
214
+
215
+ // Represents a specific output produced by a node. Since the output of a node
216
+ // can be composed by multiple outputs, the node+index coordinates fully qualify
217
+ // each single output.
218
+ struct TORCH_API Output {
219
+ struct Hasher {
220
+ size_t operator()(const Output& output) const;
221
+ };
222
+
223
+ Output() = default;
224
+ explicit Output(const Node* node, size_t index = 0)
225
+ : node(node), index(index) {}
226
+
227
+ hash_t hash() const;
228
+ hash_t shapeHash() const;
229
+
230
+ bool operator==(const Output& rhs) const {
231
+ return node == rhs.node && index == rhs.index;
232
+ }
233
+
234
+ // To compare the operands of to-be-constructed node and to-be-reused node
235
+ bool operator==(const Value& rhs) const;
236
+
237
+ bool operator!=(const Output& rhs) const {
238
+ return !operator==(rhs);
239
+ }
240
+
241
+ const Shape& shape() const {
242
+ return node->shape(index);
243
+ }
244
+
245
+ std::string ToString() const;
246
+
247
+ // The node providing the output.
248
+ const Node* node{nullptr};
249
+ // The index in the node's output this output refers to.
250
+ size_t index{0};
251
+ };
252
+
253
+ inline std::ostream& operator<<(std::ostream& stream, const Output& output) {
254
+ stream << output.ToString();
255
+ return stream;
256
+ }
257
+
258
+ template <typename T>
259
+ using OutputMap = std::unordered_map<Output, T, Output::Hasher>;
260
+
261
+ // Represents an input/operand for a Node object.
262
+ struct TORCH_API Value {
263
+ Value() = default;
264
+ /* implicit */ Value(NodePtr&& node, size_t index = 0)
265
+ : node(std::move(node)), index(index) {}
266
+ /* implicit */ Value(const NodePtr& node, size_t index = 0)
267
+ : node(node), index(index) {}
268
+
269
+ hash_t hash() const;
270
+ hash_t shapeHash() const;
271
+
272
+ operator bool() const {
273
+ return node != nullptr;
274
+ }
275
+
276
+ operator Output() const {
277
+ return Output(node.get(), index);
278
+ }
279
+
280
+ const Shape& shape() const {
281
+ return node->shape(index);
282
+ }
283
+
284
+ Node* operator->() const {
285
+ return node.get();
286
+ }
287
+
288
+ NodePtr node;
289
+ size_t index = 0;
290
+ };
291
+
292
+ } // namespace lazy
293
+ } // namespace torch
294
+
295
+ namespace c10 {
296
+ // Explicit template instantiation to make ArrayRef<Value> work
297
+ template class at::ArrayRef<torch::lazy::Value>;
298
+ } // namespace c10
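A minimal sketch (not part of the commit) of the ownership model above: `Value` keeps its producing node alive through a `NodePtr`, while `Output` is a non-owning (node, index) pair, which is why `Node` stores its operands both as `NodePtr`s and as `Output`s.

    #include <torch/csrc/lazy/core/ir.h>

    bool refers_to_same_output(const torch::lazy::Value& v) {
      if (!v) {                       // operator bool(): a null Value has no node
        return false;
      }
      torch::lazy::Output o = v;      // operator Output(): borrows node.get(), no ownership
      return o == v;                  // Output::operator==(const Value&)
    }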
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/ir.h>
4
+
5
+ #include <string>
6
+
7
+ namespace torch {
8
+ namespace lazy {
9
+
10
+ class BackendDevice;
11
+
12
+ class TORCH_API DumpUtil {
13
+ public:
14
+ static std::string ToDot(c10::ArrayRef<const Node*> nodes);
15
+
16
+ static std::string PostOrderToDot(
17
+ c10::ArrayRef<const Node*> post_order,
18
+ c10::ArrayRef<const Node*> roots);
19
+
20
+ static std::string ToText(c10::ArrayRef<const Node*> nodes);
21
+
22
+ static std::string PostOrderToText(
23
+ c10::ArrayRef<const Node*> post_order,
24
+ c10::ArrayRef<const Node*> roots);
25
+
26
+ static std::string ToBackend(
27
+ c10::ArrayRef<Value> values,
28
+ const BackendDevice& device);
29
+ };
30
+
31
+ } // namespace lazy
32
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h ADDED
@@ -0,0 +1,49 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/Optional.h>
4
+
5
+ #include <string>
6
+ #include <vector>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+ struct SourceLocation {
11
+ std::string file;
12
+ std::string function;
13
+ int line = -1;
14
+ };
15
+
16
+ TORCH_API void EmitShortFrameInfo(
17
+ std::ostream& stream,
18
+ const std::vector<SourceLocation>& frames);
19
+
20
+ TORCH_API std::ostream& operator<<(
21
+ std::ostream& stream,
22
+ const std::vector<SourceLocation>& frames);
23
+
24
+ // The base class for user defined metadata which is possible to attach to IR
25
+ // nodes.
26
+ struct TORCH_API UserMetaData {
27
+ virtual ~UserMetaData() = default;
28
+ };
29
+
30
+ struct TORCH_API MetaData {
31
+ std::string scope;
32
+ std::vector<SourceLocation> frame_info;
33
+ };
34
+
35
+ // TODO(whc) is this going to be used outside of in IR decompositions?
36
+ // RAII data structure to be used as a stack variable to enter a new IR scope. IR
37
+ // scope names will appear in the IR and will help identifying the source of the
38
+ // single IR nodes.
39
+ struct TORCH_API ScopePusher {
40
+ explicit ScopePusher(const std::string& name);
41
+ ~ScopePusher();
42
+
43
+ static void ResetScopes();
44
+ };
45
+
46
+ TORCH_API MetaData GetMetaDataIfDebugging();
47
+
48
+ } // namespace lazy
49
+ } // namespace torch
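A minimal sketch (not part of the commit): `ScopePusher` is RAII, so every IR node created while the object is alive gets the scope name recorded in its `MetaData`. The function and scope names are illustrative.

    #include <torch/csrc/lazy/core/ir_metadata.h>

    void my_decomposition() {
      torch::lazy::ScopePusher scope("my_decomposition");
      // ... build lazy IR nodes here; their MetaData::scope will mention "my_decomposition".
    }  // the scope is popped automatically when `scope` is destroyed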
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h ADDED
@@ -0,0 +1,47 @@
1
+ #pragma once
2
+
3
+ #include <unordered_map>
4
+ #include <vector>
5
+
6
+ #include <torch/csrc/lazy/core/ir.h>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ class TORCH_API Util {
12
+ public:
13
+ // Tracks the emission status of the nodes during the post-order generation.
14
+ // It helps tracking loops within the computation graphs.
15
+ enum EmitStatus {
16
+ kNotEmitted,
17
+ kEmitting,
18
+ kEmitted,
19
+ };
20
+
21
+ using EmissionMap = std::unordered_map<const Node*, EmitStatus>;
22
+
23
+ // Computes the post order from the given node, without using recursion. The
24
+ // emission map can be used as saved state, for multiple separate calls to
25
+ // this API. The returned post-order can be empty if the node has already been
26
+ // emitted inside the emission map. An error is generated if a loop is
27
+ // detected.
28
+ static std::vector<const Node*> ComputePostOrder(
29
+ const Node* node,
30
+ EmissionMap* emap);
31
+
32
+ static std::vector<const Node*> ComputePostOrder(
33
+ c10::ArrayRef<const Node*> nodes,
34
+ EmissionMap* emap);
35
+
36
+ // Same as above, but computes the post order on the set of nodes specified as
37
+ // argument.
38
+ static std::vector<const Node*> ComputePostOrder(
39
+ c10::ArrayRef<const Node*> nodes);
40
+
41
+ // Retrieves the number of nodes within the graph whose sink are passed in the
42
+ // nodes argument.
43
+ static size_t GetGraphSize(c10::ArrayRef<const Node*> nodes);
44
+ };
45
+
46
+ } // namespace lazy
47
+ } // namespace torch
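A minimal sketch (not part of the commit): walking a graph from several roots in post order while sharing one `EmissionMap`, so nodes already reached through an earlier root are not emitted twice. For a one-shot query over a fixed set of roots, `Util::GetGraphSize` returns the same count directly.

    #include <torch/csrc/lazy/core/ir_util.h>

    size_t count_reachable_nodes(c10::ArrayRef<const torch::lazy::Node*> roots) {
      torch::lazy::Util::EmissionMap emap;
      size_t total = 0;
      for (const torch::lazy::Node* root : roots) {
        // Nodes emitted through an earlier root yield an empty (or shorter) post order.
        total += torch::lazy::Util::ComputePostOrder(root, &emap).size();
      }
      return total;
    }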
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h ADDED
@@ -0,0 +1,286 @@
1
+ /**
2
+ * This file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <atomic>
9
+ #include <functional>
10
+ #include <map>
11
+ #include <memory>
12
+ #include <mutex>
13
+ #include <string>
14
+ #include <vector>
15
+
16
+ #include <c10/macros/Export.h>
17
+
18
+ namespace torch {
19
+ namespace lazy {
20
+
21
+ struct TORCH_API Sample {
22
+ Sample() = default;
23
+ Sample(int64_t timestamp_ns, double value)
24
+ : timestamp_ns(timestamp_ns), value(value) {}
25
+
26
+ int64_t timestamp_ns = 0;
27
+ double value = 0;
28
+ };
29
+
30
+ using MetricReprFn = std::function<std::string(double)>;
31
+
32
+ // Class used to collect time-stamped numeric samples. The samples are stored in
33
+ // a circular buffer whose size can be configured at constructor time.
34
+ class TORCH_API MetricData {
35
+ public:
36
+ // Creates a new MetricData object with the internal circular buffer storing
37
+ // max_samples samples. The repr_fn argument allows specifying a function which
38
+ // pretty-prints a sample value.
39
+ MetricData(MetricReprFn repr_fn, size_t max_samples);
40
+
41
+ // Returns the total values of all the samples being posted to this metric.
42
+ double Accumulator() const;
43
+
44
+ size_t TotalSamples() const;
45
+
46
+ void AddSample(int64_t timestamp_ns, double value);
47
+
48
+ // Returns a vector with all the current samples, from the oldest to the
49
+ // newest. If accumulator is not nullptr, it will receive the current value of
50
+ // the metrics' accumulator (the sum of all posted values). If total_samples
51
+ // is not nullptr, it will receive the count of the posted values.
52
+ std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
53
+
54
+ std::string Repr(double value) const {
55
+ return repr_fn_(value);
56
+ }
57
+
58
+ void Reset();
59
+
60
+ bool IsValid() const {
61
+ return TotalSamples() > 0;
62
+ }
63
+
64
+ private:
65
+ mutable std::mutex lock_;
66
+ MetricReprFn repr_fn_;
67
+ size_t count_ = 0;
68
+ std::vector<Sample> samples_;
69
+ double accumulator_ = 0.0;
70
+ };
71
+
72
+ // Counters are a very lightweight form of metrics which do not need to track
73
+ // sample time.
74
+ class TORCH_API CounterData {
75
+ public:
76
+ CounterData() : value_(0) {}
77
+
78
+ void AddValue(int64_t value) {
79
+ value_ += value;
80
+ }
81
+
82
+ int64_t Value() const {
83
+ return value_;
84
+ }
85
+
86
+ void Reset() {
87
+ value_ = 0;
88
+ }
89
+
90
+ bool IsValid() const {
91
+ return value_ > 0;
92
+ }
93
+
94
+ private:
95
+ std::atomic<int64_t> value_;
96
+ };
97
+
98
+ class TORCH_API MetricsArena {
99
+ public:
100
+ static MetricsArena* Get();
101
+
102
+ void ResetCounters();
103
+ void ResetMetrics();
104
+
105
+ // Registers a new metric in the global arena.
106
+ void RegisterMetric(
107
+ const std::string& name,
108
+ MetricReprFn repr_fn,
109
+ size_t max_samples,
110
+ std::shared_ptr<MetricData>* data);
111
+
112
+ void RegisterCounter(
113
+ const std::string& name,
114
+ std::shared_ptr<CounterData>* data);
115
+
116
+ void ForEachMetric(
117
+ const std::function<void(const std::string&, MetricData*)>& metric_func);
118
+
119
+ void ForEachCounter(
120
+ const std::function<void(const std::string&, CounterData*)>&
121
+ counter_func);
122
+
123
+ std::vector<std::string> GetMetricNames();
124
+
125
+ MetricData* GetMetric(const std::string& name);
126
+
127
+ std::vector<std::string> GetCounterNames();
128
+
129
+ CounterData* GetCounter(const std::string& name);
130
+
131
+ private:
132
+ std::mutex lock_;
133
+ std::map<std::string, std::shared_ptr<MetricData>> metrics_;
134
+ std::map<std::string, std::shared_ptr<CounterData>> counters_;
135
+ };
136
+
137
+ // Emits the value in a to_string() conversion.
138
+ TORCH_API std::string MetricFnValue(double value);
139
+ // Emits the value in a humanized bytes representation.
140
+ TORCH_API std::string MetricFnBytes(double value);
141
+ // Emits the value in a humanized time representation. The value is expressed in
142
+ // nanoseconds EPOCH time.
143
+ TORCH_API std::string MetricFnTime(double value);
144
+
145
+ // The typical use of a Metric is one in which it gets created either in a
146
+ // global scope context:
147
+ // static Metric* metric = new Metric("RpcCount");
148
+ // Or within a function scope:
149
+ // void MyFunction(...) {
150
+ // static Metric* metric = new Metric("RpcCount");
151
+ // ...
152
+ // metric->AddSample(ts_nanos, some_value);
153
+ // }
154
+ class TORCH_API Metric {
155
+ public:
156
+ explicit Metric(
157
+ std::string name,
158
+ MetricReprFn repr_fn = MetricFnValue,
159
+ size_t max_samples = 0);
160
+
161
+ const std::string& Name() const {
162
+ return name_;
163
+ }
164
+
165
+ double Accumulator() const;
166
+
167
+ void AddSample(int64_t timestamp_ns, double value);
168
+
169
+ void AddSample(double value);
170
+
171
+ std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
172
+
173
+ std::string Repr(double value) const;
174
+
175
+ private:
176
+ MetricData* GetData() const;
177
+
178
+ std::string name_;
179
+ MetricReprFn repr_fn_;
180
+ size_t max_samples_;
181
+ mutable std::shared_ptr<MetricData> data_ptr_;
182
+ mutable std::atomic<MetricData*> data_;
183
+ };
184
+
185
+ // A Counter is a lightweight form of metric which tracks an integer value which
186
+ // can increase or decrease.
187
+ // A typical use is as:
188
+ // static Counter* counter = new Counter("MyCounter");
189
+ // ...
190
+ // counter->AddValue(+1);
191
+ class TORCH_API Counter {
192
+ public:
193
+ explicit Counter(std::string name);
194
+
195
+ void AddValue(int64_t value) {
196
+ GetData()->AddValue(value);
197
+ }
198
+
199
+ int64_t Value() const {
200
+ return GetData()->Value();
201
+ }
202
+
203
+ private:
204
+ CounterData* GetData() const;
205
+
206
+ std::string name_;
207
+ mutable std::shared_ptr<CounterData> data_ptr_;
208
+ mutable std::atomic<CounterData*> data_;
209
+ };
210
+
211
+ #define TORCH_LAZY_COUNTER(name, value) \
212
+ do { \
213
+ static ::torch::lazy::Counter* __counter = \
214
+ new ::torch::lazy::Counter(name); \
215
+ __counter->AddValue(value); \
216
+ } while (0)
217
+
218
+ #define TORCH_LAZY_FN_COUNTER(ns) TORCH_LAZY_COUNTER(c10::str(ns, __func__), 1)
219
+
220
+ #define TORCH_LAZY_VALUE_METRIC(name, value) \
221
+ do { \
222
+ static ::torch::lazy::Metric* __metric = \
223
+ new ::torch::lazy::Metric(name, torch::lazy::MetricFnValue); \
224
+ __metric->AddSample(value); \
225
+ } while (0)
226
+
227
+ // Creates a report with the current metrics statistics.
228
+ TORCH_API std::string CreateMetricReport();
229
+
230
+ // Creates a report with the selected metrics statistics.
231
+ TORCH_API std::string CreateMetricReport(
232
+ const std::vector<std::string>& counter_names,
233
+ const std::vector<std::string>& metric_names);
234
+
235
+ // Returns the currently registered metric names. Note that the list can grow
236
+ // since metrics are usually function initialized (they are static function
237
+ // variables).
238
+ TORCH_API std::vector<std::string> GetMetricNames();
239
+
240
+ // Retrieves the metric data of a given metric, or nullptr if such metric does
241
+ // not exist.
242
+ TORCH_API MetricData* GetMetric(const std::string& name);
243
+
244
+ // Returns the currently registered counter names. Note that the list can grow
245
+ // since counters are usually function initialized (they are static function
246
+ // variables).
247
+ TORCH_API std::vector<std::string> GetCounterNames();
248
+
249
+ // Retrieves the counter data of a given counter, or nullptr if such counter
250
+ // does not exist.
251
+ TORCH_API CounterData* GetCounter(const std::string& name);
252
+
253
+ // Retrieves the current EPOCH time in nanoseconds.
254
+ TORCH_API int64_t NowNs();
255
+
256
+ // Scope based utility class TORCH_API to measure the time the code takes within
257
+ // a given C++ scope.
258
+ class TORCH_API TimedSection {
259
+ public:
260
+ explicit TimedSection(Metric* metric) : metric_(metric), start_(NowNs()) {}
261
+
262
+ ~TimedSection() {
263
+ int64_t now = NowNs();
264
+ metric_->AddSample(now, now - start_);
265
+ }
266
+
267
+ double Elapsed() const {
268
+ return 1e-9 * static_cast<double>(NowNs() - start_);
269
+ }
270
+
271
+ private:
272
+ Metric* metric_;
273
+ int64_t start_;
274
+ };
275
+
276
+ #define TORCH_LAZY_TIMED(name) \
277
+ static torch::lazy::Metric* timed_metric = \
278
+ new torch::lazy::Metric(name, torch::lazy::MetricFnTime); \
279
+ torch::lazy::TimedSection timed_section(timed_metric)
280
+
281
+ #define TORCH_LAZY_FN_COUNTER_TIMED_TRACING(ns) \
282
+ TORCH_LAZY_FN_COUNTER(ns); \
283
+ TORCH_LAZY_TIMED("LazyTracing")
284
+
285
+ } // namespace lazy
286
+ } // namespace torch
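A minimal sketch (not part of the commit) of the counter/metric macros above; the counter and metric names are arbitrary.

    #include <torch/csrc/lazy/core/metrics.h>

    #include <string>

    void lowering_step() {
      TORCH_LAZY_COUNTER("ExampleLoweringCount", 1);  // static Counter, bumped on every call
      TORCH_LAZY_TIMED("ExampleLoweringTime");        // TimedSection samples this scope's wall time
      // ... do the actual work here ...
    }

    std::string dump_all_metrics() {
      // Includes every registered counter and metric, e.g. the two above.
      return torch::lazy::CreateMetricReport();
    }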
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/ir.h>
4
+
5
+ namespace torch {
6
+ namespace lazy {
7
+
8
+ TORCH_API NodePtr operator+(const Value& node1, const Value& node2);
9
+ TORCH_API NodePtr operator-(const Value& node1, const Value& node2);
10
+ TORCH_API NodePtr operator*(const Value& node1, const Value& node2);
11
+ TORCH_API NodePtr operator/(const Value& node1, const Value& node2);
12
+
13
+ } // namespace lazy
14
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/utils.h ADDED
@@ -0,0 +1,41 @@
1
+ #include <vector>
2
+
3
+ #include <torch/csrc/lazy/core/tensor_util.h>
4
+ #include <torch/csrc/lazy/core/util.h>
5
+
6
+ namespace torch {
7
+ namespace lazy {
8
+
9
+ TORCH_API bool StrideIsSupported(c10::ArrayRef<int64_t> stride);
10
+
11
+ TORCH_API std::vector<int64_t> GetArrayStridePermutation(
12
+ c10::ArrayRef<int64_t> stride);
13
+
14
+ TORCH_API Shape MakeDiagonalShape(
15
+ const Shape& shape,
16
+ int64_t offset,
17
+ int64_t dim1,
18
+ int64_t dim2);
19
+
20
+ TORCH_API Shape
21
+ MakePermuteShape(const Shape& source_shape, c10::ArrayRef<int64_t> permutation);
22
+
23
+ TORCH_API Shape MakeSelectShape(
24
+ const Shape& shape,
25
+ int64_t dim,
26
+ int64_t start,
27
+ int64_t end,
28
+ int64_t stride);
29
+
30
+ TORCH_API int64_t GetStride(int64_t start, int64_t end, int64_t stride);
31
+
32
+ TORCH_API std::vector<int64_t> BuildSqueezedDimensions(
33
+ c10::ArrayRef<int64_t> dimensions,
34
+ int64_t squeeze_dim);
35
+
36
+ TORCH_API std::vector<int64_t> BuildUnsqueezedDimensions(
37
+ c10::ArrayRef<int64_t> dimensions,
38
+ int64_t squeeze_dim);
39
+
40
+ } // namespace lazy
41
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/permutation_util.h ADDED
@@ -0,0 +1,43 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/ArrayRef.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/irange.h>
6
+
7
+ #include <vector>
8
+
9
+ namespace torch {
10
+ namespace lazy {
11
+
12
+ TORCH_API std::vector<int64_t> InversePermutation(
13
+ c10::ArrayRef<int64_t> input_permutation);
14
+
15
+ TORCH_API bool IsPermutation(c10::ArrayRef<int64_t> permutation);
16
+
17
+ // Gathers the input using the order specified by the permutation. For each i,
18
+ // output[i] = dimensions[permutation[i]]. The given permutation must be the
19
+ // same size as the input.
20
+ template <typename Container>
21
+ std::vector<typename Container::value_type> PermuteDimensions(
22
+ c10::ArrayRef<int64_t> permutation,
23
+ const Container& dimensions) {
24
+ using T = typename Container::value_type;
25
+ TORCH_CHECK(
26
+ dimensions.size() == permutation.size(),
27
+ "Invalid permutation specified. dimensions.size() != permutation.size() (",
28
+ dimensions.size(),
29
+ " vs. ",
30
+ permutation.size(),
31
+ ")");
32
+ TORCH_CHECK(
33
+ IsPermutation(permutation),
34
+ "Invalid permutation specified. Permutation is not permutation");
35
+ std::vector<T> output(dimensions.size());
36
+ for (const auto i : c10::irange(permutation.size())) {
37
+ output[i] = dimensions[permutation[i]];
38
+ }
39
+ return output;
40
+ }
41
+
42
+ } // namespace lazy
43
+ } // namespace torch
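A minimal sketch (not part of the commit) of the permutation helpers above, permuting an NCHW size vector to NHWC and recovering the inverse permutation; the sizes are arbitrary.

    #include <torch/csrc/lazy/core/permutation_util.h>

    #include <cstdint>
    #include <vector>

    void permutation_example() {
      std::vector<int64_t> nchw = {8, 3, 32, 64};
      std::vector<int64_t> to_nhwc = {0, 2, 3, 1};

      // output[i] = nchw[to_nhwc[i]]  ->  {8, 32, 64, 3}
      std::vector<int64_t> nhwc = torch::lazy::PermuteDimensions(to_nhwc, nchw);

      // {0, 3, 1, 2}: applying it to nhwc restores the original order.
      std::vector<int64_t> back = torch::lazy::InversePermutation(to_nhwc);

      (void)nhwc;
      (void)back;
    }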
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape.h ADDED
@@ -0,0 +1,80 @@
1
+ #pragma once
2
+
3
+ #include <ostream>
4
+ #include <vector>
5
+
6
+ #include <c10/core/Scalar.h>
7
+ #include <torch/csrc/jit/passes/symbolic_shape_analysis.h>
8
+ #include <torch/csrc/lazy/core/hash.h>
9
+
10
+ C10_DECLARE_bool(ltc_enable_symbolic_shapes);
11
+
12
+ namespace torch {
13
+ namespace lazy {
14
+
15
+ class TORCH_API Shape {
16
+ public:
17
+ Shape() = default;
18
+
19
+ Shape(
20
+ at::ScalarType scalar_type,
21
+ c10::ArrayRef<int64_t> sizes,
22
+ c10::optional<std::vector<bool>> is_symbolic = c10::nullopt);
23
+
24
+ std::string to_string() const;
25
+
26
+ c10::ScalarType scalar_type() const {
27
+ return scalar_type_;
28
+ }
29
+ void set_scalar_type(at::ScalarType value) {
30
+ scalar_type_ = value;
31
+ }
32
+
33
+ int64_t dim() const {
34
+ return sizes_.size();
35
+ }
36
+ c10::ArrayRef<int64_t> sizes() const {
37
+ return sizes_;
38
+ }
39
+ int64_t size(int64_t dim) const {
40
+ return sizes_.at(dim);
41
+ }
42
+ void set_size(int64_t dim, int64_t size) {
43
+ sizes_.at(dim) = size;
44
+ }
45
+
46
+ const c10::optional<std::vector<bool>>& is_symbolic() const {
47
+ return is_symbolic_;
48
+ }
49
+
50
+ // Makes a copy with symbolic dims applied
51
+ Shape with_symbolic_dims(
52
+ c10::optional<std::vector<bool>> symbolic_dims) const;
53
+
54
+ size_t numel() const;
55
+ hash_t hash(bool bakeInSizes) const;
56
+
57
+ bool operator==(const Shape& other) const;
58
+
59
+ private:
60
+ c10::ScalarType scalar_type_{c10::ScalarType::Undefined};
61
+
62
+ // Sizes are the upper bound sizes for a tensor, used by XLA.
63
+ std::vector<int64_t> sizes_;
64
+ // Stores which dimensions are symbolic
65
+ // If nullopt, either it hasn't been initialized or the symbolic
66
+ // dimensions are not calculable
67
+ c10::optional<std::vector<bool>> is_symbolic_ = c10::nullopt;
68
+ };
69
+
70
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Shape& shape);
71
+
72
+ TORCH_API bool symbolicShapeEnabled();
73
+ // Calculate and applies symbolic shapes onto the
74
+ // Shape objects passed to result_shapes
75
+ TORCH_API void applySymbolicShapesOnLT(
76
+ const char* schema_str,
77
+ std::vector<c10::IValue> args,
78
+ std::vector<Shape>& result_shapes);
79
+ } // namespace lazy
80
+ } // namespace torch
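A minimal sketch (not part of the commit) constructing a `Shape` and marking its batch dimension as symbolic via `with_symbolic_dims`; the dtype and sizes are arbitrary.

    #include <torch/csrc/lazy/core/shape.h>

    #include <vector>

    torch::lazy::Shape example_shape() {
      torch::lazy::Shape s(c10::ScalarType::Float, {4, 3, 224, 224});
      // Mark dim 0 (batch) as symbolic; the remaining dims stay static.
      return s.with_symbolic_dims(std::vector<bool>{true, false, false, false});
    }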
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h ADDED
@@ -0,0 +1,124 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/ScalarType.h>
5
+ #include <c10/core/SymInt.h>
6
+ #include <c10/core/SymIntArrayRef.h>
7
+ #include <c10/core/SymNodeImpl.h>
8
+ #include <c10/macros/Export.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <torch/csrc/lazy/backend/backend_data.h>
11
+ #include <torch/csrc/lazy/core/ir.h>
12
+ #include <torch/csrc/lazy/core/shape.h>
13
+ #include <torch/csrc/lazy/core/tensor.h>
14
+ #include <vector>
15
+
16
+ namespace torch {
17
+ namespace lazy {
18
+ // Turn clang-format off, as we rely on the whole signature being on one line
19
+ // for codegen.
20
+ // clang-format off
21
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
22
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self);
23
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
24
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
25
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
26
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
27
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator);
28
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator);
29
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
30
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
31
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);
32
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_cholesky(const at::Tensor & self, bool upper);
33
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_clamp_min(const at::Tensor & self, const at::Scalar & min);
34
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format);
35
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value);
36
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
37
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
38
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse);
39
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
40
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, at::IntArrayRef size, bool implicit);
41
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
42
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_flip(const at::Tensor & self, at::IntArrayRef dims);
43
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
44
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
45
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
46
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
47
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index);
48
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_inverse(const at::Tensor & self);
49
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_isnan(const at::Tensor & self);
50
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
51
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_log_sigmoid_forward(const at::Tensor & self);
52
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logdet(const at::Tensor & self);
53
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_and(const at::Tensor & self, const at::Tensor & other);
54
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_not(const at::Tensor & self);
55
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_or(const at::Tensor & self, const at::Tensor & other);
56
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_xor(const at::Tensor & self, const at::Tensor & other);
57
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value);
58
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value);
59
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_max(const at::Tensor & self);
60
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype);
61
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_min(const at::Tensor & self);
62
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_mv(const at::Tensor & self, const at::Tensor & vec);
63
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps);
64
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
65
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout(const at::Tensor & input, double p, c10::optional<bool> train);
66
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale);
67
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps);
68
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
69
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
70
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
71
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
72
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_nonzero(const at::Tensor & self);
73
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
74
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, c10::optional<at::Generator> generator);
75
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator);
76
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator);
77
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
78
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
79
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);
80
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta);
81
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_sort(const at::Tensor & self, int64_t dim, bool descending);
82
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_stack(at::TensorList tensors, int64_t dim);
83
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, bool unbiased);
84
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
85
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim);
86
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype);
87
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape__to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format);
88
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_take(const at::Tensor & self, const at::Tensor & index);
89
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_trace(const at::Tensor & self);
90
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_zero(const at::Tensor & self);
91
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_narrow_copy_symint(const at::Tensor & self, int64_t dim, int64_t start, c10::SymInt length);
92
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish(const at::Tensor & self);
93
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self);
94
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_selu(const at::Tensor & self);
95
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator);
96
+
97
+ // Non-Native ops
98
+ TORCH_API std::vector<Shape> compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type);
99
+ TORCH_API std::vector<Shape> compute_shape_expand(const Output& input0, const std::vector<int64_t>& size, const bool& is_scalar_expand);
100
+ TORCH_API std::vector<Shape> compute_shape_view(const Output& input0, const std::vector<int64_t>& output_sizes);
101
+ TORCH_API std::vector<Shape> compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const c10::optional<at::ScalarType>& stype);
102
+
103
+ // View Ops
104
+ // (Now that the functionalization pass is used, we should kill these in a later PR)
105
+ TORCH_API std::vector<Shape> compute_shape_as_strided_view_update(const Output& target, const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
106
+ TORCH_API std::vector<Shape> compute_shape_as_strided(const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
107
+ TORCH_API std::vector<Shape> compute_shape_diagonal_view_update(const Output& target, const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
108
+ TORCH_API std::vector<Shape> compute_shape_diagonal(const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
109
+ TORCH_API std::vector<Shape> compute_shape_narrow_view_update(const Output& input, const Output& source, const std::vector<int64_t>& base_indices);
110
+ TORCH_API std::vector<Shape> compute_shape_narrow(const Output& input, const std::vector<int64_t>& base_indices, const std::vector<int64_t>& sizes);
111
+ TORCH_API std::vector<Shape> compute_shape_permute(const Output& input, const std::vector<int64_t>& dims);
112
+ TORCH_API std::vector<Shape> compute_shape_resize(const Output& input, const std::vector<int64_t>& size);
113
+ TORCH_API std::vector<Shape> compute_shape_select_view_update(const Output& target, const Output& source, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
114
+ TORCH_API std::vector<Shape> compute_shape_select(const Output& input, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
115
+ TORCH_API std::vector<Shape> compute_shape_squeeze(const Output& input, const int& dim);
116
+ TORCH_API std::vector<Shape> compute_shape_unsqueeze(const Output& input, const int& dim);
117
+
118
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index);
119
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2);
120
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step);
121
+ TORCH_API std::vector<torch::lazy::Shape> compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset);
122
+ // clang-format on
123
+ } // namespace lazy
124
+ } // namespace torch
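Each declaration above follows the same contract: given the same arguments as the corresponding ATen operator, return one torch::lazy::Shape (scalar type plus sizes) per output without executing the op. As a hedged illustration of that contract, a plausible definition for one of the simpler entries, compute_shape_relu, is sketched below; the actual implementation lives elsewhere in the lazy core and may differ.

```cpp
// Illustrative sketch only; element-wise ops such as relu simply propagate
// the input's dtype and sizes to their single output.
#include <ATen/ATen.h>
#include <torch/csrc/lazy/core/shape.h>

namespace torch {
namespace lazy {

std::vector<Shape> compute_shape_relu(const at::Tensor& self) {
  // One output with the same scalar type and sizes as the input.
  return {Shape(self.scalar_type(), self.sizes().vec())};
}

} // namespace lazy
} // namespace torch
```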
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h ADDED
@@ -0,0 +1,259 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymNodeImpl.h>
4
+ #include <c10/util/intrusive_ptr.h>
5
+ #include <torch/csrc/lazy/backend/backend_data.h>
6
+ #include <torch/csrc/lazy/backend/backend_device.h>
7
+ #include <torch/csrc/lazy/core/ir.h>
8
+ #include <torch/csrc/lazy/core/util.h>
9
+
10
+ namespace torch {
11
+ namespace lazy {
12
+
13
+ class TORCH_API SymNodeImpl : public c10::SymNodeImpl {
14
+ public:
15
+ SymNodeImpl(NodePtr ptr) : node_(std::move(ptr)){};
16
+ NodePtr node_;
17
+ };
18
+
19
+ class LazyTensor;
20
+ using LazyTensorPtr = c10::intrusive_ptr<LazyTensor>;
21
+
22
+ class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
23
+ public:
24
+ // This is the core lazy tensor data structure where all the tensor data is
25
+ // held. The lazy tensor is nothing more than a shared pointer to a Data
26
+ // object.
27
+ struct Data {
28
+ Data(BackendDataPtr handle, BackendDevice device)
29
+ : handle(std::move(handle)),
30
+ device(std::move(device)),
31
+ unique_id(GetNextTensorId()) {}
32
+ Data(Value ir_value, BackendDevice device)
33
+ : ir_value(std::move(ir_value)),
34
+ device(std::move(device)),
35
+ unique_id(GetNextTensorId()) {}
36
+ Data(at::Tensor tensor_data, BackendDevice device)
37
+ : tensor_data(std::move(tensor_data)),
38
+ device(std::move(device)),
39
+ unique_id(GetNextTensorId()) {}
40
+ // TODO(alanwaketan): Remove this ctor. This is a
41
+ // temporary ctor to ease XLA LTC migration. It depends on
42
+ // XLA's Functionalization integration.
43
+ Data(BackendDevice device)
44
+ : device(std::move(device)), unique_id(GetNextTensorId()) {}
45
+
46
+ virtual ~Data();
47
+
48
+ BackendDataPtr handle;
49
+ Value ir_value;
50
+ c10::optional<at::Tensor> tensor_data;
51
+ const BackendDevice device;
52
+ const int64_t unique_id = 0;
53
+ size_t generation = 1;
54
+ };
55
+
56
+ static LazyTensorPtr Create(
57
+ const at::Tensor& tensor,
58
+ const BackendDevice& device);
59
+ static LazyTensorPtr Create(Value ir_value, const BackendDevice& device);
60
+ static LazyTensorPtr Create(BackendDataPtr handle);
61
+ static LazyTensorPtr Create(std::shared_ptr<Data> data);
62
+
63
+ // The default ctor previously created a null LazyTensor (one with no 'data'
64
+ // obj). Creating a null LazyTensor is no longer possible, since the same can
65
+ // be achieved by creating a null LazyTensorPtr and it is way too confusing to
66
+ // have to check both lazy_tensor_ptr && *lazy_tensor_ptr, so everywhere that
67
+ // used to rely on a LazyTensor obj with a null Data can now rely on a null
68
+ // LazyTensorPtr instead.
69
+ LazyTensor() = delete;
70
+ LazyTensor(const LazyTensor&) = default;
71
+ LazyTensor(LazyTensor&&) noexcept = default;
72
+
73
+ ~LazyTensor() override = default;
74
+
75
+ size_t generation() const {
76
+ return data()->generation;
77
+ }
78
+
79
+ // Override it to use your own Shape.
80
+ virtual int64_t size(int64_t dim) const;
81
+
82
+ // Override it to use your own graph executor.
83
+ virtual at::Tensor ToTensor(bool detached);
84
+
85
+ void ShallowCopyTo(LazyTensorPtr dest) const;
86
+
87
+ // Assigns the tensor value to the lazy tensor.
88
+ void SetTensor(at::Tensor tensor);
89
+
90
+ void UpdateFromTensor(at::Tensor tensor, bool sync);
91
+ void UpdateFromTensorOut(at::Tensor tensor);
92
+ void UpdateFromTensorOut(const LazyTensorPtr& tensor);
93
+
94
+ const std::shared_ptr<Data>& data() const;
95
+
96
+ // Override it to use your own type conversion.
97
+ virtual at::ScalarType dtype() const;
98
+
99
+ MaybeRef<Shape> shape() const;
100
+
101
+ const BackendDevice& GetDevice() const;
102
+ int64_t GetUniqueId() const;
103
+
104
+ // Fetches the data behind the tensor. If the tensor has a graph defining
105
+ // its current value, executes the graph and fetches the data result.
106
+ BackendDataPtr GetDataHandle();
107
+
108
+ // Fetches the current value of the data, which can be missing (nullptr)
109
+ // in case the tensor has a graph defining its current value.
110
+ BackendDataPtr CurrentDataHandle() const;
111
+
112
+ void SetDataHandle(BackendDataPtr handle);
113
+ void SetDataHandle(BackendDataPtr handle, bool sync);
114
+
115
+ // Retrieves the current IR Node, or nullptr in case no active IR Node is
116
+ // available.
117
+ Value CurrentIrValue() const;
118
+
119
+ // Retrieves the IR Node representing this LazyTensor. One will be created if
120
+ // missing. Note that although this is a const API, it actually changes the
121
+ // internal state of the object.
122
+ Value GetIrValue() const;
123
+
124
+ void SetIrValue(Value ir_value);
125
+ void SetInPlaceIrValue(Value ir_value);
126
+
127
+ c10::optional<at::Tensor> CurrentTensorData() const;
128
+
129
+ std::vector<LazyTensorPtr> MakeOutputTensors(NodePtr node) const;
130
+
131
+ LazyTensorPtr CopyTensorToDevice(const BackendDevice& device);
132
+
133
+ // Applies the queue of operations in preparation for using the data.
134
+ // Override it to use your own graph executor.
135
+ virtual void ApplyPendingGraph();
136
+
137
+ // Override it to set extra information.
138
+ virtual void AssignIrValue(Value ir_value) const;
139
+
140
+ protected:
141
+ explicit LazyTensor(std::shared_ptr<Data> data);
142
+
143
+ void SetTensorData(at::Tensor tensor_data);
144
+
145
+ // We build a graph accumulating operations, but at a given point we
146
+ // need to force a rendering, otherwise the graph can grow without control.
147
+ // Think:
148
+ // for i in range(0, 100000):
149
+ // a = a + b
150
+ void TryLimitGraphSize();
151
+
152
+ // Override it to instantiate your own data.
153
+ virtual Value GetIrValueForTensor(
154
+ const at::Tensor& tensor,
155
+ const BackendDevice& device) const;
156
+
157
+ Value CreateTensorNode(BackendDataPtr data, bool read_only) const;
158
+
159
+ private:
160
+ LazyTensor(const at::Tensor& tensor, const BackendDevice& device);
161
+ LazyTensor(Value ir_value, const BackendDevice& device);
162
+ explicit LazyTensor(BackendDataPtr handle);
163
+
164
+ static int64_t GetNextTensorId();
165
+
166
+ std::shared_ptr<Data> data_;
167
+ };
168
+
169
+ // Utils to convert at::Tensor to LazyTensor, and vice versa.
170
+
171
+ // Section 0: c10::TensorList ==> lazy::TensorList
172
+ // note: GetTensorList is not totally parallel to GetLtcTensor; A TensorList
173
+ // skips
174
+ // the LazyTensor wrappers, assuming that the list of underlying IR nodes
175
+ // is actually more useful for downstream computations. TBD.
176
+ TORCH_API torch::lazy::Value GetTensorList(at::ITensorListRef tensors);
177
+
178
+ // Section 1: at::Tensor => LazyTensor.
179
+ // Extracts the LazyTensor out of an at::Tensor. Returns a null LazyTensor
180
+ // if the tensor is not a lazy tensor.
181
+ TORCH_API LazyTensorPtr TryGetLtcTensor(const at::Tensor& tensor);
182
+
183
+ // Extracts the LazyTensor out of an at::Tensor. Throws an exception
184
+ // if the tensor is not a lazy tensor.
185
+ TORCH_API LazyTensorPtr GetLtcTensor(const at::Tensor& tensor);
186
+
187
+ // Same as above, applied to a list of tensors.
188
+ TORCH_API std::vector<LazyTensorPtr> GetLtcTensors(
189
+ c10::ArrayRef<at::Tensor> tensors);
190
+
191
+ // If tensor is a lazy tensor type, returns the LazyTensor embedded within it,
192
+ // otherwise creates a new lazy tensor type with tensor as data.
193
+ TORCH_API LazyTensorPtr GetOrCreateLtcTensor(
194
+ const c10::optional<at::Tensor>& tensor,
195
+ const BackendDevice& device);
196
+
197
+ TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber(
198
+ const at::Tensor& tensor,
199
+ const BackendDevice& device);
200
+
201
+ // Section 2: LazyTensor => at::Tensor.
202
+ // Creates an ATen tensor from an LazyTensor.
203
+ TORCH_API at::Tensor CreateAtenFromLtcTensor(const LazyTensorPtr& ltc_tensor);
204
+ TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor);
205
+
206
+ // Note [Lazy Tensor Functionalization]
207
+ // The functionalization pass is implemented by wrapping all TensorImpl
208
+ // objects in C++ with an extra FunctionalTensorWrapper object,
209
+ // that knows how to perform functionalization
210
+ //
211
+ // Certain functions in the aten API serve as entry/exit points for
212
+ // functionalization, where we need to perform the wrapping/unwrapping:
213
+ // - aten::to.device
214
+ // - aten::empty
215
+
216
+ // Given a non-lazy tensor, this function creates a lazy tensor on the specified
217
+ // (lazy) device. The functionalize_output determines whether or not we should
218
+ // wrap the output in a "functional wrapper".
219
+ //
220
+ // How do you know whether to pass true/false for functionalize_output?
221
+ //
222
+ // Case 1: nonlazy -> lazy
223
+ // If you're implementing a function that takes in nonlazy tensors and returns
224
+ // lazy tensors, then you should think of that function as an "entrypoint" to
225
+ // functionalization, and use functionalize_output=true. Examples include:
226
+ // - factory functions (the LTC kernel for at::empty)
227
+ // - CPU -> Lazy device conversions (the LTC kernel for at::to_device)
228
+ //
229
+ // Case 2: lazy -> lazy
230
+ // If you're implementing a function that takes in lazy tensors and returns
231
+ // lazy tensors,
232
+ // **but** requires creating lazy tensors internally,
233
+ // then you can assume that the current function is running inside of some
234
+ // outer context where functionalization is already running, that will take
235
+ // care of doing the wrapping for you, and use functionalize_output=true
236
+ // Examples include:
237
+ // - CPU fallback (takes in lazy tensors, converts to cpu, calls kernel,
238
+ // converts returns back to lazy tensors).
239
+ TORCH_API at::Tensor to_lazy_tensor(
240
+ const at::Tensor& self,
241
+ const c10::TensorOptions& options,
242
+ at::Device device,
243
+ bool non_blocking,
244
+ bool functionalize_output);
245
+
246
+ template <size_t... Indices>
247
+ auto TupleAtenFromLtcTensorsImpl(
248
+ const std::vector<LazyTensorPtr>& tensors,
249
+ std::index_sequence<Indices...>) {
250
+ return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...);
251
+ }
252
+
253
+ template <size_t N>
254
+ auto TupleAtenFromLtcTensors(const std::vector<LazyTensorPtr>& tensors) {
255
+ return TupleAtenFromLtcTensorsImpl(tensors, std::make_index_sequence<N>{});
256
+ }
257
+
258
+ } // namespace lazy
259
+ } // namespace torch
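The conversion helpers declared in this header are easiest to read as a round trip. The sketch below is illustrative only: the function name is hypothetical, and it assumes a lazy backend has already been registered and initialized and that `device` names a valid lazy device.

```cpp
// Hypothetical usage sketch of the at::Tensor <-> LazyTensor helpers above.
#include <torch/csrc/lazy/core/tensor.h>

void LazyRoundTripSketch(const at::Tensor& cpu_tensor,
                         const torch::lazy::BackendDevice& device) {
  using namespace torch::lazy;
  // Wrap an eager tensor into a lazy tensor on the given device.
  LazyTensorPtr lazy = LazyTensor::Create(cpu_tensor, device);
  // Re-expose it to ATen code as a regular at::Tensor.
  at::Tensor aten_view = CreateAtenFromLtcTensor(lazy);
  // Extract the LazyTensor back out (throws if the tensor is not lazy).
  LazyTensorPtr same = GetLtcTensor(aten_view);
  // Materialize the value, executing any pending graph.
  at::Tensor materialized = same->ToTensor(/*detached=*/true);
  (void)materialized;
}
```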
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_impl.h ADDED
@@ -0,0 +1,62 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/SymIntArrayRef.h>
5
+ #include <c10/core/TensorImpl.h>
6
+
7
+ #include <torch/csrc/lazy/core/tensor.h>
8
+
9
+ namespace torch {
10
+ namespace lazy {
11
+
12
+ // Tensor implementation class used to be fed to the at::Tensor.
13
+ // Its scope is just to handle an LazyTensor.
14
+ class TORCH_API LTCTensorImpl final : public c10::TensorImpl {
15
+ public:
16
+ explicit LTCTensorImpl(const LazyTensorPtr& tensor);
17
+ explicit LTCTensorImpl(const LazyTensor& tensor);
18
+ explicit LTCTensorImpl(LazyTensor&& tensor);
19
+
20
+ LazyTensorPtr tensor() {
21
+ return tensor_;
22
+ }
23
+
24
+ void set_tensor(const LazyTensorPtr& lazy_tensor);
25
+
26
+ void force_refresh_sizes() {
27
+ generation_ = 0;
28
+ }
29
+
30
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
31
+ const c10::VariableVersion& version_counter,
32
+ bool allow_tensor_metadata_change) const override;
33
+
34
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
35
+ c10::VariableVersion&& version_counter,
36
+ bool allow_tensor_metadata_change) const override;
37
+
38
+ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
39
+
40
+ at::IntArrayRef sizes_custom() const override;
41
+ at::IntArrayRef strides_custom() const override;
42
+ int64_t numel_custom() const override;
43
+ int64_t storage_offset_custom() const override;
44
+ int64_t dim_custom() const override;
45
+ bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
46
+ bool is_strides_like_custom(at::MemoryFormat memory_format) const override;
47
+ bool is_non_overlapping_and_dense_custom() const override;
48
+
49
+ c10::SymIntArrayRef sym_sizes_custom() const override;
50
+ c10::SymIntArrayRef sym_strides_custom() const override;
51
+ c10::SymInt sym_numel_custom() const override;
52
+
53
+ private:
54
+ void setup_size_properties();
55
+
56
+ LazyTensorPtr tensor_;
57
+ mutable c10::optional<std::vector<c10::SymInt>> sym_sizes_;
58
+ size_t generation_{0};
59
+ };
60
+
61
+ } // namespace lazy
62
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h ADDED
@@ -0,0 +1,78 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/backend/backend_interface.h>
4
+ #include <torch/csrc/lazy/core/shape.h>
5
+
6
+ #include <ATen/FunctionalTensorWrapper.h>
7
+
8
+ #include <string>
9
+ #include <vector>
10
+
11
+ namespace torch {
12
+ namespace lazy {
13
+
14
+ TORCH_API std::vector<int64_t> ComputeArrayStrides(
15
+ c10::ArrayRef<int64_t> sizes);
16
+
17
+ TORCH_API std::vector<at::Tensor> DataHandlesToTensors(
18
+ c10::ArrayRef<BackendDataPtr> data_handles,
19
+ at::ScalarType dest_element_type);
20
+
21
+ // Uploads an ATEN tensor data to the device and fetches the corresponding
22
+ // device data handle.
23
+ TORCH_API BackendDataPtr
24
+ TensorToDataHandle(const at::Tensor& tensor, const BackendDevice& device);
25
+
26
+ // Retrieves the device data handles by parallel uploading data onto the
27
+ // corresponding devices.
28
+ TORCH_API std::vector<BackendDataPtr> CreateTensorsData(
29
+ const std::vector<at::Tensor>& tensors,
30
+ const std::vector<BackendDevice>& devices);
31
+
32
+ // Makes a deep copy of an ATEN tensor.
33
+ inline at::Tensor CopyTensor(const at::Tensor& ref) {
34
+ return ref.to(ref.options(), /*non_blocking=*/false, /*copy=*/true);
35
+ }
36
+
37
+ // Same as above, with an additional cast.
38
+ inline at::Tensor CopyTensor(
39
+ const at::Tensor& ref,
40
+ at::ScalarType dest_type,
41
+ bool copy = true) {
42
+ return ref.to(ref.options().dtype(dest_type), /*non_blocking=*/false, copy);
43
+ }
44
+
45
+ template <typename T, typename S>
46
+ T OptionalOr(const c10::optional<S>& value, T defval) {
47
+ return value ? static_cast<T>(*value) : defval;
48
+ }
49
+
50
+ // Unwraps tensor to target dtype if it's a wrapped number.
51
+ inline at::Tensor UnwrapNumber(const at::Tensor& tensor, at::ScalarType dtype) {
52
+ return tensor.unsafeGetTensorImpl()->is_wrapped_number() ? tensor.to(dtype)
53
+ : tensor;
54
+ }
55
+
56
+ template <typename T>
57
+ at::Scalar MakeIntScalar(T value) {
58
+ return at::Scalar(static_cast<int64_t>(value));
59
+ }
60
+
61
+ // Routing values to device data maximizes the chances of compilation cache
62
+ // hits, but it can prevent the compiler from performing optimizations. So tensor
63
+ // values that are within a given set are routed to constant scalars if this
64
+ // API returns true.
65
+ TORCH_API bool IsSpecialScalar(const at::Scalar& value);
66
+
67
+ // Note: returns a reference instead of a fresh tensor to avoid refcount bumps.
68
+ inline const at::Tensor& maybe_unwrap_functional(const at::Tensor& tensor) {
69
+ if (at::functionalization::impl::isFunctionalTensor(tensor)) {
70
+ return at::functionalization::impl::unsafeGetFunctionalWrapper(tensor)
71
+ ->value();
72
+ } else {
73
+ return tensor;
74
+ }
75
+ }
76
+
77
+ } // namespace lazy
78
+ } // namespace torch
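A brief usage sketch for a few of the helpers above; the function name is hypothetical and chosen only for illustration.

```cpp
#include <torch/csrc/lazy/core/tensor_util.h>

void TensorUtilSketch(const at::Tensor& t, c10::optional<int64_t> maybe_dim) {
  // Deep copy, and a deep copy combined with a cast to double.
  at::Tensor copy = torch::lazy::CopyTensor(t);
  at::Tensor as_double = torch::lazy::CopyTensor(t, at::kDouble);
  // Fall back to dimension 0 when the optional is empty.
  int64_t dim = torch::lazy::OptionalOr<int64_t>(maybe_dim, 0);
  (void)copy;
  (void)as_double;
  (void)dim;
}
```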
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/trie.h ADDED
@@ -0,0 +1,79 @@
1
+ #pragma once
2
+
3
+ #include <atomic>
4
+ #include <list>
5
+
6
+ #include <c10/core/ScalarType.h>
7
+ #include <torch/csrc/lazy/core/ir.h>
8
+ #include <torch/csrc/lazy/core/metrics.h>
9
+
10
+ namespace torch {
11
+ namespace lazy {
12
+
13
+ struct TORCH_API TrieNode {
14
+ static size_t GetNextUniqueId() {
15
+ static thread_local size_t id_generator = 0;
16
+ return id_generator++;
17
+ }
18
+
19
+ size_t unique_id;
20
+ size_t hit_counter;
21
+ NodePtr ir_node;
22
+ std::list<std::shared_ptr<TrieNode>> successors;
23
+
24
+ TrieNode() : unique_id(GetNextUniqueId()), hit_counter(0), ir_node(nullptr) {}
25
+ explicit TrieNode(NodePtr node)
26
+ : unique_id(GetNextUniqueId()),
27
+ hit_counter(0),
28
+ ir_node(std::move(node)) {}
29
+ };
30
+
31
+ class TORCH_API TrieCache {
32
+ public:
33
+ static TrieCache* Get();
34
+
35
+ TrieNode* Current() const;
36
+ // Take an iterator as the input because we want to move the corresponding
37
+ // node in the successor list to achieve an LRU caching effect.
38
+ void SetCurrent(std::list<std::shared_ptr<TrieNode>>::iterator& iter);
39
+ // Used in MarkStep to indicate the end of one tracing
40
+ void ResetCurrent();
41
+
42
+ // Create a new TrieNode for ir_node and insert into the TrieCache
43
+ void Insert(NodePtr ir_node);
44
+
45
+ // Clear all TrieCache nodes
46
+ // TODO: Because we don't expect users to explicitly call this function via
47
+ // a Python API, we may need to introduce a threshold on the size of the cache
48
+ // to avoid holding tensors for too long.
49
+ void Clear();
50
+
51
+ void DumpToDotFile(const std::string& file_name);
52
+
53
+ private:
54
+ TrieCache();
55
+
56
+ std::shared_ptr<TrieNode> root_;
57
+ TrieNode* current_;
58
+ };
59
+
60
+ template <typename T, typename... Args>
61
+ NodePtr LookupNodeFromTrieCache(Args&&... args) {
62
+ auto& successors = TrieCache::Get()->Current()->successors;
63
+ for (auto it = successors.begin(); it != successors.end(); it++) {
64
+ NodePtr ir_node = (*it)->ir_node;
65
+ const T* concrete_node = NodeCast<T>(ir_node.get());
66
+ if (concrete_node &&
67
+ concrete_node->CanBeReused(std::forward<Args>(args)...)) {
68
+ TORCH_LAZY_COUNTER(
69
+ "IrNodeReused_" + c10::demangle((typeid(T).name())), 1);
70
+ (*it)->hit_counter++;
71
+ TrieCache::Get()->SetCurrent(it);
72
+ return ir_node;
73
+ }
74
+ }
75
+ return nullptr;
76
+ }
77
+
78
+ } // namespace lazy
79
+ } // namespace torch
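LookupNodeFromTrieCache is only the lookup half of IR-node reuse: it scans the current TrieNode's successors for a node of type T whose CanBeReused(args...) accepts the same arguments, bumps its hit counter, and advances the cache cursor on a hit. Below is a hedged sketch of the companion create-and-insert path; the helper name is hypothetical, and MakeNode<T> is assumed to come from torch/csrc/lazy/core/ir_builder.h.

```cpp
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/trie.h>

template <typename T, typename... Args>
torch::lazy::NodePtr ReuseOrCreateNodeSketch(Args&&... args) {
  using namespace torch::lazy;
  // Try to reuse a node recorded during a previous trace at this position.
  if (NodePtr cached = LookupNodeFromTrieCache<T>(std::forward<Args>(args)...)) {
    return cached;
  }
  // Cache miss: build a fresh IR node and record it so that the next trace
  // starting from the same trie position can reuse it.
  NodePtr node = MakeNode<T>(std::forward<Args>(args)...);
  TrieCache::Get()->Insert(node);
  return node;
}
```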
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h ADDED
@@ -0,0 +1,56 @@
1
+ /**
2
+ * Unique in this file is adapted from PyTorch/XLA
3
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/unique.h
4
+ */
5
+
6
+ #pragma once
7
+
8
+ #include <c10/util/Optional.h>
9
+
10
+ #include <functional>
11
+ #include <set>
12
+
13
+ namespace torch {
14
+ namespace lazy {
15
+
16
+ // Helper class to allow tracking zero or more things, which should be forcibly
17
+ // be one only thing.
18
+ template <typename T, typename C = std::equal_to<T>>
19
+ class Unique {
20
+ public:
21
+ std::pair<bool, const T&> set(const T& value) {
22
+ if (value_) {
23
+ TORCH_CHECK(C()(*value_, value), "'", *value_, "' vs '", value);
24
+ return std::pair<bool, const T&>(false, *value_);
25
+ }
26
+ value_ = value;
27
+ return std::pair<bool, const T&>(true, *value_);
28
+ }
29
+
30
+ operator bool() const {
31
+ return value_.has_value();
32
+ }
33
+ operator const T&() const {
34
+ return *value_;
35
+ }
36
+ const T& operator*() const {
37
+ return *value_;
38
+ }
39
+ const T* operator->() const {
40
+ return value_.operator->();
41
+ }
42
+
43
+ std::set<T> AsSet() const {
44
+ std::set<T> vset;
45
+ if (value_.has_value()) {
46
+ vset.insert(*value_);
47
+ }
48
+ return vset;
49
+ }
50
+
51
+ private:
52
+ c10::optional<T> value_;
53
+ };
54
+
55
+ } // namespace lazy
56
+ } // namespace torch
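A short usage sketch for Unique; the function name is hypothetical and it assumes a non-empty input. The typical pattern is to fold a collection into it and let the TORCH_CHECK inside set() reject mismatches, for example when asserting that a group of lazy tensors share one device.

```cpp
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/core/unique.h>

torch::lazy::BackendDevice CommonDeviceSketch(
    const std::vector<torch::lazy::LazyTensorPtr>& tensors) {
  torch::lazy::Unique<torch::lazy::BackendDevice> unique_device;
  for (const auto& tensor : tensors) {
    // set() checks (via the comparator) that every device equals the first.
    unique_device.set(tensor->GetDevice());
  }
  // Dereference yields the single tracked value (requires at least one set()).
  return *unique_device;
}
```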
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h ADDED
@@ -0,0 +1,15 @@
1
+ #pragma once
2
+ #include <c10/util/Optional.h>
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/lazy/core/ir_metadata.h>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace lazy {
9
+
10
+ c10::optional<SourceLocation> TORCH_PYTHON_API GetPythonFrameTop();
11
+
12
+ std::vector<SourceLocation> TORCH_PYTHON_API GetPythonFrames();
13
+
14
+ } // namespace lazy
15
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+ #include <c10/util/Flags.h>
3
+
4
+ // TODO(whc) unclear if this is useful, has only been tested as true
5
+ C10_DECLARE_bool(torch_lazy_ts_tensor_update_sync);
6
+
7
+ C10_DECLARE_bool(torch_lazy_ts_cuda);
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/dynamic_ir.h ADDED
@@ -0,0 +1,85 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/symbol.h>
4
+
5
+ #include <functional>
6
+ #include <memory>
7
+ #include <set>
8
+ #include <string>
9
+ #include <unordered_map>
10
+ #include <unordered_set>
11
+ #include <utility>
12
+ #include <vector>
13
+
14
+ #include <c10/core/ScalarType.h>
15
+ #include <c10/util/Flags.h>
16
+ #include <torch/csrc/lazy/core/dynamic_ir.h>
17
+ #include <torch/csrc/lazy/core/hash.h>
18
+ #include <torch/csrc/lazy/core/ir.h>
19
+ #include <torch/csrc/lazy/core/ir_metadata.h>
20
+ #include <torch/csrc/lazy/ts_backend/ts_node.h>
21
+
22
+ C10_DECLARE_bool(ltc_enable_dynamic_shapes);
23
+
24
+ namespace torch {
25
+ namespace lazy {
26
+
27
+ /**
28
+ * The goal of "dynamic" Nodes is to patch a hole in our tracing.
29
+ * Previously, if a user called `sizes` on a Tensor, it would leak out
30
+ * of our tracing system, as `sizes` returns a torch.Size or an int. To
31
+ * prevent this from happening, we introduce DimensionNode, a new type
32
+ * of Node that abstracts the operation of getting the dimensions of a
33
+ * Tensor.
34
+ *
35
+ * Consider the following example:
36
+ * ```
37
+ * numel = x.shape()[0] * x.shape()[1]
38
+ * ```
39
+ *
40
+ * Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
41
+ * and the multiplication of the two SizeNodes will be represented by
42
+ * a SizeMul (also a subclass of DimensionNode). Through this, we can
43
+ * prevent `numel` from being represented as a Python int and thus
44
+ * burned into the Graph.
45
+ */
46
+
47
+ // Represents the result of calling `size` on a Tensor
48
+ class TORCH_API SizeNode : public TsNode, public DimensionNode {
49
+ public:
50
+ SizeNode(Value input, size_t dim);
51
+ int64_t getStaticValue() const override;
52
+ bool isSymbolic() const override;
53
+ std::string ToString() const override;
54
+ size_t dim_ = 0;
55
+ torch::lazy::TSOpVector Lower(
56
+ std::shared_ptr<torch::jit::GraphFunction> function,
57
+ TSLoweringContext* loctx) const override;
58
+ };
59
+
60
+ class TORCH_API SizeAdd : public TsNode, public DimensionNode {
61
+ public:
62
+ SizeAdd(Value a, Value b);
63
+ int64_t getStaticValue() const override;
64
+ bool isSymbolic() const override;
65
+ std::string ToString() const override;
66
+ };
67
+
68
+ class TORCH_API SizeMul : public TsNode, public DimensionNode {
69
+ public:
70
+ SizeMul(Value a, Value b);
71
+ int64_t getStaticValue() const override;
72
+ bool isSymbolic() const override;
73
+ std::string ToString() const override;
74
+ };
75
+
76
+ class TORCH_API SizeDiv : public TsNode, public DimensionNode {
77
+ public:
78
+ SizeDiv(Value a, Value b);
79
+ int64_t getStaticValue() const override;
80
+ bool isSymbolic() const override;
81
+ std::string ToString() const override;
82
+ };
83
+
84
+ } // namespace lazy
85
+ } // namespace torch
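To connect the header comment's `numel = x.shape()[0] * x.shape()[1]` example with these node types, the hedged sketch below keeps the product symbolic in the trace instead of burning in a plain integer. The helper name is hypothetical, and MakeNode<T> is assumed to come from the core IR builder.

```cpp
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/ts_backend/dynamic_ir.h>

torch::lazy::NodePtr SymbolicNumel2dSketch(const torch::lazy::Value& x) {
  using namespace torch::lazy;
  NodePtr dim0 = MakeNode<SizeNode>(x, /*dim=*/0); // size of dimension 0
  NodePtr dim1 = MakeNode<SizeNode>(x, /*dim=*/1); // size of dimension 1
  // The multiplication stays in the IR as a SizeMul node rather than being
  // evaluated eagerly to a plain integer.
  return MakeNode<SizeMul>(Value(dim0, 0), Value(dim1, 0));
}
```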
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/internal_ops/ltc_ops.h>
4
+ #include <torch/csrc/lazy/core/ir.h>
5
+ #include <torch/csrc/lazy/core/ir_builder.h>
6
+ #include <torch/csrc/lazy/core/shape_inference.h>
7
+ #include <torch/csrc/lazy/generated/LazyNonNativeIr.h>
8
+ #include <torch/csrc/lazy/ts_backend/dynamic_ir.h>
9
+ #include <torch/csrc/lazy/ts_backend/ops/device_data.h>
10
+ #include <torch/csrc/lazy/ts_backend/ops/generic.h>
11
+ #include <torch/csrc/lazy/ts_backend/ts_node.h>
12
+
13
+ namespace torch {
14
+ namespace lazy {
15
+
16
+ struct TorchScriptIrBuilder : IrBuilder {
17
+ NodePtr MakeDeviceData(
18
+ const std::shared_ptr<BackendData>& data) const override {
19
+ return DeviceData::Create(data);
20
+ }
21
+ // TODO: Scalar node is not currently used by ts_backend. Enable reusing
22
+ // Scalar node later if needed.
23
+ NodePtr MakeScalar(const at::Scalar& value, const at::ScalarType& type)
24
+ const override {
25
+ return MakeNode<Scalar>(value, type);
26
+ }
27
+ NodePtr MakeExpand(
28
+ const Value& input0,
29
+ const std::vector<int64_t>& size,
30
+ const bool& is_scalar_expand) const override {
31
+ return ReuseOrMakeNode<Expand>(input0, size, is_scalar_expand);
32
+ }
33
+ NodePtr MakeCast(
34
+ const Value& input0,
35
+ const at::ScalarType& dtype,
36
+ const c10::optional<at::ScalarType>& stype =
37
+ c10::nullopt) const override {
38
+ return ReuseOrMakeNode<Cast>(input0, dtype, stype);
39
+ }
40
+ NodePtr MakeTensorList(const OpList& inputs) const override {
41
+ return ReuseOrMakeNode<TensorList>(inputs);
42
+ }
43
+ // Generic needs cleanup
44
+ NodePtr MakeGeneric(
45
+ const OpKind& op,
46
+ const OpList& operands,
47
+ const Shape& shape,
48
+ const size_t& num_outputs = 1,
49
+ const hash_t& hash_seed =
50
+ static_cast<uint32_t>(0x5a2d296e9)) const override {
51
+ return MakeNode<Generic>(op, operands, shape, num_outputs, hash_seed);
52
+ }
53
+
54
+ // dynamic ir nodes
55
+ // TODO: verify if IR node reusing works for Dynamic shape ops
56
+ NodePtr MakeSizeNode(const Value& input, size_t dim) const override {
57
+ return MakeNode<SizeNode>(input, dim);
58
+ }
59
+ NodePtr MakeSizeAdd(const Value& a, const Value& b) const override {
60
+ return MakeNode<SizeAdd>(a, b);
61
+ }
62
+ NodePtr MakeSizeMul(const Value& a, const Value& b) const override {
63
+ return MakeNode<SizeMul>(a, b);
64
+ }
65
+ NodePtr MakeSizeDiv(const Value& a, const Value& b) const override {
66
+ return MakeNode<SizeDiv>(a, b);
67
+ }
68
+ };
69
+
70
+ } // namespace lazy
71
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/core/tensor.h>
4
+
5
+ namespace torch {
6
+ namespace lazy {
7
+
8
+ //////////////////////////////////////////////////////////////////////////////
9
+ // ATEN operators follows here, listed in alphabetical order.
10
+ //////////////////////////////////////////////////////////////////////////////
11
+
12
+ void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src);
13
+ // Fills the input with the given value.
14
+ void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value);
15
+
16
+ } // namespace lazy
17
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/custom_function.h>
4
+
5
+ namespace torch {
6
+ namespace lazy {
7
+
8
+ struct MaxPool3dAutogradFunctionTS
9
+ : public torch::autograd::Function<MaxPool3dAutogradFunctionTS> {
10
+ static at::Tensor forward(
11
+ torch::autograd::AutogradContext* ctx,
12
+ at::Tensor self,
13
+ at::IntArrayRef kernel_size,
14
+ at::IntArrayRef stride,
15
+ at::IntArrayRef padding,
16
+ at::IntArrayRef dilation,
17
+ bool ceil_mode);
18
+ static torch::autograd::variable_list backward(
19
+ torch::autograd::AutogradContext* ctx,
20
+ torch::autograd::variable_list grad_output);
21
+ };
22
+
23
+ } // namespace lazy
24
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_backend_impl.h ADDED
@@ -0,0 +1,52 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/lazy/backend/backend_interface.h>
4
+
5
+ namespace torch {
6
+ namespace lazy {
7
+
8
+ class TORCH_API TSData : public torch::lazy::BackendData {
9
+ public:
10
+ TSData(const at::Scalar& scalar, const torch::lazy::BackendDevice& device)
11
+ : torch::lazy::BackendData(device, torch::lazy::Shape(scalar.type(), {})),
12
+ scalar(scalar) {}
13
+
14
+ TSData(
15
+ const at::Tensor& data,
16
+ const torch::lazy::Shape& shape,
17
+ const torch::lazy::BackendDevice& device)
18
+ : torch::lazy::BackendData(device, shape), data_(data) {}
19
+
20
+ TSData(
21
+ const torch::lazy::Shape& shape,
22
+ const torch::lazy::BackendDevice& device)
23
+ : torch::lazy::BackendData(device, shape) {}
24
+
25
+ Handle GetHandle() override {
26
+ return reinterpret_cast<int64_t>(this);
27
+ }
28
+
29
+ void Assign(const torch::lazy::BackendData& data) override {
30
+ data_ = static_cast<const TSData&>(data).data_;
31
+ }
32
+
33
+ bool HasValue() const override {
34
+ return data_.defined();
35
+ }
36
+
37
+ at::Tensor data() {
38
+ return data_;
39
+ }
40
+
41
+ c10::optional<at::Scalar> scalar;
42
+
43
+ private:
44
+ at::Tensor data_;
45
+ };
46
+
47
+ TORCH_API torch::lazy::BackendImplInterface* GetTSBackendImpl();
48
+
49
+ TORCH_API void InitTorchScriptBackend();
50
+
51
+ } // namespace lazy
52
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h ADDED
@@ -0,0 +1,27 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/dispatch/Dispatcher.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <functional>
7
+
8
+ namespace torch {
9
+ namespace lazy {
10
+
11
+ bool force_eager_fallback(c10::Symbol op);
12
+ void ltc_eager_fallback(
13
+ const c10::OperatorHandle& op,
14
+ torch::jit::Stack* stack);
15
+
16
+ void ts_eager_fallback(
17
+ const c10::OperatorHandle& op,
18
+ torch::jit::Stack* stack,
19
+ c10::DeviceType device_type);
20
+
21
+ // The TorchScript backend does not register itself with the PyTorch dispatcher
22
+ // until it is explicitly initialized. This function should only be called
23
+ // by the main TorchScript backend init function.
24
+ void register_ts_ltc_eager_fallback();
25
+
26
+ } // namespace lazy
27
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_lowering_context.h ADDED
@@ -0,0 +1,152 @@
1
+ #pragma once
2
+
3
+ #include <sstream>
4
+
5
+ #include <torch/csrc/api/include/torch/jit.h>
6
+ #include <torch/csrc/jit/runtime/graph_executor.h>
7
+ #include <torch/csrc/lazy/backend/lowering_context.h>
8
+ #include <torch/csrc/lazy/core/ir.h>
9
+ #include <torch/csrc/lazy/ts_backend/ts_node_lowering.h>
10
+
11
+ namespace torch {
12
+ namespace lazy {
13
+
14
+ using TSOpVector = std::vector<torch::jit::Value*>;
15
+
16
+ class TORCH_API TSComputation : public Computation {
17
+ public:
18
+ TSComputation(const std::shared_ptr<torch::jit::Graph>& graph)
19
+ : graph_(graph), graph_executor_(graph, "") {
20
+ for (torch::jit::Value* input : graph_->inputs()) {
21
+ parameter_names_.push_back(input->debugName());
22
+ }
23
+ }
24
+
25
+ int parameters_size() const override {
26
+ return parameter_names_.size();
27
+ }
28
+
29
+ const std::vector<Shape>& parameter_shapes() const override {
30
+ throw std::runtime_error(
31
+ "TODO(whc) implement TS computation shapes or change interface");
32
+ return parameter_shapes_;
33
+ }
34
+
35
+ const std::vector<std::string>& parameter_names() const override {
36
+ return parameter_names_;
37
+ }
38
+
39
+ const Shape& result_shape() const override {
40
+ throw std::runtime_error(
41
+ "TODO(whc) implement TS computation shapes or change interface");
42
+ return result_shape_;
43
+ }
44
+
45
+ const std::string to_string() const override {
46
+ std::ostringstream oss;
47
+ oss << *graph_;
48
+ return oss.str();
49
+ }
50
+
51
+ std::shared_ptr<torch::jit::Graph> graph() const {
52
+ return graph_;
53
+ }
54
+
55
+ torch::jit::GraphExecutor& graph_executor() {
56
+ return graph_executor_;
57
+ }
58
+
59
+ private:
60
+ std::shared_ptr<torch::jit::Graph> graph_;
61
+ torch::jit::GraphExecutor graph_executor_;
62
+ std::vector<std::string> parameter_names_;
63
+ std::vector<Shape> parameter_shapes_;
64
+ Shape result_shape_;
65
+ };
66
+
67
+ class TORCH_API TSLoweringContext : public LoweringContext {
68
+ public:
69
+ TSLoweringContext(const std::string& name, const BackendDevice device);
70
+
71
+ TSLoweringContext(
72
+ const std::string& name,
73
+ BackendDevice device,
74
+ c10::ArrayRef<const Node*> post_order,
75
+ Util::EmissionMap emit_status);
76
+
77
+ size_t AddResult(const Output& output) override {
78
+ return AddResult(GetOutputOp(output));
79
+ }
80
+
81
+ void AddParameter(
82
+ const torch::lazy::Output& output,
83
+ size_t index,
84
+ const Shape& shape,
85
+ const std::string& name) override {
86
+ TORCH_INTERNAL_ASSERT(false, "not implemented");
87
+ }
88
+
89
+ void Lower(const Node* node);
90
+
91
+ ComputationPtr Build() override {
92
+ for (torch::jit::Value* output : root_tuple_) {
93
+ graph_->block()->registerOutput(output);
94
+ }
95
+ return std::shared_ptr<Computation>(new TSComputation(graph_));
96
+ }
97
+
98
+ // Retrieves the lowered operation for an output. If the requested output is
99
+ // not available yet, the graph behind the output's Node is lowered, and the
100
+ // corresponding TS operation returned.
101
+ torch::jit::Value* GetOutputOp(const Output& output) {
102
+ auto it = emitted_outputs_.find(output);
103
+ if (it == emitted_outputs_.end()) {
104
+ auto post_order = Util::ComputePostOrder(output.node, &emit_status_);
105
+ for (auto node : post_order) {
106
+ Lower(node);
107
+ }
108
+ // At this point the output had better be present; otherwise there is an issue
109
+ // with the lowering code.
110
+ it = emitted_outputs_.find(output);
111
+ TORCH_CHECK(
112
+ it != emitted_outputs_.end(),
113
+ "No TS operation emitted for output: ",
114
+ output.ToString());
115
+ }
116
+ return it->second;
117
+ }
118
+
119
+ // Assigns the given TS operation to the specified output. As outputs are
120
+ // lowered in a post-order fashion, later nodes should always find their
121
+ // operands among the emitted outputs.
122
+ void AssignOutputOp(const Output& output, torch::jit::Value* op);
123
+
124
+ // If a parameter associated with data has already been declared, it will be
125
+ // returned. Otherwise a new one will be created, associated with the tensor
126
+ // held in data.
127
+ torch::jit::Value* GetParameter(BackendDataPtr data);
128
+
129
+ std::shared_ptr<torch::jit::Graph> graph() const {
130
+ return graph_;
131
+ }
132
+
133
+ private:
134
+ struct Parameter {
135
+ torch::jit::Value* param{nullptr};
136
+ size_t index = 0;
137
+ };
138
+
139
+ size_t AddResult(torch::jit::Value* op) {
140
+ root_tuple_.push_back(std::move(op));
141
+ return root_tuple_.size() - 1;
142
+ }
143
+
144
+ std::shared_ptr<torch::jit::Graph> graph_;
145
+ std::shared_ptr<torch::jit::GraphFunction> function_;
146
+ std::unordered_map<BackendData::Handle, Parameter> parameters_map_;
147
+ std::vector<torch::jit::Value*> root_tuple_;
148
+ OutputMap<torch::jit::Value*> emitted_outputs_;
149
+ };
150
+
151
+ } // namespace lazy
152
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h ADDED
@@ -0,0 +1,106 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/ArrayRef.h>
4
+ #include <torch/csrc/jit/api/function_impl.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/lazy/backend/lowering_context.h>
7
+ #include <torch/csrc/lazy/core/ir.h>
8
+ #include <torch/csrc/lazy/core/shape.h>
9
+ #include <torch/csrc/lazy/ts_backend/ts_lowering_context.h>
10
+
11
+ namespace torch {
12
+ namespace lazy {
13
+
14
+ using TSOpVector = std::vector<torch::jit::Value*>;
15
+
16
+ class TORCH_API TsNode : public lazy::Node {
17
+ public:
18
+ TsNode(
19
+ OpKind op,
20
+ OpList operands,
21
+ std::vector<Shape>&& shapes,
22
+ size_t num_outputs,
23
+ hash_t hash_seed = kHashSeed);
24
+
25
+ TsNode(
26
+ OpKind op,
27
+ OpList operands,
28
+ const std::function<Shape()>& shape_fn,
29
+ size_t num_outputs,
30
+ hash_t hash_seed = kHashSeed);
31
+
32
+ TsNode(
33
+ OpKind op,
34
+ OpList operands,
35
+ size_t num_outputs,
36
+ hash_t hash_seed = kHashSeed);
37
+
38
+ TsNode(
39
+ OpKind op,
40
+ Shape shape,
41
+ size_t num_outputs,
42
+ hash_t hash_seed = kHashSeed);
43
+
44
+ ~TsNode() override = default;
45
+
46
+ hash_t hash() const override;
47
+
48
+ hash_t shapeHash() const override;
49
+
50
+ const std::string getPythonStacktrace() const;
51
+
52
+ // Lower is a backend-specific method since it returns a backend specific
53
+ // type. hence, it is convenient to define it differently per-backend rather
54
+ // than at Node API
55
+ virtual TSOpVector Lower(
56
+ std::shared_ptr<torch::jit::GraphFunction> function,
57
+ TSLoweringContext* loctx) const;
58
+
59
+ private:
60
+ // The hash of the dag WITH size info. Used for shape caching
61
+ hash_t shape_hash_;
62
+ // The hash of the dag used to look up the compiled graph by a hash
63
+ // in this case, we will use the dag hash WITHOUT size info if dynamic shape
64
+ // is enabled and use the dag hash WITH size info otherwise.
65
+ hash_t dag_hash_;
66
+ };
67
+
68
+ // Note: this OpKind is separate from ltc_ops.h since it would be a circular
69
+ // import otherwise, I like leaving TensorList in this file, and I think most of
70
+ // ltc_ops special cases will be deleted anyway
71
+ const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list");
72
+
73
+ // TensorList represents an at::TensorList which is a vector[Tensor] but is also
74
+ // a first-class IValue and can be fed as a single input to a TS program. It is
75
+ // much easier to handle TensorLists in Lazy Tensor code if they are represented
76
+ // as a single Node so there can be more than one TensorList and more than one
77
+ // Tensor side-by-side as operands to an op.
78
+ //
79
+ // Note: shape is undefined for TensorList. We assert in some places that
80
+ // #shapes matches #outputs and this stems from
81
+ // the fact that currently all IR nodes represent tensors (there is no
82
+ // type system for this IR). Becuase of this, TensorList is a bit of a
83
+ // hack.
84
+ //
85
+ // TODO(whc) once Shape() API is moved to Node base, also make it virtual, and
86
+ // then implement it as NotImplemented for TensorList, also fixing the assertion
87
+ // that would fail.
88
+ struct TORCH_API TensorList : public TsNode {
89
+ static OpKind ClassOpKind() {
90
+ return tensor_list_opkind;
91
+ }
92
+
93
+ TensorList() = delete;
94
+ TensorList(OpList values);
95
+
96
+ bool CanBeReused(OpList values) const {
97
+ return operands() == std::vector<Output>(values.begin(), values.end());
98
+ }
99
+
100
+ TSOpVector Lower(
101
+ std::shared_ptr<torch::jit::GraphFunction> function,
102
+ TSLoweringContext* loctx) const override;
103
+ };
104
+
105
+ } // namespace lazy
106
+ } // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node_lowering.h ADDED
@@ -0,0 +1,17 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/api/include/torch/jit.h>
4
+ #include <torch/csrc/lazy/backend/lowering_context.h>
5
+
6
+ namespace torch {
7
+ namespace lazy {
8
+ using TSOpVector = std::vector<torch::jit::Value*>;
9
+
10
+ TORCH_API TSOpVector LowerTSBuiltin(
11
+ std::shared_ptr<torch::jit::GraphFunction> function,
12
+ c10::Symbol sym,
13
+ const std::vector<torch::jit::NamedValue>& arguments,
14
+ const std::vector<torch::jit::NamedValue>& kwarguments = {});
15
+
16
+ } // namespace lazy
17
+ } // namespace torch
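For context on how LowerTSBuiltin is typically wired into a node's Lower(), the hedged sketch below lowers a single-operand node by handing its already-lowered operand to the matching TorchScript builtin. The free function is hypothetical, and the node is assumed to have aten::relu semantics; real lowerings are generated into the TS backend sources.

```cpp
#include <torch/csrc/lazy/ts_backend/ts_lowering_context.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>
#include <torch/csrc/lazy/ts_backend/ts_node_lowering.h>

torch::lazy::TSOpVector LowerUnaryBuiltinSketch(
    const torch::lazy::TsNode& node,
    std::shared_ptr<torch::jit::GraphFunction> function,
    torch::lazy::TSLoweringContext* loctx) {
  std::vector<torch::jit::NamedValue> arguments;
  // Fetch the TS value for the node's first operand; the lowering context
  // lowers the operand's subgraph on demand if it has not been emitted yet.
  arguments.emplace_back(loctx->GetOutputOp(node.operand(0)));
  return torch::lazy::LowerTSBuiltin(
      function, c10::Symbol::fromQualString("aten::relu"), arguments);
}
```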
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ #include <onnx/onnx_pb.h>
4
+
5
+ namespace torch::onnx {
6
+
7
+ // The following constants are defined here to avoid breaking Meta's internal
8
+ // usage of ONNX which pre-dates ONNX 1.14 and thus does not support FLOAT8:
9
+ // cf. https://github.com/pytorch/pytorch/pull/106379#issuecomment-1675189340
10
+ // -abock, 2023-08-25
11
+ //
12
+ // ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FN
13
+ constexpr auto TensorProto_DataType_FLOAT8E4M3FN =
14
+ static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(17);
15
+ // ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2
16
+ constexpr auto TensorProto_DataType_FLOAT8E5M2 =
17
+ static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(19);
18
+
19
+ } // namespace torch::onnx
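A minimal, hedged example of where such a constant is consumed; the function is hypothetical, and proto.set_data_type is the standard protobuf-generated setter on TensorProto.

```cpp
#include <torch/csrc/onnx/back_compat.h>

// Tag a TensorProto as FLOAT8E4M3FN using the backward-compatible constant
// instead of the (possibly unavailable) ONNX enum name.
void MarkAsFloat8E4M3FNSketch(::ONNX_NAMESPACE::TensorProto& proto) {
  proto.set_data_type(torch::onnx::TensorProto_DataType_FLOAT8E4M3FN);
}
```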