applied-ai-018 committed on
Commit a8e9d15 · verified · 1 Parent(s): d1645b2

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/20.input_layernorm.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/distributed/_tools/__init__.py +1 -0
  6. venv/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/memory_tracker.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py +299 -0
  9. venv/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py +3 -0
  10. venv/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/join.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__init__.py +0 -0
  13. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/checkpoint_wrapper.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py +314 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__init__.py +7 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/default_hooks.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/default_hooks.py +165 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__init__.py +1 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/optimizer_overlap.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py +93 -0
  24. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__init__.py +0 -0
  25. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/quantization.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/quantization.py +144 -0
  28. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__init__.py +108 -0
  29. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/__init__.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/ddp_zero_hook.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/debugging_hooks.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/default_hooks.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/mixed_precision_hooks.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/optimizer_overlap_hooks.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/post_localSGD_hook.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/powerSGD_hook.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/quantization_hooks.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py +448 -0
  39. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py +28 -0
  40. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py +223 -0
  41. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py +85 -0
  42. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py +154 -0
  43. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py +123 -0
  44. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py +850 -0
  45. venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py +217 -0
  46. venv/lib/python3.10/site-packages/torch/distributed/algorithms/join.py +346 -0
  47. venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__init__.py +0 -0
  48. venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/averagers.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/hierarchical_model_averager.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/20.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63db8a3cd200ba4dd876246f8fe90ffb4f5e9f1ee6db5fe738ec98d4a5e57523
+ size 9387
ckpts/universal/global_step120/zero/20.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f26e75ec14cd60a4213ce1da745672946a05f4bed075a13fd51e780ba0fbf28b
+ size 9293
ckpts/universal/global_step120/zero/26.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fde349e0c9ed2d2a1990482ae5ae95bd8009781d5bb0cc2d47cb60a39568f4e
+ size 16778411
ckpts/universal/global_step120/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:032dcf9214e9c983a0a7069822758e969a97683a8d4d3ef04089546f27ffdf19
+ size 33555627
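The four checkpoint tensors above are tracked with Git LFS, so the commit only adds three-line pointer files (spec version, SHA-256 object id, byte size) rather than the binary payloads. As a minimal illustrative sketch (not part of the commit; the path in the usage comment is hypothetical), such a pointer can be parsed like this:

# Sketch: parse a Git LFS pointer file into its key/value fields.
from pathlib import Path


def read_lfs_pointer(path: str) -> dict:
    """Return the "version", "oid", and "size" fields of an LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


# Example (hypothetical local checkout path):
# info = read_lfs_pointer("ckpts/universal/global_step120/zero/20.input_layernorm.weight/fp32.pt")
# print(info["oid"], int(info["size"]))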
venv/lib/python3.10/site-packages/torch/distributed/_tools/__init__.py ADDED
@@ -0,0 +1 @@
+ from .memory_tracker import MemoryTracker
venv/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (244 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/memory_tracker.cpython-310.pyc ADDED
Binary file (10.5 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tools/memory_tracker.py ADDED
@@ -0,0 +1,299 @@
+ from collections import defaultdict
+
+ from itertools import chain
+
+ import pickle
+
+ from typing import (
+     Any,
+     Callable,
+     Dict,
+     List,
+     no_type_check,
+     Sequence,
+ )
+
+ import torch
+ import torch.nn as nn
+ from torch.utils.hooks import RemovableHandle
+ from torch.utils._python_dispatch import TorchDispatchMode
+
+
+ BYTES_PER_MB = 1024 * 1024.0
+
+
+ class MemoryProfileDispatchMode(TorchDispatchMode):
+     """Run in ``TorchDispatchMode`` to get memory stats at operator level."""
+
+     def __init__(self, memory_tracker) -> None:
+         self.memory_tracker = memory_tracker
+
+     def __torch_dispatch__(self, func, types, args=..., kwargs=None):
+         rs = func(*args, **kwargs)
+         if func == torch.ops.aten.detach.default:
+             return rs
+         func_name: str = (
+             self.memory_tracker._cur_module_name
+             + "."
+             + func.__name__
+             + "_"
+             + str(self.memory_tracker._operator_names[func.__name__])
+         )
+         self.memory_tracker._operator_names[func.__name__] = (
+             self.memory_tracker._operator_names[func.__name__] + 1
+         )
+         self.memory_tracker._record_memory_stats(func_name)
+
+         return rs
+
+
+ class MemoryTracker:
+     """
+     Collect and plot the memory stats at operator level.
+
+     Includes ``memories_allocated``, ``memories_active`` and ``memories_reserved``.
+     It also prints a summary for the top 20 operators that generate the most memories.
+
+     Example usage:
+
+         >>> # xdoctest: +SKIP(failing)
+         >>> net.cuda()
+         >>> input = input.cuda()
+
+         >>> mem_tracker = MemoryTracker()
+         >>> mem_tracker.start_monitor(net)
+
+         >>> net.zero_grad(True)
+         >>> loss = net(input)
+         >>> if isinstance(loss, dict):
+         >>>    loss = loss['out']
+         >>> loss.sum().backward()
+         >>> net.zero_grad(set_to_none=True)
+
+         >>> mem_tracker.stop()
+         >>> mem_tracker.summary()
+         >>> mem_tracker.show_traces()
+     """
+
+     def __init__(self) -> None:
+         torch._C._log_api_usage_once("torch.distributed.memory_tracker")
+         self._hooks: List[RemovableHandle] = []
+         self._operator_names: Dict[str, int] = defaultdict(int)
+         self.memories_allocated: Dict[int, Dict[str, float]] = defaultdict()
+         self.memories_active: Dict[int, Dict[str, float]] = defaultdict()
+         self.memories_reserved: Dict[int, Dict[str, float]] = defaultdict()
+         self._markers: Dict[str, int] = defaultdict(int)
+         self._cur_module_name: str = ""
+         self._op_index: int = 0
+         self._num_cuda_retries: int = 0
+
+     @no_type_check
+     def start_monitor(self, root_module: nn.Module) -> None:
+         """
+         Register module hooks and entering ``MemoryProfileDispatchMode``.
+
+         This enables operator level memory stats can be tracked during module runtime.
+         """
+         self._clear_state()
+         root_module.__setattr__("_memory_tracker_is_root", True)
+         for name, m in root_module.named_modules():
+             if m is not root_module:
+                 m.__setattr__("_memory_tracker_is_root", False)
+             # fused_proxy_group does not support hooks
+             if ".fused_proxy_grouped_embedding_bag" in name:
+                 continue
+             # hook ordering with other hooks added by users is not managed, so
+             # the memory stats tracked here may not completely accurate.
+             h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name))
+             h2 = m.register_forward_hook(self._create_post_forward_hook(name))
+             # it does not work well with jagged tensor somehow, the root cause is not
+             # clear and remove it for now as it does not really capture important info.
+             # h3 = m.register_backward_hook(self._create_backward_hook(name))
+             self._hooks.extend([h1, h2])
+         torch.cuda.empty_cache()
+         assert getattr(self, "profile_mode", None) is None
+         self.profile_mode = MemoryProfileDispatchMode(self)
+         self.profile_mode.__enter__()
+
+     @no_type_check
+     def stop(self) -> None:
+         """
+         Remove module hooks and exit ``MemoryProfileDispatchMode`` to stop tracking memory stats at operator level.
+
+         Get some aggregated stats when the memory_tracker() is enabled, like cuda ``num_alloc_retries``.
+         """
+         self._num_cuda_retries = torch.cuda.memory_stats().get("num_alloc_retries", 0)
+
+         for h in self._hooks:
+             h.remove()
+         self._hooks.clear()
+         assert getattr(self, "profile_mode", None) is not None
+         self.profile_mode.__exit__(None, None, None)
+         self.profile_mode = None
+
+     @no_type_check
+     def summary(self, top: int = 20) -> None:
+         """
+         Print out the top operators that generate the most memories.
+
+         The number of the top operators can be configured.
+         """
+         op_diff: Dict[str, float] = defaultdict(float)
+         op_name, previous_allocated_memory = self.memories_allocated[0]
+         for i in range(1, self._op_index):
+             op_name, current_allocated_memory = self.memories_allocated[i]
+             op_diff[op_name] = current_allocated_memory - previous_allocated_memory
+             previous_allocated_memory = current_allocated_memory
+
+         print("------------------------------------------------")
+         print(f"The number of cuda retries are: {self._num_cuda_retries}")
+         print(f"Top {top} ops that generates memory are:")
+         for k, v in sorted(op_diff.items(), key=lambda item: item[1], reverse=True)[
+             :top
+         ]:
+             print(f"{k}: {v}MB")
+         print("------------------------------------------------")
+
+     @no_type_check
+     def show_traces(self, path: str = "") -> None:
+         import matplotlib.pyplot as plt
+
+         def _plot_figure(x, y_values, labels):
+             min_val = min(list(chain(*y_values))) * 0.999
+             max_val = max(list(chain(*y_values))) * 1.001
+             plt.figure()
+             for y, label in zip(y_values, labels):
+                 plt.plot(x, y, label=label)
+             plt.xlabel("# Operator Calls")
+             plt.ylabel("Memory (MB)")
+             plt.legend()
+             for marker_name, marker in self._markers.items():
+                 if marker_name == "fw_bw_boundary":
+                     plt.plot(
+                         [marker, marker],
+                         [min_val, max_val],
+                         "r",
+                         lw=2,
+                         label=marker_name,
+                     )
+                 else:
+                     plt.plot(
+                         [marker, marker],
+                         [min_val, max_val],
+                         "k-",
+                         lw=2,
+                         label=marker_name,
+                     )
+
+         if path != "":
+             self.load(path)
+
+         y_1 = [gb for (name, gb) in self.memories_allocated.values()]
+         y_2 = [gb for (name, gb) in self.memories_active.values()]
+         y_3 = [gb for (name, gb) in self.memories_reserved.values()]
+         x = list(range(len(y_1)))
+         # Split figures when there is big difference between
+         # "reserved_memory" and "allocated_memory" or "active_memory".
+         _plot_figure(
+             x,
+             [list(y_1), list(y_2), list(y_3)],
+             ["allocated_memory", "active_memory", "reserved_memory"],
+         )
+         _plot_figure(x, [list(y_1)], ["allocated_memory"])
+         _plot_figure(x, [list(y_2)], ["active_memory"])
+         _plot_figure(x, [list(y_3)], ["reserved_memory"])
+
+     def save_stats(self, path: str) -> None:
+         """Save the stats using pickle during runtime if users want to plot the traces in other places like notebook."""
+         stats = {
+             "memories_allocated": self.memories_allocated,
+             "memories_active": self.memories_active,
+             "memories_reserved": self.memories_reserved,
+             "markers": self._markers,
+             "num_alloc_retries": self._num_cuda_retries,
+         }
+
+         with open(path, "wb") as f:
+             pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)
+
+     def load(self, path: str) -> None:
+         """Load the pickled memory stats to plot the traces or print the summary."""
+         with open(path, "rb") as f:
+             stats = pickle.load(f)
+
+         self.memories_allocated = stats["memories_allocated"]
+         self.memories_active = stats["memories_active"]
+         self.memories_reserved = stats["memories_reserved"]
+         self._markers = stats["markers"]
+         self._num_cuda_retries = stats["num_alloc_retries"]
+
+     def _create_pre_forward_hook(self, name: str) -> Callable:
+         """Prefix operator name with current module and 'forward', and insert 'fw_start' marker at forward pass start."""
+         def _pre_forward_hook(module: nn.Module, inputs: Any) -> None:
+             self._cur_module_name = f"{name}.forward"
+             if (
+                 hasattr(module, "_memory_tracker_is_root")
+                 and module._memory_tracker_is_root
+             ):
+                 self._add_marker("fw_start")
+
+         return _pre_forward_hook
+
+     def _create_post_forward_hook(self, name: str) -> Callable:
+         """Insert the marker 'fw_bw_boundary' at the boundary of forward and backward pass."""
+
+         def _post_forward_hook(
+             module: nn.Module,
+             inputs: Sequence[torch.Tensor],
+             outputs: Sequence[torch.Tensor],
+         ) -> None:
+             if (
+                 hasattr(module, "_memory_tracker_is_root")
+                 and module._memory_tracker_is_root
+             ):
+                 self._add_marker("fw_bw_boundary")
+
+         return _post_forward_hook
+
+     def _create_backward_hook(self, name: str) -> Callable:
+         """Insert the current module name with backward prefix for the operator name."""
+
+         def _backward_hook(
+             module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor
+         ) -> None:
+             self._cur_module_name = f"{name}.backward"
+
+         return _backward_hook
+
+     @no_type_check
+     def _record_memory_stats(self, fn_name: str) -> None:
+         """
+         Record current memory allocated, current memory active and current memory reserved.
+
+         The memory stats dict is indexed with ``self._op_index``.
+         """
+         memory_allocated: float = torch.cuda.memory_allocated() / BYTES_PER_MB
+         memory_reserved: float = torch.cuda.memory_reserved() / BYTES_PER_MB
+         memory_active: float = (
+             torch.cuda.memory_stats().get("active_bytes.all.current", 0) / BYTES_PER_MB
+         )
+         self.memories_allocated[self._op_index] = (fn_name, memory_allocated)
+         self.memories_reserved[self._op_index] = (fn_name, memory_reserved)
+         self.memories_active[self._op_index] = (fn_name, memory_active)
+         self._op_index += 1
+
+     def _add_marker(self, marker_name: str) -> None:
+         """Set the marker's x-axis value."""
+         marker_val = len(self.memories_allocated.values())
+         self._markers[marker_name] = marker_val
+
+     def _clear_state(self) -> None:
+         """Clear states when start_monitor() is called."""
+         self._operator_names.clear()
+         self.memories_allocated.clear()
+         self.memories_active.clear()
+         self.memories_reserved.clear()
+         self._markers.clear()
+         self._cur_module_name = ""
+         self._op_index = 0
+         self._num_cuda_retries = 0
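A short usage sketch for the tracker added above (not part of the commit; it assumes a CUDA device is available, since the tracker reads torch.cuda memory stats), mirroring the class docstring:

# Sketch: monitor one forward/backward pass and inspect the per-operator stats.
import torch
import torch.nn as nn
from torch.distributed._tools import MemoryTracker

net = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Linear(1024, 1024)).cuda()
inp = torch.randn(64, 1024, device="cuda")

tracker = MemoryTracker()
tracker.start_monitor(net)              # registers hooks, enters the dispatch mode
net(inp).sum().backward()               # operator-level stats recorded on each dispatch
tracker.stop()                          # removes hooks, exits the dispatch mode
tracker.summary(top=10)                 # top-10 ops by allocated-memory delta
tracker.save_stats("memory_stats.pkl")  # reloadable later via tracker.load(path)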
venv/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .join import Join
+ from .join import Joinable
+ from .join import JoinHook
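These re-exports expose the Join API as torch.distributed.algorithms.Join. As a hedged sketch (the training-loop details are illustrative, and an initialized process group is assumed), DDP instances are Joinables, so wrapping the loop in Join lets ranks with fewer input batches join early instead of hanging in collectives:

# Sketch: tolerate uneven per-rank input counts with the Join context manager.
from torch.distributed.algorithms import Join
from torch.nn.parallel import DistributedDataParallel as DDP


def train_uneven(model, loader, optimizer):
    ddp_model = DDP(model)
    with Join([ddp_model]):                 # shadow collectives for joined ranks
        for inputs, targets in loader:      # loader length may differ across ranks
            loss = (ddp_model(inputs) - targets).pow(2).mean()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()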
venv/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (291 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/join.cpython-310.pyc ADDED
Binary file (13 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/checkpoint_wrapper.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py ADDED
@@ -0,0 +1,314 @@
+ import warnings
+ from enum import auto, Enum
+ from functools import partial
+ from typing import Any, Callable, Dict, Iterator, Optional, Tuple
+
+ import torch
+ import torch.nn as nn
+ from torch.autograd.graph import save_on_cpu
+ from torch.distributed.utils import _pack_kwargs, _replace_by_prefix, _unpack_kwargs
+ from torch.utils.checkpoint import checkpoint as torch_utils_checkpoint
+
+ _CHECKPOINT_WRAPPED_MODULE = "_checkpoint_wrapped_module"
+ _CHECKPOINT_PREFIX = _CHECKPOINT_WRAPPED_MODULE + "."
+
+
+ class CheckpointImpl(Enum):
+     REENTRANT = auto()
+     NO_REENTRANT = auto()
+
+
+ class ActivationWrapper(torch.nn.Module):
+     """
+     Base class for Activation Checkpoint and Activation Offload.
+
+     Not meant to be instantiated directly.
+     """
+
+     def __init__(self, mod):
+         super().__init__()
+         self._checkpoint_wrapped_module = mod
+         # state_dict post hook to remove prefix to allow loading into a
+         # non-checkpoint wrapped module.
+         self._register_state_dict_hook(self._post_state_dict_hook)
+         # load_state_dict pre-hook to allow loading back into
+         # checkpoint-wrapped module.
+         self._register_load_state_dict_pre_hook(
+             self._pre_load_state_dict_hook, with_module=True
+         )
+
+     def forward(self, *args, **kwargs):
+         raise ValueError("Subclasses should implement forward().")
+
+     def __getattr__(self, name: str) -> Any:
+         """Forward missing attributes to wrapped module."""
+         try:
+             return super().__getattr__(name)  # defer to nn.Module's logic
+         except AttributeError:
+             return getattr(self._checkpoint_wrapped_module, name)
+
+     def __getitem__(self, key: int) -> Any:
+         """Forward indexing calls in case the module is a nn.Sequential."""
+         return self._checkpoint_wrapped_module.__getitem__(key)  # type: ignore[operator]
+
+     def named_parameters(
+         self,
+         *args,
+         **kwargs,
+     ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
+         """
+         Override :meth:`named_parameters()` to intercept parameter names.
+
+         remove all occurrences of ``_CHECKPOINT_PREFIX``.
+         """
+         for param_name, param in super().named_parameters(*args, **kwargs):
+             yield param_name.replace(_CHECKPOINT_PREFIX, ""), param
+
+     @staticmethod
+     def _post_state_dict_hook(
+         module: nn.Module,
+         state_dict: Dict[str, Any],
+         prefix: str,
+         *args: Any,
+     ) -> Dict[str, Any]:
+         """
+         _post_state_dict_hook() is called after the state_dict() of this FSDP module is executed.
+
+         For ``checkpoint_wrapper``, it will strip checkpoint-wrapped module prefix,
+         so that this module can be loaded into non-checkpointed modules.
+         It would still be able to be loaded into checkpoint-wrapped modules as this class,
+         adds the prefix back before loading the state_dict.
+         """
+         _replace_by_prefix(state_dict, f"{prefix}{_CHECKPOINT_PREFIX}", prefix)
+         return state_dict
+
+     @staticmethod
+     def _pre_load_state_dict_hook(
+         module: nn.Module,
+         state_dict: Dict[str, Any],
+         prefix: str,
+         *args: Any,
+     ) -> None:
+         """
+         ``_pre_state_dict_hook` is called before ``self._load_from_state_dict()`` is called.
+
+         For ``checkpoint_wrapper``, it will add back the module
+         prefix so that non-checkpointed modules can be loaded into
+         checkpoint_wrapper modules properly.
+         """
+         _replace_by_prefix(state_dict, prefix, prefix + f"{_CHECKPOINT_PREFIX}")
+
+
+ class OffloadWrapper(ActivationWrapper):
+     def __init__(self, mod):
+         super().__init__(mod)
+
+     def forward(self, *args, **kwargs):
+         with save_on_cpu(pin_memory=True):
+             return self._checkpoint_wrapped_module(*args, **kwargs)
+
+
+ class CheckpointWrapper(ActivationWrapper):
+     """
+     An ``nn.Module`` that wraps another ``nn.Module`` with checkpointing.
+
+     Note that this module is not meant to be used directly but instead,
+     it is to be used through the ``checkpoint_wrapper`` function.
+     """
+
+     def __init__(
+         self,
+         mod: torch.nn.Module,
+         checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT,
+         checkpoint_fn=None,
+         **checkpoint_fn_kwargs,
+     ):
+         super().__init__(mod)
+         self.checkpoint_impl = checkpoint_impl
+         if checkpoint_fn is None:
+             # use torch.utils.checkpoint
+             self.checkpoint_fn = partial(
+                 torch_utils_checkpoint,
+                 use_reentrant=(self.checkpoint_impl == CheckpointImpl.REENTRANT),
+                 **checkpoint_fn_kwargs,
+             )
+         else:
+             # Construct user-specified checkpoint function.
+             self.checkpoint_fn = partial(
+                 checkpoint_fn,
+                 **checkpoint_fn_kwargs,
+             )
+
+     def forward(self, *args, **kwargs):
+         # Support keyword arguments for reentrant checkpoint. Note that this
+         # only works if user has specified self.checkpoint_impl and is not
+         # using their own custom checkpoint_fn.
+         if self.checkpoint_impl == CheckpointImpl.REENTRANT and kwargs != {}:
+             # Pack the args and kwargs
+             flat_args, kwarg_keys = _pack_kwargs(*args, **kwargs)
+
+             # Function that only takes (packed) args, but can unpack them
+             # into the original args and kwargs for the checkpointed
+             # function, and runs that function.
+             def my_function(*inputs):
+                 # unpack back into args and kwargs
+                 unpacked_args, unpacked_kwargs = _unpack_kwargs(inputs, kwarg_keys)
+                 # run original module
+                 return self._checkpoint_wrapped_module(
+                     *unpacked_args, **unpacked_kwargs
+                 )
+
+             # Pass the function that only takes packed args into reentrant
+             # checkpoint API.
+             return self.checkpoint_fn(  # type: ignore[misc]
+                 my_function,
+                 *flat_args,
+             )
+         else:
+             return self.checkpoint_fn(  # type: ignore[misc]
+                 self._checkpoint_wrapped_module, *args, **kwargs
+             )
+
+
+ def offload_wrapper(module: torch.nn.Module) -> torch.nn.Module:
+     """
+     Wrap a module for activation offloading to CPU.
+
+     Offloads intermediate activations to the CPU for modules wrapped with this function.
+     Wrappers with activation offload can be composed with ones that do recomputation-based
+     checkpoint to trade off increased compute versus increased CPU
+     memory usage and additional H2D transfers.
+
+     Usage::
+         offloaded_module = offload_wrapper(module)
+         outputs = checkpointed_module(inputs)
+     Args:
+         module (nn.Module):
+             The module to be wrapped
+     Returns:
+         (nn.Module):
+             Wrapped module
+     """
+     return OffloadWrapper(module)
+
+
+ def checkpoint_wrapper(
+     module: torch.nn.Module,
+     checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT,
+     checkpoint_fn=None,
+     **checkpoint_fn_kwargs,
+ ) -> torch.nn.Module:
+     """
+     Wrap a module for activation checkpointing.
+
+     If the module is wrapped with this function, all subsequent calls to the module will,
+     automatically perform checkpointing without the user having to explicitly call ``checkpoint`` function.
+
+     Usage::
+         checkpointed_module = checkpoint_wrapper(module)
+         outputs = checkpointed_module(inputs)
+     Args:
+         module (nn.Module):
+             The module to be wrapped
+         checkpoint_impl (Optional[CheckpointImpl]):
+             The checkpointing implementation to use. Note that this will only
+             be passed into the ``torch.utils.checkpoint.checkpoint``
+             implementation, and is ignored if a custom ``checkpoint_fn`` is
+             specified. Note that for implementations using reentrant checkpoint
+             from ``torch.utils.checkpoint``, keyword arguments will only be
+             supported if ``checkpoint_impl`` is passed as ``CheckpointImpl.REENTRANT`.
+         checkpoint_fn (Optional[Callable]):
+             Functional checkpoint implementation to use. If this is specified,
+             it will be used over the default ``torch.utils.checkpoint.checkpoint``
+             implementation and the `checkpoint_impl` argument will be ignored.
+         **checkpoint_fn_kwargs: (Dict[str, Any]): Keyword arguments to pass into `checkpoint_fn`.
+
+     Returns:
+         (nn.Module):
+             Wrapped module
+     """
+
+     if checkpoint_impl == CheckpointImpl.REENTRANT:
+         warnings.warn(
+             f"Please specify {CheckpointImpl.NO_REENTRANT} as "
+             f"{CheckpointImpl.REENTRANT} will soon be removed as "
+             "the default and eventually deprecated.",
+             stacklevel=1,
+         )
+     return CheckpointWrapper(
+         module,
+         checkpoint_impl,
+         checkpoint_fn,
+         **checkpoint_fn_kwargs,
+     )
+
+
+ def apply_activation_checkpointing(
+     model,
+     checkpoint_wrapper_fn=checkpoint_wrapper,
+     check_fn=lambda _: True,
+     auto_wrap_policy: Optional[Callable[[nn.Module, bool, int], bool]] = None,
+ ):
+     """
+     Apply :func:`checkpoint_wrapper` to modules within `model` based on a user-defined configuration.
+
+     For each module within `model`, the `check_fn` is used to decide
+     whether `module` should be wrapped with :func:`checkpoint_wrapper` or not.
+
+     Note::
+         This function modifies `model` in place and replaces appropriate layers with
+         their checkpoint-wrapped modules.
+     Note::
+         This function will not wrap the overall root module. If this is needed, please directly use
+         :func:`checkpoint_wrapper` or :func:`offload_wrapper`.
+     Usage::
+         model = nn.Sequential(
+             nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10)
+         )
+         check_fn = lambda l: isinstance(l, nn.Linear)
+         # checkpoint activations
+         apply_activation_checkpointing(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
+         # Or offload activations to CPU
+         apply_activation_checkpointing(model, checkpoint_wrapper_fn=offload_wrapper, check_fn=check_fn)
+     Args:
+         model (nn.Module):
+             The model whose submodules should be wrapped with activation checkpointing.
+         checkpoint_wrapper_fn (Optional[Callable[nn.Module]])
+             A ``Callable`` which will wrap modules
+         check_fn (Optional[Callable[nn.Module, nn.Module]])
+             A lambda function which will be passed each child submodule of ``model`` and returns
+             ``True`` or ``False`` depending on whether the submodule should be wrapped.
+         auto_wrap_policy (Optional[Callable[[nn.Module, bool, int], bool]]): A policy to wrap model's
+             submodules with AC. Note that if this is specified, it takes precedence over ``check_fn``.
+     Returns: None (`model` is modified inplace)
+     """
+     # TODO: Importing inside function to avoid circular import issue between FSDP and
+     # checkpoint_wrapper. This can be resolved once wrap() APIs are decoupled from FSDP code.
+     from torch.distributed.fsdp.wrap import _recursive_wrap, lambda_auto_wrap_policy, _Policy
+     from torch.distributed.fsdp._wrap_utils import _construct_wrap_fn, _post_order_apply
+
+     policy = (
+         auto_wrap_policy
+         if auto_wrap_policy is not None
+         else partial(lambda_auto_wrap_policy, lambda_fn=check_fn)
+     )
+     if not callable(policy):
+         if not isinstance(policy, _Policy):
+             raise ValueError(
+                 f"Expected {policy} to be callable or be a pre-defined wrap policy"
+             )
+         target_module_to_kwargs = policy._run_policy(
+             model, ignored_modules=set(), root_kwargs={}
+         )
+         wrap_fn = _construct_wrap_fn(model, target_module_to_kwargs, checkpoint_wrapper_fn)
+         _post_order_apply(model, wrap_fn)
+         return
+
+     _recursive_wrap(
+         module=model,
+         auto_wrap_policy=policy,  # type: ignore[arg-type]
+         wrapper_cls=checkpoint_wrapper_fn,
+         ignored_modules=set(),
+         ignored_params=set(),
+         only_wrap_children=True,
+     )
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__init__.py ADDED
@@ -0,0 +1,7 @@
+
+ from . import default_hooks as default
+
+ LOW_PRECISION_HOOKS = [
+     default.fp16_compress_hook,
+     default.bf16_compress_hook,
+ ]
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (334 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/default_hooks.cpython-310.pyc ADDED
Binary file (6.61 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/default_hooks.py ADDED
@@ -0,0 +1,165 @@
+ import functools
+ import torch
+ import torch.distributed as dist
+ from typing import Optional
+
+
+ class DefaultState:
+     r"""
+     Stores state needed to perform the default communication algorithm within a communication hook.
+
+     Args:
+         process_group (ProcessGroup): The process group to be used.
+     """
+
+     __slots__ = [
+         "process_group",
+         "world_size",
+         "gradient_predivide_factor",
+         "gradient_postdivide_factor"
+     ]
+
+     def __init__(
+         self,
+         process_group: dist.ProcessGroup
+     ):
+         if process_group is None:
+             raise ValueError(f"Expected to pass in an explicit ProcessGroup to {self}.")
+         self.process_group = process_group
+         self.world_size = dist.get_world_size(process_group)
+         # Setting two factors `self.gradient_predivide_factor`
+         # and `self.gradient_postdivide_factor` to avoid underflow and overflow
+         self.gradient_predivide_factor = self._get_gradient_predivide_factor(
+             self.world_size
+         )
+         self.gradient_postdivide_factor = self.world_size / self.gradient_predivide_factor
+
+     @staticmethod
+     def _get_gradient_predivide_factor(world_size: int) -> float:
+         factor: int = 1
+         while world_size % factor == 0 and world_size / factor > factor:
+             factor *= 2
+         return float(factor)
+
+ class LowPrecisionState(DefaultState):
+     r"""
+     Stores state needed to perform gradient communication in a lower precision within a communication hook.
+
+     Communication hook will cast gradients back to the original
+     parameter precision specified by ``parameter_type`` (default: torch.float32).
+     Builds on top of the :class:`DefaultState`.
+
+     Args:
+         parameter_type (torch.dtype): The precision of model's parameters.
+             Required for a hook to cast gradients back to a parameter's precision.
+     """
+
+     __slots__ = [
+         "parameter_type",
+     ]
+
+     def __init__(
+         self,
+         process_group,
+         parameter_type=torch.float32,
+     ):
+         super().__init__(process_group)
+         self.parameter_type = parameter_type
+
+
+ def _decompress(state: LowPrecisionState, grad: torch.Tensor):
+     """
+     Casts gradients back to full parameter precision so that further computation happens in full precision.
+     """
+     orig_grad_data = grad.data
+     grad.data = grad.data.to(state.parameter_type)
+     # Don't let this memory get reused until after the transfer.
+     orig_grad_data.record_stream(torch.cuda.current_stream())  # type: ignore[arg-type]
+
+ def allreduce_hook(state: DefaultState, grad: torch.Tensor):
+     r"""
+     Implement the FSDP communication hook for ``all_reduce`` algorithm and a necessary pre- and post-division of gradients.
+
+     Args:
+         state (DefaultState): State information, configures pre- and post-division factors.
+         grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks.
+     """
+     # Average grad by pre-division factor. Together pre- and post-division factors
+     # lead to an overall averaging by world_size, required for consistency with PyTorch DDP.
+     # This is a two-step process to avoid potential underflow and overflow.
+     if state.gradient_predivide_factor > 1:
+         grad.div_(state.gradient_predivide_factor)
+     dist.all_reduce(grad, group=state.process_group)
+     # Average grad by post-division factor.
+     if state.gradient_postdivide_factor > 1:
+         grad.div_(state.gradient_postdivide_factor)
+
+ def reduce_scatter_hook(state: DefaultState, grad: torch.Tensor, output: torch.Tensor):
+     r"""
+     Implement the FSDP communication hook for ``reduce_scatter`` algorithm.
+
+     For sharded FSDP strategies and a necessary pre- and post-division of gradients.
+
+     Args:
+         state (DefaultState): State information, configures pre- and post-division factors.
+         grad (torch.Tensor): An unsharded gradient for the local batch that needs to be
+             communicated across ranks.
+         output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``.
+     """
+     # Average grad by pre-division factor.
+     if state.gradient_predivide_factor > 1:
+         grad.div_(state.gradient_predivide_factor)
+     dist.reduce_scatter_tensor(
+         output, grad, group=state.process_group
+     )
+     # Average grad's shard by post-division factor.
+     if state.gradient_postdivide_factor > 1:
+         output.div_(state.gradient_postdivide_factor)
+
+ def _low_precision_hook(prec: torch.dtype, state: LowPrecisionState, grad: torch.Tensor, output: torch.Tensor):
+     if grad.dtype != prec:
+         grad.data = grad.data.to(prec)
+     if output is not None:
+         if output.dtype != prec:
+             output.data = output.data.to(prec)
+         reduce_scatter_hook(state, grad, output)
+         _decompress(state, output)
+     else:
+         allreduce_hook(state, grad)
+         _decompress(state, grad)
+
+ def fp16_compress_hook(state: LowPrecisionState, grad: torch.Tensor, output: Optional[torch.Tensor] = None):
+     r"""
+     Implement FSDP communication hook for a simple gradient compression approach.
+     Casts ``grad`` to half-precision floating-point format (``torch.float16``).
+
+     It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a
+     ``state.gradient_predivide_factor``, and after a communication step (``all_reduce`` or ``reduce_scatter``)
+     gradients are averaged by a ``state.gradient_postdivide_factor``.
+     Once post-division is done, compressed gradients are casted back to parameters' precision.
+
+     Args:
+         state (LowPrecisionState): State information, configures pre- and post-division factors, parameters' precision.
+         grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision.
+         output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``.
+     """
+     fp16_hook = functools.partial(_low_precision_hook, torch.float16)
+     return fp16_hook(state, grad, output)
+
+ def bf16_compress_hook(state: LowPrecisionState, grad: torch.Tensor, output: Optional[torch.Tensor] = None):
+     r"""
+     Implement FSDP communication hook for a simple gradient compression approach .
+     Casts ``grad`` to half-precision floating-point format.
+
+     It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a
+     ``state.gradient_predivide_factor``, and after a communication step (``all_reduce`` or ``reduce_scatter``)
+     gradients are averaged by a ``state.gradient_postdivide_factor``.
+     Once post-division is done, compressed gradients are casted back to parameters' precision.
+
+     Args:
+         state (LowPrecisionState): State information, configures pre- and post-division factors, parameters' precision.
+         grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision.
+         output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``.
+     """
+     bf16_hook = functools.partial(_low_precision_hook, torch.bfloat16)
+     return bf16_hook(state, grad, output)
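A small numeric sketch (not part of the commit) of the two-step averaging these hooks perform: DefaultState picks a power-of-two pre-division factor roughly near the square root of the world size, and a post-division factor such that their product equals world_size, matching DDP's overall gradient averaging:

# Sketch: reproduce _get_gradient_predivide_factor and check the invariant.
def predivide_factor(world_size: int) -> float:
    factor = 1
    while world_size % factor == 0 and world_size / factor > factor:
        factor *= 2
    return float(factor)

for ws in (1, 8, 64, 128):
    pre = predivide_factor(ws)
    post = ws / pre
    assert pre * post == ws  # pre-division * post-division == world_size
    print(f"world_size={ws}: predivide={pre}, postdivide={post}")
# world_size=64  -> predivide=8.0,  postdivide=8.0
# world_size=128 -> predivide=16.0, postdivide=8.0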
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__init__.py ADDED
@@ -0,0 +1 @@
+ from .optimizer_overlap import _as_overlapped_optim
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (277 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/optimizer_overlap.cpython-310.pyc ADDED
Binary file (3.82 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py ADDED
@@ -0,0 +1,93 @@
+ from abc import ABC, abstractmethod
+ import inspect
+ from typing import Dict, Type
+
+ from torch.distributed.fsdp import FullyShardedDataParallel
+ from torch.nn.parallel import DistributedDataParallel
+ from torch.optim import Optimizer
+ from torch.distributed.optim import as_functional_optim
+
+ from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
+
+ from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import (
+     _OptimizerHookState,
+     _hook_then_optimizer
+ )
+
+ # Contains the mappings between the regular and overlapped optimizer types.
+ _registered_overlapped_optims: Dict[Type, Type] = {}
+
+
+ def register_overlapped(optim_cls):
+     def decorator(target_overlapped_optim_cls):
+         if target_overlapped_optim_cls in _registered_overlapped_optims:
+             raise ValueError(
+                 f"{target_overlapped_optim_cls} already registered with optim_cls "
+                 f"{_registered_overlapped_optims[optim_cls]} {optim_cls}, trying to"
+                 f"re-register it for {optim_cls} is not supported."
+             )
+         _registered_overlapped_optims[optim_cls] = target_overlapped_optim_cls
+         return target_overlapped_optim_cls
+     return decorator
+
+
+ class OverlappedOptimizer(ABC):
+     def __init__(self, optim_cls: Type) -> None:
+         """
+         Initialize the OverlappedOptimizer.
+
+         Overlappedoptimizer is a base class that child classes can implement to
+         specify how different optimizers will register themselves with DDP.
+         """
+         self.optim_cls = optim_cls
+
+     @abstractmethod
+     def register_ddp(self, ddp: DistributedDataParallel) -> None:
+         """Registers the overlapped optimizer with DDP."""
+         raise NotImplementedError(
+             f"{self.__class__.__name__} does not support overlapped DDP."
+         )
+
+     @abstractmethod
+     def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
+         """Registers the overlapped optimizer with FSDP."""
+         raise NotImplementedError(
+             f"{self.__class__.__name__} does not support overlapped FSDP."
+         )
+
+
+ @register_overlapped(Optimizer)
+ class _OverlappedStandardOptimizer(OverlappedOptimizer):
+     """Overlaps a regular ``Optimizer``."""
+
+     def __init__(self, optim_cls: Type, params, *optim_args, **optim_kwargs) -> None:
+         super().__init__(optim_cls)
+         f_optim = as_functional_optim(self.optim_cls, *optim_args, **optim_kwargs)
+         self._opt_hook_state = _OptimizerHookState(f_optim, params)
+
+     def register_ddp(self, ddp_inst: DistributedDataParallel):
+         # NOTE: using a custom communication hook and fused optimizer is not
+         # yet supported.
+         ddp_inst.register_comm_hook(  # type: ignore[operator]
+             None,  # wrapped hook state
+             _hook_then_optimizer(allreduce_hook, self._opt_hook_state)
+         )
+
+     # TODO: register_fsdp once FSDP supports communication hook.
+     def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
+         """Register the overlapped optimizer with FSDP."""
+         raise NotImplementedError(
+             f"{self.__class__.__name__} does not support overlapped FSDP."
+         )
+
+ def _as_overlapped_optim(optim_cls: Type, params, *args, **kwargs):
+     """Return a new ``OverlappedOptimizer`` instance that supports ``optim_cls``."""
+     for clz in inspect.getmro(optim_cls):
+         try:
+             return _registered_overlapped_optims[clz](optim_cls, params, *args, **kwargs)
+         except KeyError:
+             pass
+
+     # Fallback to standard overlapped optimizer, which will raise errors if user
+     # is attempting to use an unsupported optimizer.
+     return _OverlappedStandardOptimizer(optim_cls, params, *args, **kwargs)
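A hedged sketch of how this private API is meant to be driven (the helper name and training details are illustrative, and functional-optimizer support for the chosen optimizer via as_functional_optim is assumed): build an overlapped optimizer for a DDP model so the optimizer step runs inside the allreduce communication hook:

# Sketch: overlap SGD steps with DDP gradient communication.
import torch
from torch.distributed.algorithms._optimizer_overlap import _as_overlapped_optim
from torch.nn.parallel import DistributedDataParallel as DDP


def setup_overlapped_sgd(model: torch.nn.Module, lr: float = 0.01) -> DDP:
    ddp_model = DDP(model)
    overlapped = _as_overlapped_optim(torch.optim.SGD, list(model.parameters()), lr=lr)
    overlapped.register_ddp(ddp_model)  # fuses the optimizer step into the comm hook
    return ddp_model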
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (210 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/quantization.cpython-310.pyc ADDED
Binary file (5.07 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/quantization.py ADDED
@@ -0,0 +1,144 @@
+ import functools
+ import torch
+ import torch.distributed as dist
+
+
+ from enum import Enum
+
+
+ TORCH_HALF_MIN = torch.finfo(torch.float16).min
+ TORCH_HALF_MAX = torch.finfo(torch.float16).max
+
+ class DQuantType(Enum):
+     """
+     Different quantization methods for auto_quantize API are identified here.
+
+     auto_quantize API currently supports fp16 and bfp16 methods.
+     """
+     FP16 = "fp16",
+     BFP16 = "bfp16"
+
+     def __str__(self) -> str:
+         return self.value
+
+
+ def _fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
+     return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()
+
+ def _quantize_tensor(tensor, qtype):
+     if not isinstance(tensor, torch.Tensor):
+         raise RuntimeError(
+             f"_quantize_tensor expecting torch.Tensor as input but found {type(tensor)}"
+         )
+     if qtype == DQuantType.FP16:
+         return _fp32_to_fp16_with_clamp(tensor)
+     elif qtype == DQuantType.BFP16:
+         return torch.ops.quantization._FloatToBfloat16Quantized(tensor)
+     else:
+         raise RuntimeError(
+             f'Quantization type {qtype} is not supported'
+         )
+
+ def _quantize_tensor_list(tensor_list, qtype):
+     if not isinstance(tensor_list, list) or not all(
+         isinstance(p, torch.Tensor) for p in tensor_list
+     ):
+         raise RuntimeError(
+             f"_quantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}"
+         )
+     quantized_tensor_list = [_quantize_tensor(t, qtype) for t in tensor_list]
+     return quantized_tensor_list
+
+ def _dequantize_tensor(tensor, qtype, quant_loss=None):
+     if not isinstance(tensor, torch.Tensor):
+         raise RuntimeError(
+             f"_dequantize_tensor expecting torch.Tensor as input but found {type(tensor)}"
+         )
+     if qtype == DQuantType.FP16:
+         if tensor.dtype != torch.float16:
+             raise RuntimeError(
+                 f"tensor dtype is {tensor.dtype} while expected to be FP16."
+             )
+         elif tensor.dtype == torch.float16 and quant_loss is None:
+             return tensor.float()
+         else:
+             return tensor.float() / quant_loss
+     elif qtype == DQuantType.BFP16:
+         if tensor.dtype != torch.float16:
+             raise RuntimeError(
+                 f"tensor dtype is {tensor.dtype} while expected to be FP16."
+             )
+         else:
+             return torch.ops.quantization._Bfloat16QuantizedToFloat(tensor)
+     else:
+         raise RuntimeError(
+             f'Quantization type {qtype} is not supported'
+         )
+
+
+ def _dequantize_tensor_list(tensor_list, qtype, quant_loss=None):
+     if not isinstance(tensor_list, list) or not all(
+         isinstance(p, torch.Tensor) for p in tensor_list
+     ):
+         raise RuntimeError(
+             f"_dequantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}"
+         )
+     dequantized_tensor_list = [_dequantize_tensor(t, qtype) for t in tensor_list]
+     return dequantized_tensor_list
+
+
+ def auto_quantize(func, qtype, quant_loss=None):
+     """
+     Quantize the input tensors, choose the precision types, and pass other necessary arguments and then dequantizes the output.
+
+     Currently it only supports:
+         . FP16 and BFP16 quantization method supported for gloo and nccl backends
+         . all_gather, all_to_all collective ops
+     Note: BFP16 only supports 2D tensors.
+     Args:
+         func (Callable): A function representing collective operations.
+         qtype (QuantType): Quantization method
+         quant_loss (float, optional): This can be used to improve accuracy in the dequantization.
+     Returns:
+         (Callable): the same collective as func but enables automatic quantization/dequantization.
+     """
+     @functools.wraps(func)
+     def wrapper(*args, **kwargs):
+         group = kwargs.get('group', None)
+         async_op = kwargs.get('async_op', False)
+         if async_op is True:
+             raise RuntimeError(
+                 'The async_op=True mode is not supported yet.'
+             )
+         if func == dist.all_gather:
+             tensors = args[0]
+             input_tensors = _quantize_tensor(args[1], qtype)
+             out_tensors = _quantize_tensor_list(tensors, qtype)
+             dist.all_gather(out_tensors, input_tensors, group=group, async_op=async_op)
+             for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)):
+                 tensors[i] = t
+
+         elif func == dist.all_to_all:
+             tensors = args[0]
+             input_tensors = _quantize_tensor_list(args[1], qtype)
+             out_tensors = _quantize_tensor_list(tensors, qtype)
+             dist.all_to_all(out_tensors, input_tensors, group=group, async_op=async_op)
+             for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)):
+                 tensors[i] = t
+
+         elif func == dist.all_to_all_single:
+             tensors = args[0]
+             out_splits = kwargs.get('out_splits', None)
+             in_splits = kwargs.get('in_splits', None)
+             # Quantizing the input/output tensor
+             input_tensors = _quantize_tensor(args[1], qtype)
+             out_tensors = _quantize_tensor(tensors, qtype)
+             dist.all_to_all_single(out_tensors, input_tensors, out_splits, in_splits, group=group)
+             for i, t in enumerate(_dequantize_tensor(out_tensors, qtype, quant_loss=quant_loss)):
+                 tensors[i] = t
+         else:
+             raise RuntimeError(
+                 f"The collective op {func} is not supported yet"
+             )
+
+     return wrapper
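A hedged usage sketch for auto_quantize (assumes torch.distributed is already initialized with a gloo or nccl backend; the helper name is illustrative): the wrapped collective quantizes inputs to fp16, runs all_gather, and dequantizes the results in place:

# Sketch: run an all_gather with automatic fp16 compression/decompression.
import torch
import torch.distributed as dist
from torch.distributed.algorithms._quantization.quantization import (
    DQuantType,
    auto_quantize,
)


def gather_compressed(tensor: torch.Tensor) -> list:
    world_size = dist.get_world_size()
    gather_list = [torch.empty_like(tensor) for _ in range(world_size)]
    quantized_all_gather = auto_quantize(dist.all_gather, DQuantType.FP16)
    quantized_all_gather(gather_list, tensor)  # dequantized results written into gather_list
    return gather_list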
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__init__.py ADDED
@@ -0,0 +1,108 @@
+ from enum import Enum
+ from functools import partial
+
+ import torch.distributed as dist
+
+ from . import (
+     debugging_hooks as debugging,
+     default_hooks as default,
+     powerSGD_hook as powerSGD,
+     quantization_hooks as quantization,
+     optimizer_overlap_hooks as optimizer_overlap,
+ )
+
+ __all__ = ['DDPCommHookType', 'register_ddp_comm_hook']
+
+ def _ddp_comm_hook_wrapper(comm_hook, model, state):
+     model.register_comm_hook(state, comm_hook)
+
+
+ def _powerSGD_comm_hook_wrapper(
+     comm_hook,
+     model,
+     state,
+     matrix_approximation_rank,
+     start_powerSGD_iter=1_000,
+ ):
+     """
+     Wrap PowerSGD communication hook.
+
+     To be consistent with the wrappers of other DDP comm hooks, the input state only needs to be a process group,
+     which will be wrapped up with other state info.
+     """
+     powerSGD_state = powerSGD.PowerSGDState(
+         process_group=state,
+         matrix_approximation_rank=matrix_approximation_rank,
+         start_powerSGD_iter=start_powerSGD_iter,
+     )
+     model.register_comm_hook(powerSGD_state, comm_hook)
+
+
+ class DDPCommHookType(Enum):
+     """
+     Enumerate ``ddp_comm_hooks`` and ``ddp_comm_hook_wrapper`` communucation hook types.
+
+     DDPCommHookType enumerates the hooks of ``torch.distributed.algorithms.ddp_comm_hooks``
+     as names and ``ddp_comm_hook_wrapper`` partials with hook specified. As an example,
+     you can register allreduce hook by
+     ``DDPCommHookType.ALLREDUCE.value(model=model, state=process_group)``.
+     """
+
+     ALLREDUCE = partial(_ddp_comm_hook_wrapper, comm_hook=default.allreduce_hook)
+     FP16_COMPRESS = partial(
+         _ddp_comm_hook_wrapper, comm_hook=default.fp16_compress_hook
+     )
+     BF16_COMPRESS = partial(
+         _ddp_comm_hook_wrapper, comm_hook=default.bf16_compress_hook
+     )
+     QUANTIZE_PER_TENSOR = partial(
+         _ddp_comm_hook_wrapper, comm_hook=quantization.quantization_pertensor_hook
+     )
+     QUANTIZE_PER_CHANNEL = partial(
+         _ddp_comm_hook_wrapper, comm_hook=quantization.quantization_perchannel_hook
+     )
+     POWER_SGD = partial(
+         _powerSGD_comm_hook_wrapper,
+         comm_hook=powerSGD.powerSGD_hook,
+         matrix_approximation_rank=1,
+     )
+     # Rank-2 PowerSGD can give a higher accuracy than the default rank-1 version,
+     # but it runs slower and consumes more memory.
+     POWER_SGD_RANK2 = partial(
+         _powerSGD_comm_hook_wrapper,
+         comm_hook=powerSGD.powerSGD_hook,
+         matrix_approximation_rank=2,
+     )
+     # Batching can lead to a faster training at the cost of accuracy.
+     BATCHED_POWER_SGD = partial(
+         _powerSGD_comm_hook_wrapper,
+         comm_hook=powerSGD.batched_powerSGD_hook,
+         matrix_approximation_rank=1,
+     )
+     BATCHED_POWER_SGD_RANK2 = partial(
+         _powerSGD_comm_hook_wrapper,
+         comm_hook=powerSGD.batched_powerSGD_hook,
+         matrix_approximation_rank=2,
+     )
+     NOOP = partial(
+         _ddp_comm_hook_wrapper, comm_hook=debugging.noop_hook,
+     )
+
+
+ def register_ddp_comm_hook(
+     comm_hook_type: DDPCommHookType, model, state=None
+ ):
+     """
+     Register ``ddp_comm_hooks`` to DDP model.
+
+     Registers the hooks of ``torch.distributed.algorithms.ddp_comm_hooks``
+     to the DDP model. User can specify the type of hook as an enum
+     ``DDPCommHookType`` type using ``comm_hook_type`` input. State input will
+     be passed to the model.
+     Uses Python comm hook implementations.
+
+     Example::
+         >>> # xdoctest: +SKIP
+         >>> register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, model, state)
+     """
+     comm_hook_type.value(model=model, state=state)
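A usage sketch for the registration helper above (assumes an initialized process group and a DDP-wrapped model; the helper name is illustrative): pick a hook type from the enum and attach it to the model in one call:

# Sketch: attach rank-1 PowerSGD gradient compression to a DDP model.
import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks import (
    DDPCommHookType,
    register_ddp_comm_hook,
)


def attach_powersgd(ddp_model):
    process_group = dist.group.WORLD
    # state is just the process group; the wrapper builds the PowerSGDState internally
    register_ddp_comm_hook(DDPCommHookType.POWER_SGD, ddp_model, state=process_group)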
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.1 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/ddp_zero_hook.cpython-310.pyc ADDED
Binary file (14.6 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/debugging_hooks.cpython-310.pyc ADDED
Binary file (1.38 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/default_hooks.cpython-310.pyc ADDED
Binary file (7.18 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/mixed_precision_hooks.cpython-310.pyc ADDED
Binary file (2.81 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/optimizer_overlap_hooks.cpython-310.pyc ADDED
Binary file (5.04 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/post_localSGD_hook.cpython-310.pyc ADDED
Binary file (3.91 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/powerSGD_hook.cpython-310.pyc ADDED
Binary file (24.3 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/quantization_hooks.cpython-310.pyc ADDED
Binary file (6.85 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py ADDED
@@ -0,0 +1,448 @@
1
+ import weakref
2
+ from typing import Any, Callable, List, Optional
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ from torch.distributed.optim import ZeroRedundancyOptimizer
7
+ from torch.distributed.optim.zero_redundancy_optimizer import (
8
+ _OverlapStatus,
9
+ )
10
+ from torch.nn.parallel.distributed import DistributedDataParallel
11
+
12
+ __all__ = ["hook_with_zero_step", "hook_with_zero_step_interleaved"]
13
+
14
+ # Functional optimizers require passing a list of gradients to their `step()`
+ # method, and ZeRO requires a functional optimizer to overlap with DDP.
+ # Passing a `None` instead of an actual gradient indicates to the optimizer
+ # not to update the corresponding parameter.
18
+ _NO_PARAM_UPDATE: None = None
19
+
20
+
21
+ def _perform_local_step(
22
+ bucket: dist.GradBucket,
23
+ zero: ZeroRedundancyOptimizer,
24
+ rank: int,
25
+ ):
26
+ r"""
27
+ Perform a local optimizer step using the gradients provided by ``bucket``.
28
+
29
+ Arguments:
30
+ bucket (dist.GradBucket): the bucket providing the gradients.
31
+ zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
32
+ instance to perform the :meth:`_local_step`.
33
+ rank (int): the calling process's rank.
34
+
35
+ .. warning::
36
+ This function assumes that appropriate synchronization has taken place
37
+ so that the bucket's gradients can be used.
38
+ """
39
+ overlap_info = zero._overlap_info
40
+ bucket_index = bucket.index()
41
+ assert len(zero.optim.param_groups) == 1, \
42
+ "Overlapping DDP with ZeRO only supports a single parameter group"
43
+
44
+ # Construct the `gradients` input for the local optimizer step, which
45
+ # expects `None` in a list position to indicate that the corresponding
46
+ # parameter should not be updated
47
+ num_local_optim_params = len(zero.optim.param_groups[0]["params"])
48
+ gradients: List[Optional[torch.Tensor]] = \
49
+ [_NO_PARAM_UPDATE for _ in range(num_local_optim_params)]
50
+ assert bucket_index in overlap_info.offsets, \
51
+ f"Bucket index {bucket_index} was not assigned to rank {rank}"
52
+ gradients_offset = overlap_info.offsets[bucket_index]
53
+ bucket_assignment = zero._bucket_assignments_per_rank[rank][bucket_index]
54
+ bucket_offset = bucket_assignment.offset
55
+ length = len(bucket_assignment.parameters)
56
+ bucket_gradients = bucket.gradients()[bucket_offset:bucket_offset + length]
57
+ for i, grad in enumerate(bucket_gradients):
58
+ gradients[gradients_offset + i] = grad
59
+
60
+ zero._local_step(gradients)
61
+
62
+
63
+ def _broadcast_bucket(
64
+ bucket_index: int,
65
+ zero: ZeroRedundancyOptimizer,
66
+ ):
67
+ r"""
68
+ Broadcasts a bucket's parameters.
69
+
70
+ Arguments:
71
+ bucket_index (int): the index of the bucket corresponding to the
72
+ parameters to broadcast.
73
+ zero (ZeroRedundancyOptimizer): the calling process's
74
+ :class:`ZeroRedundancyOptimizer` instance.
75
+ """
76
+ overlap_info = zero._overlap_info
77
+ assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, \
78
+ "`assigned_ranks_per_bucket` is not fully constructed"
79
+ # Sort to ensure the same ordering across ranks
80
+ assigned_ranks = sorted(overlap_info.assigned_ranks_per_bucket[bucket_index])
81
+ assert len(assigned_ranks) > 0, f"Bucket {bucket_index} should be " \
82
+ "assigned to at least one rank"
83
+ for assigned_rank in assigned_ranks:
84
+ bucket_assignments = zero._bucket_assignments_per_rank[assigned_rank]
85
+ if bucket_index in bucket_assignments:
86
+ overlap_info.broadcast_handles.append(
87
+ dist.broadcast(
88
+ bucket_assignments[bucket_index].tensor,
89
+ src=dist.get_global_rank(zero.process_group, assigned_rank),
90
+ group=zero.process_group,
91
+ async_op=True,
92
+ )
93
+ )
94
+
95
+
96
+ def _save_ddp_bucket_info(
97
+ bucket: dist.GradBucket,
98
+ zero: ZeroRedundancyOptimizer,
99
+ ):
100
+ r"""
101
+ Save :class:`DistributedDataParallel` gradient bucket information for :class:`ZeroRedundancyOptimizer` instance ``zero``.
102
+
103
+ In particular, this function is meant to be called once for each gradient
+ bucket that will be used when overlapping; it does not save or compute any
+ global information.
106
+
107
+ Arguments:
108
+ bucket (dist.GradBucket): the current gradient bucket.
109
+ zero (ZeroRedundancyOptimizer): the calling process's
110
+ :class:`ZeroRedundancyOptimizer` instance.
111
+ """
112
+ overlap_info = zero._overlap_info
113
+ bucket_params = bucket.parameters()
114
+ assert len(bucket_params) > 0, "Empty bucket"
115
+
116
+ # Save the parameters in the bucket
117
+ overlap_info.params_per_bucket.append(bucket_params)
118
+ if overlap_info.shard_buckets:
119
+ # Additionally save the bucket size for the assignment heuristic to use
120
+ bucket_size = 0
121
+ for param in bucket_params:
122
+ bucket_size += param.numel()
123
+ assert overlap_info.total_size is not None
124
+ overlap_info.total_size += bucket_size
125
+
126
+
127
+ def _hook_with_zero_step_setup(
128
+ ddp_ref: weakref.ReferenceType,
129
+ zero: ZeroRedundancyOptimizer,
130
+ bucket: dist.GradBucket,
131
+ ):
132
+ r"""
133
+ Encapsulate the setup logic for :func:`hook_with_zero_step` and :func:`hook_with_zero_step_interleaved`.
134
+
135
+ This is the logic that the hook must run before the backward pass and
+ optimizer step can actually be overlapped. It is factored out since it is
+ common to both :func:`hook_with_zero_step` and
+ :func:`hook_with_zero_step_interleaved`.
139
+
140
+ Arguments:
141
+ ddp_ref (weakref.ReferenceType): weak reference to the process's
142
+ :class:`DistributedDataParallel` instance.
143
+ zero (ZeroRedundancyOptimizer): the calling process's
144
+ :class:`ZeroRedundancyOptimizer` instance.
145
+ bucket (dist.GradBucket): the current gradient bucket.
146
+ """
147
+ # Proceed as normal until the DDP buckets have been rebuilt
148
+ if not ddp_ref()._has_rebuilt_buckets: # type: ignore[union-attr]
149
+ assert zero._overlap_info.status == _OverlapStatus.UNINITIALIZED
150
+ return
151
+
152
+ bucket_index = bucket.index()
153
+ overlap_info = zero._overlap_info
154
+ if overlap_info.status == _OverlapStatus.UNINITIALIZED:
155
+ overlap_info.status = _OverlapStatus.DDP_HAS_REBUILT_BUCKETS
156
+
157
+ if overlap_info.status == _OverlapStatus.DDP_HAS_REBUILT_BUCKETS:
158
+ if bucket_index == 0 and len(overlap_info.params_per_bucket) > 0:
159
+ # This corresponds to the first bucket of the backward pass
160
+ # immediately after all information has been saved, so we
161
+ # can perform the delayed ZeRO initialization
162
+ zero._init_zero_for_overlap()
163
+ else:
164
+ # Once DDP buckets have been rebuilt but ZeRO has not been
165
+ # properly initialized yet, save the information needed
166
+ _save_ddp_bucket_info(bucket, zero)
167
+
168
+
169
+ def hook_with_zero_step(
170
+ hook: Callable[[Any, dist.GradBucket], torch.futures.Future],
171
+ ddp: DistributedDataParallel,
172
+ zero: ZeroRedundancyOptimizer,
173
+ shard_buckets: bool = False,
174
+ ) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
175
+ r"""
176
+ Modify ``hook`` to overlap :class:`ZeroRedundancyOptimizer` optimizer step with :class:`DistributedDataParallel` backward pass.
177
+
178
+ This approach overlaps the optimizer computation and communication with the
179
+ backward communication. In particular, the backward computation proceeds
180
+ contiguously, and the optimizer computation follows, overlapping with
181
+ outstanding backward communication (i.e. all-reduces) and possibly other
182
+ optimizer communication (i.e. broadcasts).
183
+ The optimizer step computation begins after the last gradient bucket computation has finished.
184
+
185
+ This approach may be preferred over :meth:`hook_with_zero_step_interleaved`
186
+ if communication is relatively slow compared to computation.
187
+
188
+ Arguments:
189
+ hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook
190
+ to modify.
191
+ ddp (DistributedDataParallel): the :class:`DistributedDataParallel`
192
+ instance to use.
193
+ zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
194
+ instance to use.
195
+ shard_buckets (bool): if ``True``, then the assignment of each
196
+ :class:`DistributedDataParallel` bucket is partitioned across
197
+ possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e.
198
+ across possibly multiple ranks) to approximate uniformity; if
199
+ ``False``, then each bucket is wholly assigned to a single
200
+ :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank).
201
+
202
+ Returns:
203
+ The modified hook.
204
+
205
+ Raises:
206
+ ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``.
207
+ RuntimeError: if using any backend other than NCCL/HCCL since currently
208
+ Gloo may hang.
209
+
210
+ .. warning::
211
+ Given the way that overlapping :class:`DistributedDataParallel` with
212
+ :class:`ZeroRedundancyOptimizer` is currently implemented, the first
213
+ two or three training iterations do not perform parameter updates in
214
+ the optimizer step, depending on if ``static_graph=False`` or
215
+ ``static_graph=True``, respectively. This is because it needs
216
+ information about the gradient bucketing strategy used by
217
+ :class:`DistributedDataParallel`, which is not finalized until the
218
+ second forward pass if ``static_graph=False`` or until the third
219
+ forward pass if ``static_graph=True``.
220
+ """
221
+ if not zero._overlap_with_ddp:
222
+ raise ValueError(
223
+ "ZeroRedundancyOptimizer must be constructed with "
224
+ "`overlap_with_ddp=True` to use this hook properly"
225
+ )
226
+ ddp_ref = weakref.ref(ddp)
227
+
228
+ # NOTE: Gloo may hang with this overlapping approach, so we require
229
+ # NCCL/HCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300
230
+ pg = dist.get_backend(ddp_ref().process_group) # type: ignore[union-attr]
231
+ if ((pg != dist.Backend.NCCL) and (pg != 'hccl')):
232
+ raise RuntimeError(
233
+ "Overlapping DDP with ZeRO using this approach currently requires "
234
+ "NCCL/HCCL backend to avoid hangs"
235
+ )
236
+
237
+ if shard_buckets:
238
+ zero._overlap_info.shard_buckets = True
239
+ zero._overlap_info.total_size = 0
240
+
241
+ def hook_with_zero_fn(
242
+ state: Any,
243
+ bucket: dist.GradBucket,
244
+ ) -> torch.futures.Future[torch.Tensor]:
245
+ r"""
246
+ Return :class:`Future` that runs the optimizer step if this corresponds to the last gradient bucket.
247
+
248
+ Perform equivalent of :class:`ZeroRedundancyOptimizer` :meth:`step` if ``bucket`` is last gradient bucket.
249
+ The function returns the gradient bucket tensor and, on the iteration in
+ which the :class:`DistributedDataParallel` buckets are rebuilt, performs
+ additional computation to collect the information used to implement the
+ modified hook.
253
+
254
+ Arguments:
255
+ state (Any): any state for the hook.
256
+ bucket (dist.GradBucket): the :class:`DistributedDataParallel`
257
+ gradient bucket.
258
+ """
259
+ fut = hook(state, bucket)
260
+ _hook_with_zero_step_setup(ddp_ref, zero, bucket)
261
+ if zero._overlap_info.status != _OverlapStatus.INITIALIZED:
262
+ return fut
263
+
264
+ overlap_info = zero._overlap_info
265
+ bucket_index = bucket.index()
266
+ rank = zero.global_rank
267
+
268
+ assert overlap_info.status == _OverlapStatus.INITIALIZED
269
+ assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, \
270
+ "`assigned_ranks_per_bucket` is not fully constructed"
271
+ assigned_to_bucket = rank in overlap_info.assigned_ranks_per_bucket[bucket_index]
272
+
273
+ # Save the bucket reference and all-reduce future for the final bucket
274
+ if assigned_to_bucket:
275
+ overlap_info.bucket_index_to_bucket[bucket_index] = bucket
276
+ overlap_info.bucket_index_to_future[bucket_index] = fut
277
+
278
+ # Check that buckets are indexed incrementally starting from 0 in the
279
+ # order of their autograd hooks firing
280
+ if len(overlap_info.bucket_indices_seen) > 0:
281
+ assert overlap_info.bucket_indices_seen[-1] == bucket_index - 1, \
282
+ "Bucket indices are not in incremental order"
283
+ else:
284
+ assert bucket_index == 0, "Bucket indices do not start from 0"
285
+ overlap_info.bucket_indices_seen.append(bucket_index)
286
+
287
+ # Directly return the future without any optimizer computation if this
288
+ # is not the last bucket
289
+ num_buckets = len(overlap_info.params_per_bucket)
290
+ is_last_bucket = bucket_index == num_buckets - 1
291
+ if not is_last_bucket:
292
+ return fut
293
+
294
+ # Perform partial optimizer step on all buckets after the final
295
+ # bucket has been computed
296
+ # NOTE: This should not be chained as a callback to the last bucket's
297
+ # all-reduce future since that would add synchronization that delays
298
+ # all optimizer computation to wait for that last all-reduce
299
+ for bucket_index in range(num_buckets):
300
+ assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]
301
+ if rank in assigned_ranks:
302
+ # Wait on the bucket's all-reduce future to ensure correct
303
+ # gradients
304
+ assert bucket_index in overlap_info.bucket_index_to_future, \
305
+ f"All-reduce future for bucket {bucket_index} not saved " \
306
+ f"on rank {rank}"
307
+ allreduce_future = overlap_info.bucket_index_to_future[bucket_index]
308
+ allreduce_future.wait()
309
+
310
+ # Perform the partial optimizer step
311
+ curr_bucket = overlap_info.bucket_index_to_bucket[bucket_index]
312
+ _perform_local_step(curr_bucket, zero, rank)
313
+
314
+ _broadcast_bucket(bucket_index, zero)
315
+
316
+ # Ensure that all parameter updates are finished before the
317
+ # next forward pass
318
+ overlap_info.wait_for_broadcasts()
319
+ overlap_info.clear_per_iter_info()
320
+
321
+ return fut
322
+
323
+ return hook_with_zero_fn
324
+
325
+
326
+ def hook_with_zero_step_interleaved(
327
+ hook: Callable[[Any, dist.GradBucket], torch.futures.Future],
328
+ ddp: DistributedDataParallel,
329
+ zero: ZeroRedundancyOptimizer,
330
+ shard_buckets: bool = False,
331
+ ) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
332
+ r"""
333
+ Modify ``hook`` to overlap :class:`ZeroRedundancyOptimizer` optimizer step with :class:`DistributedDataParallel` backward pass
334
+
335
+ This approach overlaps the optimizer computation and communication with the
336
+ backward computation and communication. In particular, once a bucket's
337
+ gradients have been computed, the optimizer computation using those
338
+ gradients is launched (though the actual computation must wait for the
339
+ bucket's all-reduce to complete). This yields an interleaving of all-
340
+ reduces and broadcasts in the communication stream.
341
+
342
+ This approach may be preferred over :meth:`hook_with_zero_step` if
343
+ communication is relatively fast compared to computation.
344
+
345
+ Arguments:
346
+ hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook
+ to modify.
348
+ ddp (DistributedDataParallel): the :class:`DistributedDataParallel`
349
+ instance to use.
350
+ zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
351
+ instance to use.
352
+ shard_buckets (bool): if ``True``, then the assignment of each
353
+ :class:`DistributedDataParallel` bucket is partitioned across
354
+ possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e.
355
+ across possibly multiple ranks) to approximate uniformity; if
356
+ ``False``, then each bucket is wholly assigned to a single
357
+ :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank).
358
+
359
+ Returns:
360
+ The modified hook.
361
+
362
+ Raises:
363
+ ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``.
364
+ RuntimeError: if using any backend other than NCCL/HCCL since currently
+ Gloo may hang.
366
+
367
+ .. warning::
368
+ Given the way that overlapping :class:`DistributedDataParallel` with
369
+ :class:`ZeroRedundancyOptimizer` is currently implemented, the first
370
+ two or three training iterations do not perform parameter updates in
371
+ the optimizer step, depending on if ``static_graph=False`` or
372
+ ``static_graph=True``, respectively. This is because it needs
373
+ information about the gradient bucketing strategy used by
374
+ :class:`DistributedDataParallel`, which is not finalized until the
375
+ second forward pass if ``static_graph=False`` or until the third
376
+ forward pass if ``static_graph=True``.
377
+ """
378
+ if not zero._overlap_with_ddp:
379
+ raise ValueError(
380
+ "ZeroRedundancyOptimizer must be constructed with "
381
+ "`overlap_with_ddp=True` to use this hook properly"
382
+ )
383
+ ddp_ref = weakref.ref(ddp)
384
+
385
+ # NOTE: Gloo may hang with this overlapping approach, so we require
386
+ # NCCL/HCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300
387
+ pg = dist.get_backend(ddp_ref().process_group) # type: ignore[union-attr]
388
+ if ((pg != dist.Backend.NCCL) and (pg != 'hccl')):
389
+ raise RuntimeError(
390
+ "Overlapping DDP with ZeRO using this approach currently requires "
391
+ "NCCL/HCCL backend to avoid hangs"
392
+ )
393
+
394
+ if shard_buckets:
395
+ zero._overlap_info.shard_buckets = True
396
+ zero._overlap_info.total_size = 0
397
+
398
+ def hook_with_zero_interleaved_fn(
399
+ state,
400
+ bucket: dist.GradBucket,
401
+ ) -> torch.futures.Future[torch.Tensor]:
402
+ r"""
403
+ Return :class:`Future` that gives gradient bucket tensor and performs partial :class:`ZeroRedundancyOptimizer` :meth:`step`.
404
+
405
+ This function uses the gradients in the given bucket to perform a partial
+ :class:`ZeroRedundancyOptimizer` :meth:`step`.
407
+
408
+ Arguments:
409
+ state: any state for the hook.
410
+ bucket (dist.GradBucket): the :class:`DistributedDataParallel`
411
+ gradient bucket.
412
+ """
413
+ fut = hook(state, bucket)
414
+ _hook_with_zero_step_setup(ddp_ref, zero, bucket)
415
+ if zero._overlap_info.status != _OverlapStatus.INITIALIZED:
416
+ return fut
417
+
418
+ def zero_step(fut: torch.futures.Future) -> torch.Tensor:
419
+ r"""
420
+ Perform a partial :class:`ZeroRedundancyOptimizer` :meth:`step` using the gradients in the :class:`DistributedDataParallel` gradient bucket.
421
+
422
+ Returns:
423
+ A :class:`torch.Tensor` representing the contents of the
424
+ gradient bucket.
425
+ """
426
+ overlap_info = zero._overlap_info
427
+ bucket_index = bucket.index()
428
+ rank = zero.global_rank
429
+
430
+ assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]
431
+ overlap_info.bucket_indices_seen.append(bucket_index)
432
+ if rank in assigned_ranks:
433
+ _perform_local_step(bucket, zero, rank)
434
+
435
+ _broadcast_bucket(bucket_index, zero)
436
+
437
+ num_buckets = len(overlap_info.params_per_bucket)
438
+ if len(overlap_info.bucket_indices_seen) == num_buckets:
439
+ # Ensure that all parameter updates are finished before the
440
+ # next forward pass
441
+ overlap_info.wait_for_broadcasts()
442
+ overlap_info.clear_per_iter_info()
443
+
444
+ return bucket.buffer()
445
+
446
+ return fut.then(zero_step)
447
+
448
+ return hook_with_zero_interleaved_fn
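As a hedged sketch (not part of the diff), the two hooks above are typically combined with a :class:`ZeroRedundancyOptimizer` constructed with ``overlap_with_ddp=True``; the wiring below assumes an NCCL process group is already initialized, and ``MyModel`` and ``rank`` are placeholders:

import torch
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import hook_with_zero_step
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.nn.parallel import DistributedDataParallel as DDP

ddp_model = DDP(MyModel().to(rank), device_ids=[rank])  # `MyModel`, `rank`: placeholders
zero = ZeroRedundancyOptimizer(
    ddp_model.parameters(),
    optimizer_class=torch.optim.Adam,
    overlap_with_ddp=True,  # required by hook_with_zero_step
    lr=1e-3,
)
# Wrap the plain allreduce hook so the local optimizer step overlaps with the
# remaining backward communication.
ddp_model.register_comm_hook(None, hook_with_zero_step(allreduce_hook, ddp_model, zero))
# Per the warning above, the first 2-3 iterations do not update parameters.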
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py ADDED
@@ -0,0 +1,28 @@
1
+ from typing import Any
2
+
3
+ import torch
4
+ from torch.distributed import GradBucket
5
+
6
+ __all__ = ["noop_hook"]
7
+
8
+
9
+ def noop_hook(_: Any, bucket: GradBucket) -> torch.futures.Future[torch.Tensor]:
10
+ """
11
+ Return a future that wraps the input, so it is a no-op that does not incur any communication overheads.
12
+
13
+ This hook should **only** be used for headroom analysis of allreduce optimization,
14
+ instead of the normal gradient synchronization.
15
+ For example, if only less than 10% speedup of training time can be observed after this hook is registered,
16
+ it usually implies that allreduce is not a performance bottleneck for this case.
17
+ Such instrumentation can be particularly useful
18
+ if GPU traces cannot be easily retrieved or the trace analysis is complicated
19
+ some factors such as the overlap between allreduce and computation or the desynchronization across ranks.
20
+
21
+ Example::
22
+ >>> # xdoctest: +SKIP
23
+ >>> ddp_model.register_comm_hook(None, noop_hook)
24
+ """
25
+ fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()
26
+ fut.set_result(bucket.buffer())
27
+
28
+ return fut
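A possible headroom-analysis sketch using ``noop_hook``; ``ddp_model`` and ``train_one_epoch`` are placeholders, not part of the file above:

import time
from torch.distributed.algorithms.ddp_comm_hooks.debugging_hooks import noop_hook

# Disable real gradient synchronization; gradients will diverge across ranks,
# so this is for timing only.
ddp_model.register_comm_hook(None, noop_hook)

start = time.perf_counter()
train_one_epoch(ddp_model)  # placeholder training loop
print(f"epoch time without allreduce: {time.perf_counter() - start:.1f}s")
# Compare against the same measurement with the default allreduce hook to
# estimate how much of the step time is spent in gradient communication.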
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py ADDED
@@ -0,0 +1,223 @@
1
+ from typing import Any, Callable, cast, Tuple
2
+
3
+ import torch
4
+ import torch.distributed as dist
5
+
6
+ __all__ = [
7
+ "allreduce_hook",
8
+ "fp16_compress_hook",
9
+ "bf16_compress_hook",
10
+ "fp16_compress_wrapper",
11
+ "bf16_compress_wrapper",
12
+ ]
13
+
14
+
15
+ def _allreduce_fut(
16
+ process_group: dist.ProcessGroup, tensor: torch.Tensor
17
+ ) -> torch.futures.Future[torch.Tensor]:
18
+ """Average the input gradient tensor by allreduce and returns a future."""
19
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
20
+
21
+ # Apply the division first to avoid overflow, especially for FP16.
22
+ tensor.div_(group_to_use.size())
23
+
24
+ return (
25
+ dist.all_reduce(tensor, group=group_to_use, async_op=True)
26
+ .get_future()
27
+ .then(lambda fut: fut.value()[0])
28
+ )
29
+
30
+
31
+ def allreduce_hook(
32
+ process_group: dist.ProcessGroup, bucket: dist.GradBucket
33
+ ) -> torch.futures.Future[torch.Tensor]:
34
+ """
35
+ Call ``allreduce`` using ``GradBucket`` tensors.
36
+
37
+ Once gradient tensors are aggregated across all workers, its ``then``
38
+ callback takes the mean and returns the result.
39
+
40
+ If user registers this DDP communication hook,
41
+ DDP results is expected to be same as the case where no hook was registered.
42
+ Hence, this won't change behavior of DDP and user can use this as a reference
43
+ or modify this hook to log useful information or any other purposes while
44
+ unaffecting DDP behavior.
45
+
46
+ Example::
47
+ >>> # xdoctest: +SKIP
48
+ >>> ddp_model.register_comm_hook(process_group, allreduce_hook)
49
+ """
50
+ return _allreduce_fut(process_group, bucket.buffer())
51
+
52
+
53
+ def fp16_compress_hook(
54
+ process_group: dist.ProcessGroup,
55
+ bucket: dist.GradBucket,
56
+ ) -> torch.futures.Future[torch.Tensor]:
57
+ """
58
+ Compress by casting ``GradBucket`` to ``torch.float16`` divided by process group size.
59
+
60
+ This DDP communication hook implements a simple gradient compression
61
+ approach that casts ``GradBucket`` tensor to half-precision floating-point format (``torch.float16``)
62
+ and then divides it by the process group size.
63
+ It allreduces those ``float16`` gradient tensors. Once compressed gradient
64
+ tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``).
65
+
66
+ Example::
67
+ >>> # xdoctest: +SKIP
68
+ >>> ddp_model.register_comm_hook(process_group, fp16_compress_hook)
69
+ """
70
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
71
+ world_size = group_to_use.size()
72
+
73
+ buffer = (
74
+ cast(Tuple[torch.Tensor, ...], bucket)[0]
75
+ if isinstance(bucket, tuple)
76
+ else bucket.buffer()
77
+ )
78
+ compressed_tensor = buffer.to(torch.float16).div_(world_size)
79
+
80
+ def decompress(fut):
81
+ decompressed_tensor = buffer
82
+ # Decompress in place to reduce the peak memory.
83
+ # See: https://github.com/pytorch/pytorch/issues/45968
84
+ value = fut if isinstance(fut, torch.Tensor) else fut.value()[0]
85
+ decompressed_tensor.copy_(value)
86
+ return decompressed_tensor
87
+
88
+ if torch._utils.is_compiling():
89
+ grad = dist._functional_collectives.all_reduce(
90
+ compressed_tensor, "sum", group_to_use
91
+ )
92
+ return decompress(grad)
93
+ else:
94
+ fut = dist.all_reduce(
95
+ compressed_tensor, group=group_to_use, async_op=True
96
+ ).get_future()
97
+ return fut.then(decompress)
98
+
99
+
100
+ # TODO: create an internal helper function and extract the duplicate code in FP16_compress and BF16_compress.
101
+ def bf16_compress_hook(
102
+ process_group: dist.ProcessGroup,
103
+ bucket: dist.GradBucket,
104
+ ) -> torch.futures.Future[torch.Tensor]:
105
+ """
106
+ Warning: This API is experimental, and it requires NCCL version later than 2.9.6.
107
+
108
+ This DDP communication hook implements a simple gradient compression
109
+ approach that casts ``GradBucket`` tensor to half-precision
110
+ `Brain floating point format <https://en.wikipedia.org/wiki/Bfloat16_floating-point_format>`_ (``torch.bfloat16``)
111
+ and then divides it by the process group size.
112
+ It allreduces those ``bfloat16`` gradient tensors. Once compressed gradient
113
+ tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``).
114
+
115
+ Example::
116
+ >>> # xdoctest: +SKIP
117
+ >>> ddp_model.register_comm_hook(process_group, bf16_compress_hook)
118
+ """
119
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
120
+ world_size = group_to_use.size()
121
+
122
+ buffer = (
123
+ cast(Tuple[torch.Tensor, ...], bucket)[0]
124
+ if isinstance(bucket, tuple)
125
+ else bucket.buffer()
126
+ )
127
+ compressed_tensor = buffer.to(torch.bfloat16).div_(world_size)
128
+
129
+ def decompress(fut):
130
+ decompressed_tensor = buffer
131
+ # Decompress in place to reduce the peak memory.
132
+ # See: https://github.com/pytorch/pytorch/issues/45968
133
+ value = fut if isinstance(fut, torch.Tensor) else fut.value()[0]
134
+ decompressed_tensor.copy_(value)
135
+ return decompressed_tensor
136
+
137
+ if torch._utils.is_compiling():
138
+ grad = dist._functional_collectives.all_reduce(
139
+ compressed_tensor, "sum", group_to_use
140
+ )
141
+ return decompress(grad)
142
+ else:
143
+ fut = dist.all_reduce(
144
+ compressed_tensor, group=group_to_use, async_op=True
145
+ ).get_future()
146
+ return fut.then(decompress)
147
+
148
+
149
+ def fp16_compress_wrapper(
150
+ hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]
151
+ ) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
152
+ """
153
+ Cast input tensor to ``torch.float16``, cast result of hook back to input dtype.
154
+
155
+ This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision
156
+ floating point format (``torch.float16``), and casts the resulting tensor of the given hook back to
157
+ the input data type, such as ``float32``.
158
+ Therefore, ``fp16_compress_hook`` is equivalent to ``fp16_compress_wrapper(allreduce_hook)``.
159
+
160
+ Example::
161
+ >>> # xdoctest: +SKIP
162
+ >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10)
163
+ >>> ddp_model.register_comm_hook(state, fp16_compress_wrapper(powerSGD_hook))
164
+ """
165
+
166
+ def fp16_compress_wrapper_hook(
167
+ hook_state, bucket: dist.GradBucket
168
+ ) -> torch.futures.Future[torch.Tensor]:
169
+ # Cast bucket tensor to FP16.
170
+ bucket.set_buffer(bucket.buffer().to(torch.float16))
171
+
172
+ fut = hook(hook_state, bucket)
173
+
174
+ def decompress(fut):
175
+ decompressed_tensor = bucket.buffer()
176
+ # Decompress in place to reduce the peak memory.
177
+ # See: https://github.com/pytorch/pytorch/issues/45968
178
+ decompressed_tensor.copy_(fut.value())
179
+ return decompressed_tensor
180
+
181
+ # Decompress after hook has run.
182
+ return fut.then(decompress)
183
+
184
+ return fp16_compress_wrapper_hook
185
+
186
+
187
+ def bf16_compress_wrapper(
188
+ hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]
189
+ ) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
190
+ """
191
+ Warning: This API is experimental, and it requires NCCL version later than 2.9.6.
192
+
193
+ This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision
194
+ `Brain floating point format <https://en.wikipedia.org/wiki/Bfloat16_floating-point_format> `_ (``torch.bfloat16``),
195
+ and casts the resulting tensor of the given hook back to the input data type, such as ``float32``.
196
+
197
+ Therefore, ``bf16_compress_hook`` is equivalent to ``bf16_compress_wrapper(allreduce_hook)``.
198
+
199
+ Example::
200
+ >>> # xdoctest: +SKIP
201
+ >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10)
202
+ >>> ddp_model.register_comm_hook(state, bf16_compress_wrapper(powerSGD_hook))
203
+ """
204
+
205
+ def bf16_compress_wrapper_hook(
206
+ hook_state, bucket: dist.GradBucket
207
+ ) -> torch.futures.Future[torch.Tensor]:
208
+ # Cast bucket tensor to BF16.
209
+ bucket.set_buffer(bucket.buffer().to(torch.bfloat16))
210
+
211
+ fut = hook(hook_state, bucket)
212
+
213
+ def decompress(fut):
214
+ decompressed_tensor = bucket.buffer()
215
+ # Decompress in place to reduce the peak memory.
216
+ # See: https://github.com/pytorch/pytorch/issues/45968
217
+ decompressed_tensor.copy_(fut.value())
218
+ return decompressed_tensor
219
+
220
+ # Decompress after hook has run.
221
+ return fut.then(decompress)
222
+
223
+ return bf16_compress_wrapper_hook
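To make the compress/all-reduce/decompress pattern in ``fp16_compress_hook`` concrete, here is a self-contained, single-process sketch in which the all-reduce is faked by summing gradients from a hypothetical list of workers (no process group involved):

import torch

def fake_fp16_allreduce(per_worker_grads):
    world_size = len(per_worker_grads)
    # Cast to FP16 and divide by the world size, as the hook does, before "reducing".
    compressed = [g.to(torch.float16).div_(world_size) for g in per_worker_grads]
    reduced = torch.stack(compressed).sum(dim=0)  # stands in for all_reduce(SUM)
    # "Decompress" back to the original dtype, mirroring the chained callback.
    return reduced.to(per_worker_grads[0].dtype)

grads = [torch.randn(8) for _ in range(4)]
averaged = fake_fp16_allreduce(grads)
print(torch.allclose(averaged, torch.stack(grads).mean(dim=0), atol=1e-2))  # True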
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py ADDED
@@ -0,0 +1,85 @@
1
+ import torch
2
+ import torch.distributed as dist
3
+ from torch.autograd import Variable
4
+
5
+ from dataclasses import dataclass
6
+ from typing import Any, no_type_check
7
+ from torch.distributed.utils import _free_storage
8
+
9
+ @dataclass
10
+ class _AllreduceUpcastHookState:
11
+ """
12
+ State to manage DDP mixed precision in backward / gradient communication.
13
+
14
+ This contains a weakref to the DDP module for access to reducer and process
15
+ group, and a stream to run parameter and gradient upcasts.
16
+ """
17
+
18
+ ddp_weakref: Any
19
+ upcast_stream: torch.cuda.Stream
20
+ wait_for_stream_enqueued: bool = False
21
+
22
+ @no_type_check
23
+ def _reducer_allreduce_and_upcast_hook(
24
+ hook_state: _AllreduceUpcastHookState, bucket: dist.GradBucket
25
+ ) -> torch.futures.Future[torch.Tensor]:
26
+ """
27
+ Perform allreduce in precision ``reduce_dtype``, upcast to prepare for optimizer.
28
+
29
+ Performs allreduce in the reduced precision given by DDP's mixed precision
30
+ reduce_dtype, and upcasts parameters and gradients to fp32 in preparation
31
+ to run the optimizer.
32
+ """
33
+ ddp_weakref = hook_state.ddp_weakref
34
+ reducer, process_group = ddp_weakref().reducer, ddp_weakref().process_group
35
+ gradient_is_bucket_view = ddp_weakref().gradient_as_bucket_view
36
+ # Cast bucket if different than param_dtype.
37
+ if (
38
+ ddp_weakref().mixed_precision.param_dtype != ddp_weakref().mixed_precision.reduce_dtype
39
+ ):
40
+ # Cast bucket tensor to reduce_dtype
41
+ bucket.set_buffer(bucket.buffer().to(ddp_weakref().mixed_precision.reduce_dtype))
42
+ fut = reducer._run_allreduce_hook(bucket)
43
+ ret_fut = torch.futures.Future()
44
+ stream = hook_state.upcast_stream
45
+ with torch.cuda.stream(stream):
46
+ fut.wait()
47
+ bucket.buffer().div_(process_group.size())
48
+ ret_fut.set_result(bucket.buffer())
49
+
50
+ # Upcast parameters and gradients so optimizer step can run in fp32.
51
+ params, grads = bucket.parameters(), bucket.gradients()
52
+ for p, g in zip(params, grads):
53
+ p.data = p._fp_param
54
+ # free storage for mp param as it will be allocated again in next
55
+ # forward pass.
56
+ _free_storage(p._mp_param)
57
+ p.grad.data = p.grad.to(p.data.dtype)
58
+
59
+ # enqueue a callback to wait for this stream at end of backward
60
+ def wait_for_stream_cb():
61
+ torch.cuda.current_stream().wait_stream(stream)
62
+ # Remove post-backward hooks since they are re-installed in next
63
+ # iteration, similar to FSDP.
64
+ # Parameters that don't require grad still needed to be casted since
65
+ # they may participate in computation. However, they would not be recast
66
+ # by hook above as they don't have a grad hook installed, so cast them
67
+ # back here.
68
+ for n, p in ddp_weakref().module.named_parameters():
69
+ if hasattr(p, '_ddp_mp_hook_state'):
70
+ p._ddp_mp_hook_state[1].remove()
71
+ delattr(p, '_ddp_mp_hook_state')
72
+ if not p.requires_grad and not hasattr(p, '_ddp_ignored'):
73
+ p.data = p._fp_param
74
+
75
+ # reset for next backward pass
76
+ hook_state.wait_for_stream_enqueued = False
77
+
78
+ if not hook_state.wait_for_stream_enqueued:
79
+ Variable._execution_engine.queue_callback(
80
+ wait_for_stream_cb
81
+ )
82
+ # mark that the callback is enqueued
83
+ hook_state.wait_for_stream_enqueued = True
84
+
85
+ return ret_fut
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py ADDED
@@ -0,0 +1,154 @@
1
+ from typing import Any, Callable, List, no_type_check
2
+
3
+ import torch
4
+ import torch.distributed as dist
5
+ from torch.autograd import Variable
6
+ from functools import partial
7
+ from dataclasses import dataclass
8
+
9
+ __all__: List[str] = []
10
+
11
+ _FUNCTIONAL_OPTIM_STEP_METHOD_NAME = "step_param"
12
+
13
+ class _OptimizerHookState:
14
+ """
15
+ Holds state for running optimizer in-line after DDP communication hook.
16
+
17
+ Currently contains only optimizer class which must have a method `step_param`.
18
+ """
19
+
20
+ __slots__ = ["functional_optimizer", "params_to_optimize"]
21
+
22
+ def __init__(self, functional_optim, params=None):
23
+ self.functional_optimizer = functional_optim
24
+ self._check_valid_functional_optim()
25
+ self._set_params_to_optimize(params)
26
+
27
+ def _set_params_to_optimize(self, params):
28
+ if params is not None:
29
+ self.params_to_optimize = set(params)
30
+
31
+ def _check_valid_functional_optim(self):
32
+ if not hasattr(self.functional_optimizer, _FUNCTIONAL_OPTIM_STEP_METHOD_NAME):
33
+ raise ValueError(
34
+ f"Class {type(self.functional_optimizer)} must implement method "
35
+ f"{_FUNCTIONAL_OPTIM_STEP_METHOD_NAME}."
36
+ )
37
+
38
+
39
+ @dataclass
40
+ class _OptimInBackwardHookState:
41
+ optim_stream: torch.cuda.Stream
42
+ wait_for_optim_stream_enqueued: bool
43
+
44
+ @no_type_check
45
+ def _apply_optim_in_backward_hook(
46
+ gradient_is_bucket_view: bool
47
+ ) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
48
+ r"""
49
+ Register hook to apply the optimizer in backward.
50
+
51
+ If torch.distributed.optim._apply_optimizer_in_backward is used to overlap
52
+ optimizer with backward pass, DDP will run the below hook to run optimizer
53
+ step for parameters after gradient communication has taken place.
54
+ """
55
+ optim_in_bwd_state = _OptimInBackwardHookState(
56
+ optim_stream=torch.cuda.Stream(),
57
+ wait_for_optim_stream_enqueued=False,
58
+ )
59
+
60
+ def apply_optim_in_backward_hook(
61
+ hook_state: Any, bucket: dist.GradBucket, optim_stream_state,
62
+ ) -> torch.futures.Future[torch.Tensor]:
63
+ # Run original hook
64
+ ddp_weakref = hook_state
65
+ ddp_inst = ddp_weakref()
66
+ reducer, process_group = ddp_inst.reducer, ddp_inst.process_group
67
+ fut = reducer._run_allreduce_hook(bucket)
68
+ optimizer_stream = optim_stream_state.optim_stream
69
+ with torch.cuda.stream(optimizer_stream):
70
+ fut.wait()
71
+ # Apply gradient division since C++ side only allreduces and does
72
+ # not average. TODO: (rohan-varma) the div factor may be different
73
+ # when running with join hook
74
+ bucket.buffer().div_(process_group.size())
75
+ model_params = bucket.parameters()
76
+ grads = bucket.gradients()
77
+ # TODO (rohan-varma): upcast as needed for DDP mixed precision,
78
+ # once optimizer in backward + DDP mixed precision is supported.
79
+ for p, g in zip(model_params, grads):
80
+ if hasattr(p, '_in_backward_optimizers'):
81
+ # Note: need to set grad to the bucket's grad, because
82
+ # running allreduce results in the bucket's grad being
83
+ # reduced, but not grad field.
84
+ if not gradient_is_bucket_view:
85
+ p.grad = g
86
+ for optim in p._in_backward_optimizers:
87
+ optim.step()
88
+
89
+ # Need to return a Future[Tensor] to obey comm hook API contract.
90
+ ret_fut = torch.futures.Future()
91
+ ret_fut.set_result(bucket.buffer())
92
+
93
+ # enqueue a callback to wait for this optimizer stream at the end of
94
+ # backward and set all DDP managed grads to None.
95
+ def wait_for_optim_stream_callback():
96
+ torch.cuda.current_stream().wait_stream(
97
+ optim_stream_state.optim_stream
98
+ )
99
+ # Set DDP managed grads to None
100
+ for param in ddp_inst._get_data_parallel_params(ddp_inst.module):
101
+ if hasattr(param, '_in_backward_optimizers'):
102
+ param.grad = None
103
+
104
+ # reset for the next backwards pass
105
+ optim_stream_state.wait_for_optim_stream_enqueued = False
106
+
107
+ if not optim_stream_state.wait_for_optim_stream_enqueued:
108
+ Variable._execution_engine.queue_callback(
109
+ wait_for_optim_stream_callback
110
+ )
111
+ # mark that the callback is enqueued
112
+ optim_stream_state.wait_for_optim_stream_enqueued = True
113
+
114
+ return ret_fut
115
+
116
+ comm_hook = partial(
117
+ apply_optim_in_backward_hook, optim_stream_state=optim_in_bwd_state
118
+ )
119
+ # These are needed for DDP's logging of comm hooks
120
+ comm_hook.__name__ = apply_optim_in_backward_hook.__name__
121
+ comm_hook.__qualname__ = apply_optim_in_backward_hook.__qualname__
122
+
123
+ return comm_hook
124
+
125
+ def _hook_then_optimizer(
126
+ hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]],
127
+ optimizer_state: _OptimizerHookState,
128
+ ) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
129
+ r"""Run optimizer in a functional fashion after DDP communication hook."""
130
+ has_set_params = (
131
+ hasattr(optimizer_state, 'params_to_optimize')
132
+ and optimizer_state.params_to_optimize is not None
133
+ )
134
+
135
+ def hook_then_optimizer_wrapper(
136
+ hook_state, bucket: dist.GradBucket
137
+ ) -> torch.futures.Future[torch.Tensor]:
138
+ # Run original hook
139
+ fut = hook(hook_state, bucket)
140
+
141
+ def optimizer_step(fut):
142
+ gradient_tensors = bucket.gradients()
143
+ model_params = bucket.parameters()
144
+ for grad_tensor, model_param in zip(gradient_tensors, model_params):
145
+ if not has_set_params or model_param in optimizer_state.params_to_optimize:
146
+ optimizer_state.functional_optimizer.step_param(
147
+ model_param,
148
+ grad_tensor,
149
+ )
150
+ return bucket.buffer()
151
+
152
+ return fut.then(optimizer_step)
153
+
154
+ return hook_then_optimizer_wrapper
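The ``step_param`` contract enforced by ``_OptimizerHookState._check_valid_functional_optim`` can be illustrated with a toy functional optimizer; ``ToySGD`` is made up for this sketch and is not a PyTorch class:

import torch

class ToySGD:
    """Minimal functional optimizer exposing the per-parameter `step_param` hook."""

    def __init__(self, lr: float = 0.01):
        self.lr = lr

    def step_param(self, param: torch.Tensor, grad: torch.Tensor) -> None:
        # Update a single parameter in place from its (already reduced) gradient.
        if grad is not None:
            param.data.add_(grad, alpha=-self.lr)

p = torch.nn.Parameter(torch.ones(4))
ToySGD(lr=0.1).step_param(p, torch.full((4,), 2.0))
print(p)  # tensor([0.8000, 0.8000, 0.8000, 0.8000], requires_grad=True)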
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py ADDED
@@ -0,0 +1,123 @@
1
+ import logging
2
+
3
+ import torch
4
+ import torch.distributed as dist
5
+
6
+ from . import default_hooks as default
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
+ class PostLocalSGDState:
12
+ r"""
13
+ Store state for all-reducing gradients globally until given step, then locally after.
14
+
15
+ Stores the state for all-reducing gradients globally using ``process_group`` until step ``start_localSGD_iter``,
16
+ and all-reducing gradients locally using ``subgroup`` afterwards.
17
+
18
+ If ``process_group`` is ``None``, the global process group will be used.
19
+ If ``subgroup`` is ``None``, the intra-node process group on each machine will be used.
20
+
21
+ Additionally, ``post_local_gradient_allreduce`` may be worth tuning,
+ because either ``True`` or ``False`` may give faster convergence.
23
+ """
24
+
25
+ __slots__ = [
26
+ "process_group",
27
+ "subgroup",
28
+ "start_localSGD_iter",
29
+ "post_local_gradient_allreduce",
30
+ "iter",
31
+ ]
32
+
33
+ def __init__(
34
+ self,
35
+ process_group,
36
+ subgroup,
37
+ start_localSGD_iter,
38
+ post_local_gradient_allreduce=True,
39
+ ):
40
+ """Initialize state object with given parameters and log when localSGD start."""
41
+ logger.info(
42
+ "Local SGD will be started after %s iterations", start_localSGD_iter
43
+ )
44
+
45
+ # The group used for all-reducing gradients globally.
46
+ self.process_group = process_group
47
+ # The group used for all-reducing gradients locally.
48
+ self.subgroup = subgroup
49
+ self.start_localSGD_iter = start_localSGD_iter
50
+ # Allreduce gradients locally starting from iteration `start_localSGD_iter`.
51
+ # This may help with the convergence efficiency at the cost of relatively cheap intra-subgroup communication.
52
+ self.post_local_gradient_allreduce = post_local_gradient_allreduce
53
+ # Iteration/step in the training loop.
54
+ self.iter = 0
55
+
56
+ def maybe_increase_iter(self, bucket):
57
+ """Track iterations and trigger log message at start of local SGD."""
58
+ # Since bucket 0 is the last bucket to allreduce in an iteration,
+ # only increase `iter` when bucket 0 is processed.
60
+ if bucket.is_last():
61
+ self.iter += 1
62
+
63
+ if self.iter == self.start_localSGD_iter:
64
+ logger.info(
65
+ "Start to apply local SGD after %s iterations.", self.iter
66
+ )
67
+
68
+ def post_localSGD_hook(
69
+ state: PostLocalSGDState, bucket: dist.GradBucket
70
+ ) -> torch.futures.Future[torch.Tensor]:
71
+ """
72
+ Run post-localSGD algorithm.
73
+
74
+ This DDP communication hook is used for running post-localSGD algorithm,
75
+ by combining with a model averaging component (e.g.,
76
+ :class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager`)
77
+ that runs after the optimizer step.
78
+
79
+ Args:
80
+ state (PostLocalSGDState): State information to run post-localSGD.
81
+ Users mainly need to tune ``start_localSGD_iter`` to determine when to start local SGD.
82
+ bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
83
+ Note that since DDP comm hook only supports single process single device mode,
84
+ only exactly one tensor is stored in this bucket.
85
+
86
+ Returns:
87
+ Future handler of the communication, which updates the gradients in place.
88
+
89
+ Example::
90
+ >>> # xdoctest: +SKIP
91
+ >>> state = PostLocalSGDState(process_group=process_group, subgroup=subgroup,
92
+ start_localSGD_iter=10)
93
+ >>> ddp_model.register_comm_hook(state, post_localSGD_hook)
94
+ >>> # Also need to establish a model averaging module and run model averaging after ``optimizer.step()``.
95
+ >>> # Please refer to the examples in ``torch.distributed.algorithms.model_averaging.averagers`` module.
96
+ """
97
+ global_group_to_use = (
98
+ state.process_group if state.process_group is not None else dist.group.WORLD
99
+ )
100
+
101
+ # The input tensor is a flattened 1D tensor.
102
+ input_tensor = bucket.buffer()
103
+
104
+ # Run allreduce using `global_group_to_use` in the first `start_localSGD_iter` iterations.
105
+ if state.iter < state.start_localSGD_iter:
106
+ state.maybe_increase_iter(bucket)
107
+ return default._allreduce_fut(global_group_to_use, input_tensor)
108
+
109
+ # If `post_local_gradient_allreduce` is not set,
110
+ # then no gradient synchronization after the first `start_localSGD_iter` iterations.
111
+ if not state.post_local_gradient_allreduce:
112
+ fut: torch.futures.Future[torch.Tensor] = torch.futures.Future()
113
+ fut.set_result(input_tensor)
114
+ return fut
115
+
116
+ # Run allreduce using `subgroup` after the first `start_localSGD_iter` iterations.
117
+ # Note that by default, a separate subgroup for each node is created which
118
+ # causes an intra-node allreduce to be done at each training step.
119
+ # From this moment, model averaging should run after the optimizer step,
120
+ # to globally allreduce all the parameters.
121
+ if state.subgroup is None:
122
+ state.subgroup, _ = dist.new_subgroups()
123
+ return default._allreduce_fut(state.subgroup, input_tensor)
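A hedged end-to-end sketch of how this hook is usually paired with periodic model averaging; it assumes an initialized process group, and ``model`` is a placeholder for a DDP-wrapped module:

import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
    PostLocalSGDState,
    post_localSGD_hook,
)
from torch.distributed.algorithms.model_averaging.averagers import PeriodicModelAverager

subgroup, _ = dist.new_subgroups()  # one subgroup per node by default
state = PostLocalSGDState(process_group=None, subgroup=subgroup, start_localSGD_iter=100)
model.register_comm_hook(state, post_localSGD_hook)

averager = PeriodicModelAverager(period=4, warmup_steps=100)
# In the training loop, after loss.backward() and optimizer.step():
#     averager.average_parameters(model.parameters())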
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py ADDED
@@ -0,0 +1,850 @@
1
+ from collections import defaultdict
2
+ import logging
3
+ import math
4
+ from typing import Dict
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+
9
+ from . import default_hooks as default
10
+ from torch.distributed import distributed_c10d
11
+
12
+ __all__ = [
13
+ "PowerSGDState", "powerSGD_hook", "batched_powerSGD_hook"
14
+ ]
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ def _orthogonalize(matrices, epsilon=0):
20
+ """
21
+ Decide between Gram-Schmidt or QR factorization to orthogonalize a batch of matrices.
22
+
23
+ QR factorization doesn't work with half-precision, but it is usually faster with a rank > 2.
24
+ """
25
+ assert len(matrices.shape) == 3 and matrices.shape[2] <= matrices.shape[1]
26
+
27
+ num_matrices = matrices.shape[0]
28
+ rank = matrices.shape[2]
29
+ dtype = matrices.dtype
30
+ if rank <= 2 or dtype in [torch.float16, torch.bfloat16]:
31
+ _orthogonalize_gram_schmidt(matrices, epsilon=epsilon)
32
+ else:
33
+ torch.linalg.qr(
34
+ matrices,
35
+ out=(
36
+ matrices,
37
+ torch.empty(num_matrices, rank, rank, device=matrices.device, dtype=dtype)
38
+ )
39
+ )
40
+
41
+ def _orthogonalize_gram_schmidt(matrices, epsilon=0):
42
+ """
43
+ Apply Gram-Schmidt procedure to orthogonalize a batch of matrices.
44
+
45
+ If epsilon is 0, this is equivalent to `torch.qr(matrices, out=(matrices, _))`,
46
+ """
47
+ num_cols = matrices.shape[2]
48
+ for i in range(num_cols):
49
+ # Normalize the i'th column.
50
+ col = matrices[:, :, i : i + 1]
51
+ # If no epsilon is added here, division by zero may be caused by vanishing gradients.
52
+ # This epsilon is not needed if the input batch of matrices covers the gradients of at least one entire layer
53
+ # in the neural network.
54
+ if epsilon == 0:
55
+ # Note that col ** 2 can underflow/overflow if we use FP16.
56
+ # May need to consider multiplying a scaling factor and dividing it later, or using bfloat16 instead.
57
+ try:
58
+ col /= torch.norm(col, dim=1, keepdim=True)
59
+ except ZeroDivisionError:
60
+ logger.error(
61
+ "The matrices to be orthogonalized has at least a column of all 0s. Please set a small value such as 1e-8 "
62
+ "as `orthogonalization_epsilon` in PowerSGD state."
63
+ )
64
+ # Recover the values from NaNs to 0s.
65
+ col.fill_(0.0)
66
+ else:
67
+ col /= torch.norm(col, dim=1, keepdim=True) + epsilon
68
+ # Project it on the rest and remove it.
69
+ if i + 1 < num_cols:
70
+ rest = matrices[:, :, i + 1 :]
71
+ rest -= torch.sum(col * rest, dim=1, keepdim=True) * col
72
+
73
+
74
+ def _should_compress(
75
+ num_rows, num_cols, matrix_approximation_rank, min_compression_rate
76
+ ):
77
+ """
78
+ Recommend if tensor given is worth compressing.
79
+
80
+ Returns a recommendation as to whether the 2D tensor described by the arguments is worth compressing,
81
+ including statistics describing the expected savings from compression. We consider a tensor worth
82
+ compressing when ``min_compression_rate`` < uncompressed size / compressed size, where
83
+ uncompressed size = ``num_rows`` * ``num_cols``,
84
+ and compressed size = (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``.
85
+
86
+ The result of this function is a tuple of the form (compression_recommendation, uncompressed_el_count, compressed_el_count), where:
87
+
88
+ compression_recommendation is true if the tensor is worth compressing, and false otherwise (see above);
89
+
90
+ uncompressed_el_count is the uncompressed element count, i.e. ``num_rows`` * ``num_cols``; and,
91
+
92
+ compress_el_count is the element count after compression, i.e. (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``.
93
+ """ # noqa: B950
94
+ uncompressed_size = num_rows * num_cols
95
+ compressed_size = (num_rows + num_cols) * matrix_approximation_rank
96
+ return (
97
+ compressed_size * min_compression_rate < uncompressed_size,
98
+ uncompressed_size,
99
+ compressed_size,
100
+ )
101
+
102
+
103
+ def _report_compression_stats(bucket, state):
104
+ """Report compression stats at frequency of ``compression_stats_logging_frequency`` specified in PowerSGD state."""
105
+ if (
106
+ bucket.is_last()
107
+ and state.iter >= state.next_stats_report
108
+ ):
109
+ stats = state.compression_stats()
110
+ logger.info(
111
+ "Compression stats: iter %s, total before compression %s, total after compression %s, "
112
+ "rate %s", state.iter, stats[1], stats[2], stats[0]
113
+ )
114
+ state.next_stats_report = state.iter + state.compression_stats_logging_frequency
115
+
116
+
117
+ class PowerSGDState:
118
+ r"""
119
+ Store both the algorithm's hyperparameters and internal state for all gradients during training.
120
+
121
+ Particularly, ``matrix_approximation_rank`` and ``start_powerSGD_iter`` are the main hyperparameters that should be tuned by the user.
122
+ For performance, we suggest to keep binary hyperparameters ``use_error_feedback`` and ``warm_start`` on.
123
+
124
+ 1. ``matrix_approximation_rank`` controls the size of compressed low-rank tensors, which determines the compression rate. The lower the rank, the stronger the compression.
125
+
126
+ 1.1. If ``matrix_approximation_rank`` is too low, the full model quality will need more training steps to reach or will never reach and yield loss in accuracy.
127
+
128
+ 1.2. The increase of ``matrix_approximation_rank`` can substantially increase the computation costs of the compression, and the accuracy may not be further improved beyond a certain ``matrix_approximation_rank`` threshold.
129
+
130
+ To tune ``matrix_approximation_rank``, we suggest to start from 1 and increase by factors of 2 (like an exponential grid search, 1, 2, 4, ...), until a satisfactory accuracy is reached. Typically only a small value 1-4 is used. For some NLP tasks (as shown in Appendix D of the original paper), this value has been increased to 32.
131
+
132
+ 2. ``start_powerSGD_iter`` defers PowerSGD compression until step ``start_powerSGD_iter``, and vanilla allreduce runs prior to step ``start_powerSGD_iter``. This hybrid scheme of **vanilla allreduce + PowerSGD** can effectively improve the accuracy, even a relatively small ``matrix_approximation_rank`` is used. This is because that, the beginning of training phase is usually very sensitive to inaccurate gradients, and compressing gradients too early may make the training quickly take a suboptimal trajectory, which can result in an irrecoverable impact on the accuracy.
133
+
134
+ To tune ``start_powerSGD_iter``, we suggest to start with 10% of total training steps, and increase it until a satisfactory accuracy is reached. If there is a warm-up stage in the training, ``start_powerSGD_iter`` typically should be no less than the number of warm-up steps.
135
+
136
+ 3. ``min_compression_rate`` is the minimum compression rate required when a layer is compressed. Due to the computation overheads incurred by the compression, a tensor is worth compressing only if there can be sufficient saving in bandwidth, where ``(num_rows + num_cols) * matrix_approximation_rank * min_compression_rate < num_rows * num_cols``. If the specified compression rate threshold cannot be satisfied, the tensor will be directly allreduced without compression.
137
+
138
+ Compression statistics are logged every ``compression_stats_logging_frequency`` iterations once PowerSGD compression starts.
139
+
140
+ 4. ``orthogonalization_epsilon`` can be a very small value (e.g., 1e-8) added to every normalized matrix column in orthogonalization step, to prevent div-by-zero error if any column has all 0s. If this can already be prevented (e.g., by batch normalization), an epsilon of 0 is recommended for accuracy.
141
+
142
+ 5. ``batch_tensors_with_same_shape`` controls whether to compress and decompress tensors with same shape in a batched operation to achieve higher parallelism. Note that you should also increase the bucket size (i.e., ``bucket_cap_mb`` arg in DDP constructor) to make more same-shaped tensors appear in the same bucket, however this may reduce the overlap between computation and communication, and increase the memory footprint due to stacking the tensors of the same shape. Set to ``True`` if the compression / decompression computation is a bottleneck.
143
+
144
+ .. warning ::
145
+ If error feedback or warm-up is enabled, the minimum value of ``start_powerSGD_iter`` allowed in DDP is 2.
146
+ This is because there is another internal optimization that rebuilds buckets at iteration 1 in DDP,
147
+ and this can conflict with any tensor memorized before the rebuild process.
148
+ """ # noqa: B950
149
+
150
+ __slots__ = [
151
+ "process_group",
152
+ # The fields below are the hyperparameters that often need to be tuned by the user.
153
+ "matrix_approximation_rank",
154
+ "start_powerSGD_iter",
155
+ # The fields below are the hyperparameters that seldom need to be tuned by the user.
156
+ "min_compression_rate",
157
+ "orthogonalization_epsilon",
158
+ # The fields below are the binary hyperparameters recommended to be turned on for performance and accuracy.
159
+ "use_error_feedback",
160
+ "warm_start",
161
+ "batch_tensors_with_same_shape",
162
+ # The fields below are internal state.
163
+ "rng",
164
+ "error_dict",
165
+ "p_memory_dict",
166
+ "q_memory_dict",
167
+ "iter",
168
+ # The fields below are for recording compression stats.
169
+ "total_numel_before_compression",
170
+ "total_numel_after_compression",
171
+ "compression_stats_logging_frequency",
172
+ "next_stats_report",
173
+ ]
174
+
175
+ def __init__(
176
+ self,
177
+ process_group,
178
+ matrix_approximation_rank=1,
179
+ start_powerSGD_iter=1_000,
180
+ min_compression_rate=2,
181
+ use_error_feedback=True,
182
+ warm_start=True,
183
+ orthogonalization_epsilon=0,
184
+ random_seed=0,
185
+ compression_stats_logging_frequency=10_000,
186
+ batch_tensors_with_same_shape: bool = False,
187
+ ):
188
+ logger.info(
189
+ "PowerSGD config: matrix_approximation_rank = %s; start_powerSGD_iter = %s; "
190
+ "min_compression_rate = %s; orthogonalization_epsilon = %s; use_error_feedback = %s; warm_start = %s; "
191
+ "random_seed = %s; compression_stats_logging_frequency = %s; batch_tensors_with_same_shape = %s",
192
+ matrix_approximation_rank,
193
+ start_powerSGD_iter,
194
+ min_compression_rate,
195
+ orthogonalization_epsilon,
196
+ use_error_feedback,
197
+ warm_start,
198
+ random_seed,
199
+ compression_stats_logging_frequency,
200
+ batch_tensors_with_same_shape,
201
+ )
202
+
203
+ self.process_group = process_group
204
+ self.matrix_approximation_rank = matrix_approximation_rank
205
+ # Deferring PowerSGD compression until step 'start_powerSGD_iter' can have two advantages:
206
+ # 1) It turns out that PowerSGD may lead to a non-trivial accuracy loss,
207
+ # even if the matrix approximation rank is increased to a large value.
208
+ # To mitigate the accuracy loss, a simple yet effective way is mixing vanilla allreduce
209
+ # (or a more conservative compression such as FP16 compression) with PowerSGD.
210
+ # 2) There is an internal optimization of rebuilding buckets process in DDP,
211
+ # in order to save the memory space.
212
+ # This step takes place after the first iteration.
213
+ # However, this means that the shape of input bucketized tensors is subject to change,
214
+ # which will complicate the implementations of error feedback and warm-up.
215
+ # Running vanilla allreduce in the first few iterations can avoid this complexity.
216
+ if (use_error_feedback or warm_start) and start_powerSGD_iter <= 1:
217
+ raise ValueError(
218
+ "Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
219
+ "because PowerSGD can only be applied after the first two iterations in DDP."
220
+ )
221
+ self.start_powerSGD_iter = start_powerSGD_iter
222
+ self.min_compression_rate = min_compression_rate
223
+ # Error feedback is usually crucial for both convergence and generalization,
224
+ # because PowerSGD is a biased compressor,
225
+ # i.e., compressing and decompressing a random gradient does not yield the original in expectation.
226
+ # This mechanism requires a temporary copy of the input gradients,
227
+ # so it increases the peak memory consumption by the size of the gradient tensor.
228
+ # However, if the target matrices are known to be exactly low-ranked (instead of just low stable rank),
229
+ # sometimes it is possible to converge to the optima without error feedback.
230
+ # See: http://proceedings.mlr.press/v54/yurtsever17a/yurtsever17a.pdf
231
+ self.use_error_feedback = use_error_feedback
232
+ # Warm-start reuses P(s) and Q(s) from the previous iteration.
233
+ # This can improve the approximation quality and hence improve the accuracy.
234
+ # Additionally, by avoiding the initialization of these low-rank tensors at every step,
235
+ # this can also accelerate training.
236
+ # However, this is at the cost of extra memory.
237
+ self.warm_start = warm_start
238
+ # Can use a very small value to prevent div-by-zero error caused by orthogonalization of vanishing gradients.
239
+ self.orthogonalization_epsilon = orthogonalization_epsilon
240
+ # The purpose of this RNG is to generate different random seeds for initializing Q across iterations,
241
+ # but in the same order for all the DDP replicas.
242
+ # Different random seeds across iterations indicate different 'projections' of the gradients at different SGD steps.
243
+ # If the same random projection is used,
244
+ # there will be differences between the gradients that are never synchronized.
245
+ import numpy as np
246
+ self.rng = np.random.RandomState(random_seed)
247
+ # Since there is only a single state instance for all the input buckets,
248
+ # need to maintain a dictionary that maps each bucket index to the local error.
249
+ self.error_dict: Dict[int, torch.Tensor] = {}
250
+ self.p_memory_dict: Dict[int, torch.Tensor] = {}
251
+ self.q_memory_dict: Dict[int, torch.Tensor] = {}
252
+ # Iteration/step in the training loop.
253
+ self.iter = 0
254
+ # Compression stats accumulators
255
+ self.total_numel_before_compression = 0
256
+ self.total_numel_after_compression = 0
257
+ # We'll report compression stats every 'compression_stats_logging_frequency' iterations
258
+ # Note that we always report compression stats at least once.
259
+ self.compression_stats_logging_frequency = max(
260
+ 1, compression_stats_logging_frequency
261
+ )
262
+ self.next_stats_report = 0
263
+ # Batching tensors with same shape can increase parallelism in compression / decompression computation.
264
+ # This requires a larger bucket size to make more same-shaped tensors appear in one bucket; however,
265
+ # this may reduce the overlap between computation and communication, and increase the memory footprint
266
+ # due to stacking tensors.
267
+ # Turn on if compression / decompression computation is a bottleneck.
268
+ self.batch_tensors_with_same_shape = batch_tensors_with_same_shape
269
+
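The constructor arguments above map directly onto the tuning guidance in the class docstring. As a hedged usage sketch (the hyperparameter values and the ``ddp_model`` variable are illustrative assumptions, not recommendations), a typical setup could look like:
>>> # xdoctest: +SKIP
>>> from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
>>> state = powerSGD.PowerSGDState(
>>>     process_group=None,            # fall back to the default (world) process group
>>>     matrix_approximation_rank=2,   # start small (1, 2, 4, ...) and grow if accuracy is unsatisfactory
>>>     start_powerSGD_iter=1_000,     # roughly 10% of total steps, and past any warm-up stage
>>>     min_compression_rate=2,
>>> )
>>> ddp_model.register_comm_hook(state, powerSGD.powerSGD_hook)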
270
+ def __getstate__(self):
271
+ r"""
272
+ Return a ``Dict[str, Any]`` which will be pickled and saved.
273
+
274
+ ``process_group`` is not serializable and excluded from
275
+ a returned state.
276
+ """
277
+ logger.warning(
278
+ "NOTE: Process group is not serializable and excluded from a saved state."
279
+ )
280
+ return {
281
+ slot: getattr(self, slot)
282
+ for slot in self.__slots__ if slot != "process_group"
283
+ }
284
+
285
+ def __setstate__(self, state):
286
+ r"""
287
+ Take a provided ``state`` and set to this ``PowerSGDState`` instance.
288
+
289
+ ``process_group`` is set to default.
290
+ """
291
+ self.process_group = distributed_c10d._get_default_group()
292
+ logger.warning(
293
+ "NOTE: Process group will be set to a default group (i.e. the world size).\
294
+ If a different group is desired, please set `self.process_group` after PowerSGD state is loaded."
295
+ )
296
+ for slot, value in state.items():
297
+ setattr(self, slot, value)
298
+
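Because ``__getstate__`` and ``__setstate__`` above exclude the process group, a PowerSGD state can be pickled next to a model checkpoint and restored later. A minimal sketch, assuming an existing ``state`` and ``ddp_model`` and an arbitrary file name:
>>> # xdoctest: +SKIP
>>> import pickle
>>> from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
>>> with open("powersgd_state.pkl", "wb") as f:
>>>     pickle.dump(state, f)          # process_group is dropped by __getstate__
>>> with open("powersgd_state.pkl", "rb") as f:
>>>     state = pickle.load(f)         # __setstate__ reattaches the default process group
>>> # If a non-default group is desired, set state.process_group explicitly here.
>>> ddp_model.register_comm_hook(state, powerSGD.powerSGD_hook)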
299
+ def maybe_increase_iter(self, bucket):
300
+ """Track iterations and trigger log message at start of local SGD."""
301
+ # Bucket 0 is the last bucket to be allreduced in an iteration.
302
+ # Only increase `iter` when bucket 0 is processed.
303
+ if bucket.is_last():
304
+ self.iter += 1
305
+
306
+ if self.iter == self.start_powerSGD_iter:
307
+ logger.info(
308
+ "Start to apply PowerSGD after %s iterations.", self.iter
309
+ )
310
+
311
+ def compression_stats(self):
312
+ r"""
313
+ Return latest compression statistics as tuple.
314
+
315
+ Returns tuple of form (compress_rate, numel_before_compression, numel_after_compression) where:
316
+
317
+ compress_rate is the effective compression rate i.e. (number of elements before compression) / (number of elements after compression);
318
+
319
+ numel_before_compression is the total number of elements before compression was applied; and,
320
+
321
+ numel_after_compression is the total number of elements after compression was applied.
322
+ """ # noqa: B950
323
+ compress_rate = (
324
+ self.total_numel_before_compression / self.total_numel_after_compression
325
+ if self.total_numel_after_compression > 0
326
+ else 0
327
+ )
328
+ return (
329
+ compress_rate,
330
+ self.total_numel_before_compression,
331
+ self.total_numel_after_compression,
332
+ )
333
+
334
+
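As a worked illustration of the ``min_compression_rate`` criterion documented above (the layer shape is made up): a 1024 x 1024 weight compressed at rank 4 communicates ``(1024 + 1024) * 4 = 8192`` elements instead of ``1024 * 1024 = 1048576``, and it passes the check with ``min_compression_rate=2`` because ``8192 * 2 < 1048576``. The accumulated effect over all buckets can be read back from the state via ``compression_stats()``, e.g.:
>>> # xdoctest: +SKIP
>>> compress_rate, numel_before, numel_after = state.compression_stats()
>>> print(f"effective compression so far: {compress_rate:.1f}x ({numel_before} -> {numel_after} elements)")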
335
+ def powerSGD_hook(
336
+ state: PowerSGDState, bucket: dist.GradBucket
337
+ ) -> torch.futures.Future[torch.Tensor]:
338
+ r"""
339
+ Implement PowerSGD algorithm.
340
+
341
+ This DDP communication hook implements PowerSGD gradient compression
342
+ algorithm described in the `paper <https://arxiv.org/abs/1905.13727>`_.
343
+ Once gradient tensors are aggregated across all workers, this hook applies
344
+ compression as follows:
345
+
346
+ 1. Views the input flattened 1D gradient tensor as a list of per-parameter tensors, and divides all the tensors into two groups:
347
+
348
+ 1.1 The tensors that should be compressed before allreduce, because the compression can give enough saving in bandwidth.
349
+
350
+ 1.2 The rest of the tensors will be allreduced directly without compression, including all the vector tensors (for biases).
351
+
352
+ 2. Handles uncompressed tensors:
353
+
354
+ 2.1. Allocates contiguous memory for those uncompressed tensors, and allreduces all the uncompressed tensors as a batch, without compression;
355
+
356
+ 2.2. Copies the individual uncompressed tensors from the contiguous memory back to the input tensor.
357
+
358
+ 3. Handles the tensors that should be compressed by PowerSGD compression:
359
+
360
+ 3.1. For each tensor M, creates two low-rank tensors P and Q for decomposing M,
361
+ such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized;
362
+
363
+ 3.2. Computes each P in Ps, which is equal to MQ;
364
+
365
+ 3.3. Allreduces Ps as a batch;
366
+
367
+ 3.4. Orthogonalizes each P in Ps;
368
+
369
+ 3.5. Computes each Q in Qs, which is approximately equal to M^TP;
370
+
371
+ 3.6. Allreduces Qs as a batch;
372
+
373
+ 3.7. Computes each M among all the compressed tensors, which is approximately equal to PQ^T.
374
+
375
+ Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations.
376
+ This not only gives the user more control over the tradeoff between speedup and accuracy,
377
+ but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers.
378
+
379
+ Args:
380
+ state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc.
381
+ To tune the compression configs, one mainly needs to tune ``matrix_approximation_rank``, ``start_powerSGD_iter``
382
+ and ``min_compression_rate``.
383
+ bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
384
+ Note that since DDP comm hook only supports single process single device mode,
385
+ exactly one tensor is stored in this bucket.
386
+
387
+ Returns:
388
+ Future handler of the communication, which updates the gradients in place.
389
+
390
+ Example::
391
+ >>> # xdoctest: +SKIP
392
+ >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1,
393
+ start_powerSGD_iter=10, min_compression_rate=0.5)
394
+ >>> ddp_model.register_comm_hook(state, powerSGD_hook)
395
+ """ # noqa: B950
396
+ process_group = state.process_group
397
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
398
+ world_size = group_to_use.size()
399
+
400
+ # The input tensor is a flattened 1D tensor.
401
+ input_tensor = bucket.buffer()
402
+
403
+ # Run vanilla allreduce in the first `start_powerSGD_iter` iterations.
404
+ if state.iter < state.start_powerSGD_iter:
405
+ state.maybe_increase_iter(bucket)
406
+ return default._allreduce_fut(group_to_use, input_tensor)
407
+
408
+ # Apply PowerSGD after `start_powerSGD_iter` iterations.
409
+ device = input_tensor.device
410
+ dtype = input_tensor.dtype
411
+
412
+ # Incorporate the error from the previous state into the gradients.
413
+ bucket_index = bucket.index()
414
+ input_tensor_cp = None
415
+ total_length = input_tensor.shape[0]
416
+ if state.use_error_feedback:
417
+ if bucket_index in state.error_dict:
418
+ input_tensor.add_(state.error_dict[bucket_index])
419
+ else:
420
+ logger.info(
421
+ "A zero tensor of length %s that represents local error is created.",
422
+ total_length
423
+ )
424
+ state.error_dict[bucket_index] = torch.zeros(
425
+ total_length, device=device, dtype=dtype
426
+ )
427
+
428
+ # Keep a copy of the input tensor,
429
+ # so that we can compute the local error caused by compression later,
430
+ # by comparing this copy and the input tensor updated after decompression.
431
+ input_tensor_cp = torch.clone(input_tensor).detach()
432
+
433
+ # Unflatten the input tensor into per-parameter tensors, for layer-wise compression.
434
+ tensors = bucket.gradients()
435
+
436
+ # Step I: Divide all the tensors into two groups,
437
+ # one will be compressed before allreduce and the other will be directly allreduced without compression.
438
+ tensors_to_compress, uncompressed_tensors = [], []
439
+ total_Ps_size = 0
440
+ total_Qs_size = 0
441
+ for tensor in tensors:
442
+ matrix = tensor.view(tensor.shape[0], -1)
443
+ n, m = matrix.shape
444
+ matrix_approximation_rank = min(n, m, state.matrix_approximation_rank)
445
+ compress_test = _should_compress(
446
+ n, m, matrix_approximation_rank, state.min_compression_rate
447
+ )
448
+ state.total_numel_before_compression += compress_test[1]
449
+ if compress_test[0]:
450
+ tensors_to_compress.append(matrix)
451
+ total_Ps_size += n * matrix_approximation_rank
452
+ total_Qs_size += m * matrix_approximation_rank
453
+ state.total_numel_after_compression += compress_test[2]
454
+ else:
455
+ uncompressed_tensors.append(tensor)
456
+ state.total_numel_after_compression += compress_test[1]
457
+
458
+ _report_compression_stats(bucket, state)
459
+
460
+ # Step II: Handle uncompressed tensors.
461
+ # Allocate contiguous memory for these tensors to allreduce efficiently.
462
+ uncompressed_tensors_memory = (
463
+ torch.cat([tensor.view(-1) for tensor in uncompressed_tensors])
464
+ if uncompressed_tensors
465
+ else torch.tensor([], device=device, dtype=dtype)
466
+ )
467
+
468
+ # Step III: Handle the tensors that should be compressed.
469
+ # Allocate contiguous memory for Ps and Qs to allreduce efficiently.
470
+ # If warm-start is enabled, reuse Ps and Qs from the previous iteration if possible.
471
+ # The memory spaces of Ps and Qs need to be allocated in the first iteration when PowerSGD is applied.
472
+ need_randomize_qs = False
473
+ if not state.warm_start or bucket_index not in state.p_memory_dict:
474
+ need_randomize_qs = True
475
+ # If warm-start is disabled, low-rank tensors will be initialized at every step.
476
+ # Only log this if warm-start is enabled, to avoid spamming.
477
+ if state.warm_start:
478
+ logger.info(
479
+ "Allocating contiguous memory of length %s for Ps, and of length %s for Qs, respectively.",
480
+ total_Ps_size, total_Qs_size
481
+ )
482
+ state.p_memory_dict[bucket_index] = torch.empty(
483
+ total_Ps_size, device=device, dtype=dtype
484
+ )
485
+ state.q_memory_dict[bucket_index] = torch.empty(
486
+ total_Qs_size, device=device, dtype=dtype
487
+ )
488
+
489
+ # Batch tensors to compress by shape.
490
+ shape_to_tensors = defaultdict(list)
491
+ for tensor in tensors_to_compress:
492
+ shape_to_tensors[tensor.shape].append(tensor)
493
+
494
+ # This function decides whether to batch tensors with same shape or not according to the argument,
495
+ # so the following process could share the same code.
496
+ def maybe_batched_tensors_to_compress():
497
+ for tensors in shape_to_tensors.values():
498
+ if state.batch_tensors_with_same_shape:
499
+ batch_size = len(tensors)
500
+ if batch_size == 1:
501
+ # Use the original tensor to avoid copy.
502
+ yield tensors[0].unsqueeze(0)
503
+ else:
504
+ yield torch.stack(tensors)
505
+ else:
506
+ for tensor in tensors:
507
+ yield tensor.unsqueeze(0)
508
+
509
+ # Create Ps and Qs that point to the allocated memory.
510
+ tensors_to_compress = []
511
+ ps = []
512
+ qs = []
513
+ p_idx = 0
514
+ q_idx = 0
515
+ for tensor in maybe_batched_tensors_to_compress():
516
+ batch_size, n, m = tensor.shape
517
+ matrix_approximation_rank = min(n, m, state.matrix_approximation_rank)
518
+ tensors_to_compress.append(tensor)
519
+ ps.append(
520
+ state.p_memory_dict[bucket_index][
521
+ p_idx : p_idx + batch_size * n * matrix_approximation_rank
522
+ ].view(batch_size, n, matrix_approximation_rank)
523
+ )
524
+ qs.append(
525
+ state.q_memory_dict[bucket_index][
526
+ q_idx : q_idx + batch_size * m * matrix_approximation_rank
527
+ ].view(batch_size, m, matrix_approximation_rank)
528
+ )
529
+ p_idx += batch_size * n * matrix_approximation_rank
530
+ q_idx += batch_size * m * matrix_approximation_rank
531
+
532
+ # If warm-start is enabled, reuse Qs from the previous iteration if possible and skip filling random values.
533
+ # The exception is the first iteration when PowerSGD is applied.
534
+ if not need_randomize_qs:
535
+ for q in qs:
536
+ _orthogonalize(q, state.orthogonalization_epsilon)
537
+ else:
538
+ with torch.random.fork_rng(devices=[]):
539
+ # Fork this RNG to avoid changing the seed globally and affecting the random sampling anywhere else in the training.
540
+ # The seed makes sure that the initial random values are the same across all the DDP replicas.
541
+ # This seed should differ at every step.
542
+ # Since it is very slow to fork RNG state across all the CUDA devices,
543
+ # only fork on CPU and then move the generated tensor to the CUDA device (by overwriting q).
544
+ torch.manual_seed(state.rng.randint(1_000_000_000))
545
+ for q in qs:
546
+ q.copy_(
547
+ torch.randn(
548
+ *q.shape,
549
+ device="cpu",
550
+ dtype=dtype,
551
+ )
552
+ )
553
+ _orthogonalize(q, state.orthogonalization_epsilon)
554
+
555
+ # Compute Ps.
556
+ for tensor, q, p in zip(tensors_to_compress, qs, ps):
557
+ torch.bmm(tensor, q, out=p)
558
+
559
+ # This allreduce is only applied to uncompressed tensors,
560
+ # so it should have been kicked off before the above computation on the compressed tensors to hide more communication costs.
561
+ # However, this somehow requires a separate future chain at this time.
562
+ allreduce_contiguous_uncompressed_tensors_fut = dist.all_reduce(
563
+ uncompressed_tensors_memory, group=group_to_use, async_op=True
564
+ ).get_future()
565
+
566
+ def unpack_uncompressed_tensors_and_allreduce_ps(fut):
567
+ uncompressed_tensors_memory = fut.value()[0].div_(world_size)
568
+ idx = 0
569
+ for tensor in uncompressed_tensors:
570
+ tensor.copy_(
571
+ uncompressed_tensors_memory[idx : idx + tensor.numel()].view_as(tensor)
572
+ )
573
+ idx += tensor.numel()
574
+
575
+ # Since these Ps will be orthogonalized later, no need to divide them by world size.
576
+ return (
577
+ dist.all_reduce(
578
+ state.p_memory_dict[bucket_index], group=group_to_use, async_op=True
579
+ )
580
+ .get_future()
581
+ .wait()[0]
582
+ )
583
+
584
+ def compute_qs(fut):
585
+ state.p_memory_dict[bucket_index] = fut.value()
586
+ for p in ps:
587
+ _orthogonalize(p, state.orthogonalization_epsilon)
588
+
589
+ # Compute Qs.
590
+ for tensor, p, q in zip(tensors_to_compress, ps, qs):
591
+ torch.bmm(tensor.transpose(1, 2), p, out=q)
592
+
593
+ # TODO: The above procedure does two matmul+allreduce steps per iteration --
594
+ # one left multiplication and one right multiplication.
595
+ # For warm-start, can take one such step at a time, and alternate between them.
596
+
597
+ # Allreduce Qs.
598
+ return (
599
+ dist.all_reduce(
600
+ state.q_memory_dict[bucket_index], group=group_to_use, async_op=True
601
+ )
602
+ .get_future()
603
+ .wait()[0]
604
+ )
605
+
606
+ def decompress(fut):
607
+ state.q_memory_dict[bucket_index] = fut.value().div_(world_size)
608
+
609
+ for p, q, tensor in zip(ps, qs, tensors_to_compress):
610
+ torch.bmm(p, q.transpose(1, 2), out=tensor)
611
+
612
+ # Copy batched tensors back to original buffer.
613
+ if state.batch_tensors_with_same_shape:
614
+ for tensor in tensors_to_compress:
615
+ if tensor.shape[0] == 1:
616
+ # Skip tensor with batch_size == 1 since itself is the original tensor.
617
+ continue
618
+ original_tensors = shape_to_tensors[tensor.shape[1:]]
619
+ for i, original_tensor in enumerate(original_tensors):
620
+ original_tensor.copy_(tensor[i])
621
+
622
+ if torch.cuda.is_available():
623
+ torch.cuda.synchronize(device)
624
+
625
+ if state.use_error_feedback:
626
+ # Memorize the local errors.
627
+ state.error_dict[bucket_index] = input_tensor_cp - input_tensor
628
+ if not state.warm_start:
629
+ state.p_memory_dict.clear()
630
+ state.q_memory_dict.clear()
631
+
632
+ state.maybe_increase_iter(bucket)
633
+
634
+ return input_tensor
635
+
636
+ return (
637
+ allreduce_contiguous_uncompressed_tensors_fut.then(
638
+ unpack_uncompressed_tensors_and_allreduce_ps
639
+ )
640
+ .then(compute_qs)
641
+ .then(decompress)
642
+ )
643
+
644
+
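The hook above can also be composed with the generic FP16 compression wrapper from ``default_hooks``, so that the gradient bucket is cast to ``torch.float16`` before the PowerSGD hook runs on it and cast back afterwards. A hedged sketch, assuming ``ddp_model`` is an already-initialized ``DistributedDataParallel`` instance:
>>> # xdoctest: +SKIP
>>> from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default
>>> from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
>>> state = powerSGD.PowerSGDState(process_group=None, matrix_approximation_rank=1, start_powerSGD_iter=1_000)
>>> ddp_model.register_comm_hook(state, default.fp16_compress_wrapper(powerSGD.powerSGD_hook))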
645
+ def batched_powerSGD_hook(
646
+ state: PowerSGDState, bucket: dist.GradBucket
647
+ ) -> torch.futures.Future[torch.Tensor]:
648
+ r"""
649
+ Implement simplified PowerSGD algorithm.
650
+
651
+ This DDP communication hook implements a simplified PowerSGD gradient compression
652
+ algorithm described in the `paper <https://arxiv.org/abs/1905.13727>`_.
653
+ This variant does not compress the gradients layer by layer,
654
+ but instead compresses the flattened input tensor that batches all the gradients.
655
+ Therefore, it is **faster** than :meth:`powerSGD_hook`,
656
+ but usually results in a **much lower accuracy**, unless ``matrix_approximation_rank`` is 1.
657
+
658
+ .. warning ::
659
+ Increasing ``matrix_approximation_rank`` here may not necessarily increase the accuracy,
660
+ because batching per-parameter tensors without column/row alignment can destroy low-rank structure.
661
+ Therefore, the user should always consider :meth:`powerSGD_hook` first,
662
+ and only consider this variant when a satisfactory accuracy can be achieved when ``matrix_approximation_rank`` is 1.
663
+
664
+ Once gradient tensors are aggregated across all workers, this hook applies
665
+ compression as follows:
666
+
667
+ 1. Views the input flattened 1D gradient tensor as a square-shaped tensor M with 0 paddings;
668
+
669
+ 2. Creates two low-rank tensors P and Q for decomposing M, such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized;
670
+
671
+ 3. Computes P, which is equal to MQ;
672
+
673
+ 4. Allreduces P;
674
+
675
+ 5. Orthogonalizes P;
676
+
677
+ 6. Computes Q, which is approximately equal to M^TP;
678
+
679
+ 7. Allreduces Q;
680
+
681
+ 8. Computes M, which is approximately equal to PQ^T.
682
+
683
+ 9. Truncates the input tensor to the original length.
684
+
685
+ Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations.
686
+ This not only gives the user more control over the tradeoff between speedup and accuracy,
687
+ but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers.
688
+
689
+ Args:
690
+ state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc.
691
+ To tune the compression configs, one mainly needs to tune ``matrix_approximation_rank`` and ``start_powerSGD_iter``.
692
+ bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
693
+ Note that since DDP comm hook only supports single process single device mode,
694
+ exactly one tensor is stored in this bucket.
695
+
696
+ Returns:
697
+ Future handler of the communication, which updates the gradients in place.
698
+
699
+ Example::
700
+ >>> # xdoctest: +SKIP
701
+ >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1)
702
+ >>> ddp_model.register_comm_hook(state, batched_powerSGD_hook)
703
+ """ # noqa: B950
704
+ process_group = state.process_group
705
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
706
+ world_size = group_to_use.size()
707
+
708
+ # The input tensor is a flattened 1D tensor.
709
+ input_tensor = bucket.buffer()
710
+
711
+ # Run vanilla allreduce in the first `start_powerSGD_iter` iterations.
712
+ if state.iter < state.start_powerSGD_iter:
713
+ state.maybe_increase_iter(bucket)
714
+ return default._allreduce_fut(group_to_use, input_tensor)
715
+
716
+ # Apply PowerSGD after `start_powerSGD_iter` iterations.
717
+ device = input_tensor.device
718
+ total_length = input_tensor.shape[0]
719
+ state.total_numel_before_compression += total_length
720
+
721
+ # View the input tensor as a 2D square-shape tensor, and pad 0s if necessary.
722
+ square_side_length = math.ceil(math.sqrt(total_length))
723
+ state.total_numel_after_compression += (
724
+ square_side_length * state.matrix_approximation_rank * 2
725
+ )
726
+ padded_total_length = square_side_length ** 2
727
+ input_tensor.resize_(padded_total_length)
728
+ input_tensor[total_length:padded_total_length].fill_(0)
729
+
730
+ _report_compression_stats(bucket, state)
731
+
732
+ # Incorporate the error from the previous state into the gradients.
733
+ bucket_index = bucket.index()
734
+ input_tensor_cp = None
735
+ if state.use_error_feedback:
736
+ if bucket_index in state.error_dict:
737
+ input_tensor.add_(state.error_dict[bucket_index])
738
+ else:
739
+ logger.info(
740
+ "A zero tensor of length %s that represents local error is created.",
741
+ padded_total_length
742
+ )
743
+ state.error_dict[bucket_index] = torch.zeros(
744
+ padded_total_length, device=device, dtype=input_tensor.dtype
745
+ )
746
+
747
+ # Keep a copy of the input tensor,
748
+ # so that we can compute the local error caused by compression later,
749
+ # by comparing this copy and the input tensor updated after decompression.
750
+ input_tensor_cp = torch.clone(input_tensor).detach()
751
+ matrix = input_tensor.view(square_side_length, square_side_length)
752
+
753
+ # Reuse P and Q from the previous iteration if possible.
754
+ # The memory spaces of P and Q need to be allocated in the first iteration when PowerSGD is applied.
755
+ if not state.warm_start or bucket_index not in state.p_memory_dict:
756
+ # If warm-start is disabled, low-rank tensors will be initialized at every step.
757
+ # Only log this if warm-start is enabled, to avoid spamming.
758
+ if state.warm_start:
759
+ logger.info(
760
+ "Initializing low-rank tensors P and Q, each of which has a shape of %s x %s.",
761
+ square_side_length, state.matrix_approximation_rank
762
+ )
763
+
764
+ def create_low_rank_tensor(fill_random_values, rng):
765
+ """Return a low-rank 2D tensor of square_side_length * matrix_approximation_rank."""
766
+ if fill_random_values:
767
+ with torch.random.fork_rng(devices=[]):
768
+ # Fork this RNG to avoid changing the seed globally and affecting the random sampling
769
+ # anywhere else in the training.
770
+ # The seed makes sure that the initial random values are the same across all the DDP replicas.
771
+ # This seed should differ at every step.
772
+ # Since it is very slow to fork RNG state across all the CUDA devices,
773
+ # only fork on CPU and then move the generated tensor to the CUDA device.
774
+ torch.manual_seed(rng.randint(1_000_000_000))
775
+ return torch.randn(
776
+ square_side_length,
777
+ state.matrix_approximation_rank,
778
+ device="cpu",
779
+ dtype=input_tensor.dtype,
780
+ ).to(device)
781
+ else:
782
+ return torch.empty(
783
+ square_side_length,
784
+ state.matrix_approximation_rank,
785
+ device=device,
786
+ dtype=input_tensor.dtype,
787
+ )
788
+
789
+ state.p_memory_dict[bucket_index] = create_low_rank_tensor(
790
+ fill_random_values=False, rng=state.rng
791
+ )
792
+ state.q_memory_dict[bucket_index] = create_low_rank_tensor(
793
+ fill_random_values=True, rng=state.rng
794
+ )
795
+ _orthogonalize(state.q_memory_dict[bucket_index])
796
+
797
+ torch.matmul(
798
+ matrix, state.q_memory_dict[bucket_index], out=state.p_memory_dict[bucket_index]
799
+ )
800
+ allreduce_p_fut = dist.all_reduce(
801
+ state.p_memory_dict[bucket_index], group=group_to_use, async_op=True
802
+ ).get_future()
803
+
804
+ def compute_q(fut):
805
+ state.p_memory_dict[bucket_index] = fut.value()[0]
806
+ _orthogonalize(state.p_memory_dict[bucket_index])
807
+
808
+ torch.matmul(
809
+ matrix.t(),
810
+ state.p_memory_dict[bucket_index],
811
+ out=state.q_memory_dict[bucket_index],
812
+ )
813
+
814
+ # TODO: The above procedure does two matmul+allreduce steps per iteration --
815
+ # one left multiplication and one right multiplication.
816
+ # For warm-start, can take one such step at a time, and alternate between them.
817
+
818
+ return (
819
+ dist.all_reduce(
820
+ state.q_memory_dict[bucket_index], group=group_to_use, async_op=True
821
+ )
822
+ .get_future()
823
+ .wait()[0]
824
+ )
825
+
826
+ def decompress(fut):
827
+ state.q_memory_dict[bucket_index] = fut.value().div_(world_size)
828
+ torch.matmul(
829
+ state.p_memory_dict[bucket_index],
830
+ state.q_memory_dict[bucket_index].t(),
831
+ out=matrix,
832
+ )
833
+
834
+ if state.use_error_feedback:
835
+ # Memorize the local errors.
836
+ state.error_dict[bucket_index] = input_tensor_cp - input_tensor
837
+ # Removing this seemingly unnecessary sync somehow may cause failures.
838
+ # See: https://github.com/pytorch/pytorch/pull/54838
839
+ if torch.cuda.is_available():
840
+ torch.cuda.synchronize(device)
841
+ if not state.warm_start:
842
+ state.p_memory_dict.clear()
843
+ state.q_memory_dict.clear()
844
+ ret = input_tensor.resize_(total_length)
845
+
846
+ state.maybe_increase_iter(bucket)
847
+
848
+ return ret
849
+
850
+ return allreduce_p_fut.then(compute_q).then(decompress)
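As a rough sense of the batched variant's compression, note from the code above that a flat bucket of ``total_length`` elements is padded to a ``side x side`` square with ``side = ceil(sqrt(total_length))``, and only ``2 * side * matrix_approximation_rank`` elements are communicated. A small sketch of that arithmetic (the bucket size is chosen arbitrarily):
>>> import math
>>> total_length = 25_000_000          # e.g. a 25M-element bucket
>>> rank = 1
>>> side = math.ceil(math.sqrt(total_length))
>>> 2 * side * rank                    # elements actually communicated
10000
>>> total_length / (2 * side * rank)   # approximate compression ratio
2500.0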
venv/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py ADDED
@@ -0,0 +1,217 @@
1
+ import torch
2
+ import torch.distributed as dist
3
+ from torch import nn
4
+
5
+
6
+ def _quantize_per_tensor_cuda(x, scale, zero_point):
7
+ y = torch.round(x / scale) + zero_point
8
+ y = torch.clamp(y, 0, 255).to(torch.uint8)
9
+ return y
10
+
11
+
12
+ def _dequantize_per_tensor_cuda(y, scale, zero_point):
13
+ x = scale * (y.to(torch.float32) - zero_point)
14
+ return x
15
+
16
+
17
+ def _quantize_per_channel_cuda(x, scale, zero_point):
18
+ y = torch.zeros(x.size(), device=x.device)
19
+ for i in range(x.size()[0]):
20
+ y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
21
+ y = torch.clamp(y, 0, 255).to(torch.uint8)
22
+ return y
23
+
24
+
25
+ def _dequantize_per_channel_cuda(y, scale, zero_point):
26
+ y = y.to(torch.float32).cuda(y.device)
27
+ x = torch.zeros_like(y, device=y.device)
28
+ for i in range(x.size()[0]):
29
+ x[i, :] = scale[i] * (y[i, :] - zero_point[i])
30
+ return x
31
+
32
+
33
+ def _get_allgather_out_list(all_gather_in_list, world_size):
34
+ out_list = [
35
+ torch.zeros_like(
36
+ all_gather_in_list,
37
+ device=all_gather_in_list.device,
38
+ dtype=all_gather_in_list.dtype,
39
+ )
40
+ for _ in range(world_size)
41
+ ]
42
+ return out_list
43
+
44
+
45
+ def quantization_pertensor_hook(
46
+ process_group: dist.ProcessGroup, bucket: dist.GradBucket
47
+ ) -> torch.futures.Future[torch.Tensor]:
48
+ """
49
+ Apply ``torch.quantize_per_tensor`` logic to DDP using ``allgather`` protocol.
50
+
51
+ Workers first allgather the scale and zero point of their own
52
+ ``GradBucket`` prior to the quantization. After all workers have that information,
53
+ the first ``then`` callback called ``quantize_and_allgather`` quantizes worker's
54
+ own gradient tensor, and uses ``allgather`` to communicate these across all workers.
55
+ The final ``then`` callback called ``dequantize_and_aggregate``, dequantizes and
56
+ aggregates each quantized gradient tensor locally and returns the mean.
57
+
58
+ .. warning ::
59
+ This is experimental, and uses ``allgather`` protocol which is considerably slower than
60
+ ``allreduce`` protocol. It works only with flattened grads.
61
+
62
+ Example::
63
+ >>> # xdoctest: +SKIP
64
+ >>> ddp_model.register_comm_hook(process_group, quantization_pertensor_hook)
65
+ """
66
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
67
+ rank = process_group.rank() if process_group is not None else dist.get_rank()
68
+ world_size = group_to_use.size()
69
+
70
+ tensor = bucket.buffer()
71
+
72
+ myObserver = torch.ao.quantization.MinMaxObserver().cuda(tensor.device)
73
+ myObserver(tensor)
74
+
75
+ s, z = myObserver.calculate_qparams()
76
+ s_and_z = torch.FloatTensor([s, z]).cuda(tensor.device)
77
+
78
+ all_ranks_s_and_z = _get_allgather_out_list(s_and_z, world_size)
79
+
80
+ # First, allgather scale and zeros.
81
+ fut = dist.all_gather(
82
+ all_ranks_s_and_z, s_and_z, group=group_to_use, async_op=True
83
+ ).get_future()
84
+
85
+ def quantize_and_allgather(fut):
86
+ # Store scale and zeros across all workers.
87
+ all_ranks_s_and_z = fut.wait()[0]
88
+ # All workers quantize their own ``GradBucket`` tensors.
89
+ quantized_tensor = _quantize_per_tensor_cuda(
90
+ tensor, all_ranks_s_and_z[rank][0], all_ranks_s_and_z[rank][1]
91
+ )
92
+ # Allgather quantized tensors.
93
+ fut = dist.all_gather(
94
+ _get_allgather_out_list(quantized_tensor, world_size),
95
+ quantized_tensor,
96
+ group=group_to_use,
97
+ async_op=True,
98
+ ).get_future()
99
+
100
+ return fut.wait()
101
+
102
+ def dequantize_and_aggregate(fut):
103
+ all_ranks_quantized_tensor = fut.wait()[0]
104
+
105
+ aggregated_dequantized_tensor = torch.zeros_like(
106
+ all_ranks_quantized_tensor[0], device=tensor.device, dtype=torch.float32
107
+ )
108
+ # Using previously allgathered scales and zeros, dequantize gradient tensors
109
+ # locally and then aggregate them.
110
+ for r, quantized_tensor in enumerate(all_ranks_quantized_tensor):
111
+ aggregated_dequantized_tensor += _dequantize_per_tensor_cuda(
112
+ quantized_tensor, all_ranks_s_and_z[r][0], all_ranks_s_and_z[r][1]
113
+ )
114
+
115
+ return aggregated_dequantized_tensor / world_size
116
+
117
+ return fut.then(quantize_and_allgather).then(dequantize_and_aggregate)
118
+
119
+
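The round trip through the helpers defined at the top of this file makes the quantization error visible. Despite their ``_cuda`` suffix, the arithmetic in those helpers is device-agnostic, so this hedged sketch (tensor size and values are arbitrary) can also be tried on CPU:
>>> # xdoctest: +SKIP
>>> import torch
>>> from torch.ao.quantization import MinMaxObserver
>>> from torch.distributed.algorithms.ddp_comm_hooks.quantization_hooks import (
>>>     _dequantize_per_tensor_cuda, _quantize_per_tensor_cuda)
>>> g = torch.randn(1024)              # stand-in for a flattened gradient bucket
>>> obs = MinMaxObserver()
>>> _ = obs(g)
>>> scale, zero_point = obs.calculate_qparams()
>>> q = _quantize_per_tensor_cuda(g, scale, zero_point)        # uint8 payload that gets allgathered
>>> g_hat = _dequantize_per_tensor_cuda(q, scale, zero_point)
>>> max_err = (g - g_hat).abs().max()  # roughly scale / 2, plus any clamping error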
120
+ def quantization_perchannel_hook(
121
+ process_group: dist.ProcessGroup, bucket: dist.GradBucket, bucket_size=512
122
+ ) -> torch.futures.Future[torch.Tensor]:
123
+ """
124
+ Apply ``torch.quantize_per_channel`` logic to DDP using ``allgather`` protocol.
125
+
126
+ Compared to per-tensor quantization, the main motivation of per-channel
127
+ quantization is that for considerably large tensors (e.g., a tensor that
128
+ contains 6 million elements), quantizing per bucket of 512 (or 128) elements
129
+ may significantly increase the resolution.
130
+
131
+ It first splits ``GradBucket`` tensor into multiple chunks (channels) of ``bucket_size``
132
+ elements. Then, workers allgather the scales and zero points of their own
133
+ ``GradBucket`` prior to the quantization. After all workers have that information,
134
+ the first ``then`` callback called ``quantize_and_allgather`` quantizes worker's
135
+ own gradient tensor, and uses ``allgather`` to communicate these across all workers.
136
+ The final ``then`` callback called ``dequantize_and_aggregate``, dequantizes, flattens, and
137
+ aggregates each quantized gradient tensor locally and returns the mean.
138
+
139
+ .. warning ::
140
+ This is experimental, and uses ``allgather`` protocol which is considerably slower than
141
+ ``allreduce`` protocol. It works only with flattened grads.
142
+
143
+ Example::
144
+ >>> # xdoctest: +SKIP
145
+ >>> ddp_model.register_comm_hook(process_group, quantization_perchannel_hook)
146
+ """
147
+ group_to_use = process_group if process_group is not None else dist.group.WORLD
148
+ rank = process_group.rank() if process_group is not None else dist.get_rank()
149
+ world_size = group_to_use.size()
150
+
151
+ tensor = bucket.buffer()
152
+
153
+ tensor_in_channels = (
154
+ nn.functional.pad(
155
+ input=tensor,
156
+ pad=(0, bucket_size - len(tensor) % bucket_size),
157
+ mode="constant",
158
+ value=0,
159
+ )
160
+ .view(-1, bucket_size)
161
+ .cuda(tensor.device)
162
+ )
163
+
164
+ myPerChannelObserver = torch.ao.quantization.PerChannelMinMaxObserver().cuda(
165
+ tensor.device
166
+ )
167
+ myPerChannelObserver(tensor_in_channels)
168
+
169
+ s_ch, z_ch = myPerChannelObserver.calculate_qparams()
170
+ s_and_z = torch.stack((s_ch, z_ch)).cuda(tensor.device)
171
+
172
+ all_ranks_s_and_z = _get_allgather_out_list(s_and_z, world_size)
173
+ # First, allgather scale and zeros.
174
+ fut = dist.all_gather(
175
+ all_ranks_s_and_z, s_and_z, group=group_to_use, async_op=True
176
+ ).get_future()
177
+
178
+ def quantize_and_allgather(fut):
179
+ # Store scale and zeros across all workers.
180
+ all_ranks_s_and_z = fut.wait()[0]
181
+ # All workers quantize their corresponding ``GradBucket`` tensors.
182
+ quantized_tensor = _quantize_per_channel_cuda(
183
+ tensor_in_channels,
184
+ all_ranks_s_and_z[rank, 0, :],
185
+ all_ranks_s_and_z[rank, 1, :],
186
+ )
187
+ # Allgather quantized tensors.
188
+ fut = dist.all_gather(
189
+ _get_allgather_out_list(quantized_tensor, world_size),
190
+ quantized_tensor,
191
+ group=group_to_use,
192
+ async_op=True,
193
+ ).get_future()
194
+
195
+ return fut.wait()
196
+
197
+ def dequantize_and_aggregate(fut):
198
+ all_ranks_quantized_tensor = fut.wait()[0]
199
+
200
+ aggregated_dequantized_tensor = torch.zeros_like(
201
+ all_ranks_quantized_tensor[0], device=tensor.device, dtype=torch.float32
202
+ )
203
+ # Using previously allgathered scales and zeros, dequantize gradient tensors
204
+ # locally and then aggregate them.
205
+ for r, quantized_tensor in enumerate(all_ranks_quantized_tensor):
206
+ aggregated_dequantized_tensor += _dequantize_per_channel_cuda(
207
+ quantized_tensor, all_ranks_s_and_z[r][0], all_ranks_s_and_z[r][1]
208
+ )
209
+
210
+ return (
211
+ torch.flatten(aggregated_dequantized_tensor).cuda(tensor.device)[
212
+ : tensor.size()[0]
213
+ ]
214
+ / world_size
215
+ )
216
+
217
+ return fut.then(quantize_and_allgather).then(dequantize_and_aggregate)
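Before quantizing, the hook above pads the flat gradient so that its length is a multiple of ``bucket_size`` and views it as ``(num_channels, bucket_size)``, quantizing each row with its own scale and zero point. A small sketch of that reshaping (sizes are arbitrary):
>>> import torch
>>> from torch import nn
>>> bucket_size = 512
>>> tensor = torch.randn(6_000_000)    # stand-in for a flattened gradient bucket
>>> pad = bucket_size - len(tensor) % bucket_size
>>> chunks = nn.functional.pad(tensor, (0, pad)).view(-1, bucket_size)
>>> chunks.shape
torch.Size([11719, 512])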
venv/lib/python3.10/site-packages/torch/distributed/algorithms/join.py ADDED
@@ -0,0 +1,346 @@
1
+ import warnings
2
+ from abc import ABC, abstractmethod
3
+ from types import TracebackType
4
+ from typing import Any, List, NamedTuple, Optional, Type
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+
9
+ __all__ = ['JoinHook', 'Joinable', 'Join']
10
+
11
+ class JoinHook:
12
+ r"""
13
+ This defines a join hook, which provides two entry points in the join context manager.
14
+
15
+ Entry points : a main hook, which is called repeatedly while there exists a non-joined
16
+ process, and a post-hook, which is called once all processes have joined.
17
+
18
+ To implement a join hook for the generic join context manager, define a
19
+ class that inherits from :class:`JoinHook` and override ``main_hook()`` and
20
+ ``post_hook()`` as appropriate.
21
+ """
22
+
23
+ def main_hook(self) -> None:
24
+ r"""Call this hook while there exists a non-joined process to shadow collective communications in a training iteration.
25
+
26
+ Training iteration i.e., in one forward pass, backward pass, and optimizer step.
27
+ """
28
+ ...
29
+
30
+ def post_hook(self, is_last_joiner: bool) -> None:
31
+ r"""
32
+ Call hook after all processes have joined.
33
+
34
+ It is passed an additional ``bool`` argument ``is_last_joiner``, which indicates if the rank is one of the last to join.
35
+
36
+ Arguments:
37
+ is_last_joiner (bool): ``True`` if the rank is one of the last to
38
+ join; ``False`` otherwise.
39
+ """
40
+ ...
41
+
42
+
43
+ class Joinable(ABC):
44
+ r"""
45
+ This defines an abstract base class for joinable classes.
46
+
47
+ A joinable class
48
+ (inheriting from :class:`Joinable`) should implement :meth:`join_hook`,
49
+ which returns a :class:`JoinHook` instance, in addition to
50
+ :meth:`join_device` and :meth:`join_process_group` that return device and
51
+ process group information, respectively.
52
+ """
53
+
54
+ @abstractmethod
55
+ def __init__(self):
56
+ super().__init__()
57
+ self._join_config = _JoinConfig.construct_disabled_join_config()
58
+
59
+ @abstractmethod
60
+ def join_hook(self, **kwargs) -> JoinHook:
61
+ r"""
62
+ Return a :class:`JoinHook` instance for the given :class:`Joinable`.
63
+
64
+ Arguments:
65
+ kwargs (dict): a :class:`dict` containing any keyword arguments
66
+ to modify the behavior of the join hook at run time; all
67
+ :class:`Joinable` instances sharing the same join context
68
+ manager are forwarded the same value for ``kwargs``.
69
+ """
70
+ ...
71
+
72
+ @property
73
+ @abstractmethod
74
+ def join_device(self) -> torch.device:
75
+ r"""Return the device from which to perform collective communications needed by the join context manager."""
76
+ ...
77
+
78
+ @property
79
+ @abstractmethod
80
+ def join_process_group(self) -> Any:
81
+ r"""Returns the process group for the collective communications needed by the join context manager itself."""
82
+ ...
83
+
84
+
85
+ class _JoinConfig(NamedTuple):
86
+ r"""This includes all fields needed from a :class:`Joinable` instance for the join context manager side."""
87
+
88
+ enable: bool
89
+ throw_on_early_termination: bool
90
+ is_first_joinable: bool
91
+
92
+ @staticmethod
93
+ def construct_disabled_join_config():
94
+ r"""Return a :class:`_JoinConfig` instance indicating that join-related logic should be disabled.
95
+
96
+ e.g. if the caller is not in a join context manager.
97
+ """
98
+ return _JoinConfig(
99
+ enable=False,
100
+ throw_on_early_termination=False,
101
+ is_first_joinable=False
102
+ )
103
+
104
+
105
+
106
+ class Join:
107
+ r"""
108
+ This class defines the generic join context manager, which allows custom hooks to be called after a process joins.
109
+
110
+ These hooks should shadow the
111
+ collective communications of non-joined processes to prevent hanging and
112
+ erroring and to ensure algorithmic correctness. Refer to :class:`JoinHook`
113
+ for details about the hook definition.
114
+
115
+ .. warning::
116
+ The context manager requires each participating :class:`Joinable` to
117
+ call the method :meth:`notify_join_context()` before its own per-
118
+ iteration collective communications to ensure correctness.
119
+
120
+ .. warning::
121
+ The context manager requires that all ``process_group`` attributes in
122
+ the :class:`JoinHook` objects are the same. If there are multiple
123
+ :class:`JoinHook` objects, then the ``device`` of the first is used.
124
+ The process group and device information is used for checking for non-
125
+ joined processes and for notifying processes to throw an exception if
126
+ ``throw_on_early_termination`` is enabled, both of which using an all-
127
+ reduce.
128
+
129
+ Arguments:
130
+ joinables (List[Joinable]): a list of the participating
131
+ :class:`Joinable` s; their hooks are iterated over in the given
132
+ order.
133
+
134
+ enable (bool): a flag enabling uneven input detection; setting to
135
+ ``False`` disables the context manager's functionality and should
136
+ only be set when the user knows the inputs will not be uneven
137
+ (default: ``True``).
138
+
139
+ throw_on_early_termination (bool): a flag controlling whether to throw an
140
+ exception upon detecting uneven inputs (default: ``False``).
141
+
142
+ Example::
143
+
144
+ >>> import os
145
+ >>> import torch
146
+ >>> import torch.distributed as dist
147
+ >>> import torch.multiprocessing as mp
148
+ >>> # xdoctest: +SKIP
149
+ >>> import torch.nn.parallel.DistributedDataParallel as DDP
150
+ >>> import torch.distributed.optim.ZeroRedundancyOptimizer as ZeRO
151
+ >>> from torch.distributed.algorithms.join import Join
152
+ >>>
153
+ >>> # On each spawned worker
154
+ >>> def worker(rank):
155
+ >>> dist.init_process_group("nccl", rank=rank, world_size=2)
156
+ >>> model = DDP(torch.nn.Linear(1, 1).to(rank), device_ids=[rank])
157
+ >>> optim = ZeRO(model.parameters(), torch.optim.Adam, lr=0.01)
158
+ >>> # Rank 1 gets one more input than rank 0
159
+ >>> inputs = [torch.tensor([1.]).to(rank) for _ in range(10 + rank)]
160
+ >>> with Join([model, optim]):
161
+ >>> for input in inputs:
162
+ >>> loss = model(input).sum()
163
+ >>> loss.backward()
164
+ >>> optim.step()
165
+ >>> # All ranks reach here without hanging/erroring
166
+ """
167
+
168
+ def __init__(
169
+ self,
170
+ joinables: List[Joinable],
171
+ enable: bool = True,
172
+ throw_on_early_termination: bool = False,
173
+ **kwargs,
174
+ ):
175
+ if len(joinables) == 0:
176
+ raise ValueError("The join context manager requires at least one joinable")
177
+ self._joinables = joinables
178
+ self._join_hooks = [joinable.join_hook(**kwargs) for joinable in self._joinables]
179
+ self._enable = enable
180
+ self._throw_on_early_termination = throw_on_early_termination
181
+ self._set_joinable_configs()
182
+ self._extract_dist_info()
183
+
184
+ def _set_joinable_configs(self) -> None:
185
+ r"""Set the :class:`_JoinConfig` of each participating :class:`Joinable`."""
186
+ assert len(self._joinables) > 0
187
+ is_first_joinable = True
188
+ for joinable in self._joinables:
189
+ joinable._join_config = _JoinConfig(
190
+ enable=self._enable,
191
+ throw_on_early_termination=self._throw_on_early_termination,
192
+ is_first_joinable=is_first_joinable
193
+ )
194
+ is_first_joinable = False
195
+
196
+ def _extract_dist_info(self) -> None:
197
+ r"""
198
+ Extract the process group and device information from the joinables.
199
+
200
+ If there are multiple joinables, then the context manager uses the
201
+ first specified device.
202
+
203
+ Preconditions:
204
+ ``self._joinables`` is not ``None`` and is non-empty.
205
+
206
+ Raises:
207
+ ValueError
208
+ If there are multiple conflicting ``process_group`` attributes
209
+ among the ``Joinable`` objects.
210
+ """
211
+ process_group = None
212
+ device = None
213
+ for joinable in self._joinables:
214
+ if process_group is None:
215
+ process_group = joinable.join_process_group
216
+ elif process_group != joinable.join_process_group:
217
+ raise ValueError("Using join context manager with multiple process groups")
218
+ if device is None:
219
+ device = joinable.join_device
220
+ self._process_group = process_group
221
+ self._rank = dist.get_rank(self._process_group)
222
+ self._device = device
223
+
224
+ def __enter__(self):
225
+ ...
226
+
227
+ def __exit__(
228
+ self,
229
+ type: Optional[Type[BaseException]],
230
+ value: Optional[BaseException],
231
+ traceback: Optional[TracebackType]
232
+ ):
233
+ r"""
234
+ Repeatedly runs the main hooks until all processes join; then, runs the post-hooks.
235
+
236
+ Raises:
237
+ RuntimeError
238
+ If ``throw_on_early_termination=True``.
239
+ """
240
+ if not self._enable or type:
241
+ return # propagate the exception directly if one was raised
242
+
243
+ all_procs_joined = False
244
+ is_last_joiner = True
245
+
246
+ i = 0
247
+ WARN_THRESHOLD = 1000
248
+ warnings.simplefilter("once")
249
+
250
+ while not all_procs_joined:
251
+ if i > WARN_THRESHOLD:
252
+ warnings.warn(
253
+ "Detected uneven input skew of greater than "
254
+ f"{WARN_THRESHOLD}. This means that rank "
255
+ f"{self._rank} has at least {WARN_THRESHOLD} "
256
+ f"fewer inputs than other currently-active ranks. "
257
+ "This level of skew could lead to performance "
258
+ "degradation during training."
259
+ )
260
+ # Shadow the all-reduce in non-joined processes
261
+ num_nonjoined_procs = self._get_num_nonjoined_procs()
262
+ if num_nonjoined_procs == 0:
263
+ all_procs_joined = True
264
+ else:
265
+ if self._throw_on_early_termination:
266
+ self._notify_procs_to_terminate()
267
+
268
+ # Run main hooks
269
+ for join_hook in self._join_hooks:
270
+ join_hook.main_hook()
271
+
272
+ is_last_joiner = False
273
+ i += 1
274
+
275
+ # Run post-hooks
276
+ for join_hook in self._join_hooks:
277
+ join_hook.post_hook(is_last_joiner)
278
+
279
+ def _get_num_nonjoined_procs(self):
280
+ r"""Return the number of non-joined processes by shadowing an all-reduce in the non-joined processes."""
281
+ num_nonjoined_procs = torch.zeros(1, device=self._device)
282
+ dist.all_reduce(num_nonjoined_procs, group=self._process_group)
283
+ return num_nonjoined_procs.item()
284
+
285
+ def _notify_procs_to_terminate(self):
286
+ r"""Schedule an all-reduce to notify non-joined processes to terminate.
287
+
288
+ Also raise a ``RuntimeError`` indicating that the current process has exhausted its inputs.
289
+ """
290
+ ones = torch.ones(1, device=self._device)
291
+ dist.all_reduce(ones, group=self._process_group)
292
+ raise RuntimeError(f"Rank {self._rank} exhausted all inputs.")
293
+
294
+ @staticmethod
295
+ def notify_join_context(joinable: Joinable):
296
+ r"""
297
+ Notifies the join context manager that the calling process has not yet joined.
298
+
299
+ Then, if ``throw_on_early_termination=True``, checks if uneven inputs have been detected
300
+ (i.e. if one process has already joined) and throws an exception if so.
301
+
302
+ This method should be called from a :class:`Joinable` object before
303
+ its per-iteration collective communications. For example, this should
304
+ be called at the beginning of the forward pass in
305
+ :class:`DistributedDataParallel`.
306
+
307
+ Only the first :class:`Joinable` object passed into the context
308
+ manager performs the collective communications in this method, and
309
+ for the others, this method is vacuous.
310
+
311
+ Arguments:
312
+ joinable (Joinable): the :class:`Joinable` object calling this
313
+ method.
314
+
315
+ Returns:
316
+ An async work handle for the all-reduce meant to notify the context
317
+ manager that the process has not yet joined if ``joinable`` is the
318
+ first one passed into the context manager; ``None`` otherwise.
319
+ """
320
+ assert hasattr(joinable, "_join_config"), \
321
+ f"Check that the {type(joinable)} constructor calls the " \
322
+ "``Joinable`` constructor"
323
+
324
+ join_config = joinable._join_config
325
+ # First joinable is responsible for the collective communications
326
+ if not join_config.is_first_joinable or not join_config.enable:
327
+ return None
328
+
329
+ device = joinable.join_device
330
+ process_group = joinable.join_process_group
331
+
332
+ # Schedule an all-reduce to indicate that the caller has not yet joined
333
+ ones = torch.ones(1, device=device)
334
+ work = dist.all_reduce(ones, group=process_group, async_op=True)
335
+
336
+ if join_config.throw_on_early_termination:
337
+ # Check if uneven inputs have been detected
338
+ zeros = torch.zeros(1, device=device)
339
+ dist.all_reduce(zeros, group=process_group)
340
+ should_throw = zeros.item()
341
+ if should_throw:
342
+ raise RuntimeError(
343
+ "Detected at least one rank that exhausted inputs. "
344
+ "Throwing across all ranks."
345
+ )
346
+ return work
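Continuing the ``Counter`` sketch from earlier in this file (all names besides ``Join`` and ``dist`` are illustrative), the context manager drives uneven per-rank inputs without hanging; ``rank`` is assumed to be this process's rank in an already-initialized default group:
>>> # xdoctest: +SKIP
>>> counter = Counter(torch.device(f"cuda:{rank}"), dist.group.WORLD)
>>> num_iters = 100 + rank             # rank-dependent, hence uneven, number of inputs
>>> with Join([counter]):
>>>     for _ in range(num_iters):
>>>         counter()                  # notify_join_context() runs inside __call__
>>> # Joined ranks shadow the all-reduce with zeros in their join hook,
>>> # so the remaining ranks finish their extra iterations without hanging.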
venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (212 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/averagers.cpython-310.pyc ADDED
Binary file (5.74 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/hierarchical_model_averager.cpython-310.pyc ADDED
Binary file (9.55 kB). View file