applied-ai-018 commited on
Commit
b054cff
·
verified ·
1 Parent(s): ddc4c37

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt +3 -0
  7. venv/lib/python3.10/site-packages/torch/_prims/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_prims/__pycache__/context.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_prims/__pycache__/debug_prims.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_prims/__pycache__/executor.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_prims/__pycache__/rng_prims.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_prims/rng_prims.py +268 -0
  13. venv/lib/python3.10/site-packages/torch/distributed/__init__.py +132 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/_composable_state.py +37 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py +1084 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py +409 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py +12 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__init__.py +14 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__init__.py +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/api.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/batch_dim_utils.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/comm_tensor.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/config.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/data_parallel.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/distribute.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/experimental_ops.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/gm_transformation.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_optimization.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_utils.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/iter_graph_module.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/log_utils.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/parallel_mode.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/partial_lower.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/distributed/_spmd/api.py +575 -0
  38. venv/lib/python3.10/site-packages/torch/distributed/_spmd/batch_dim_utils.py +179 -0
  39. venv/lib/python3.10/site-packages/torch/distributed/_spmd/comm_tensor.py +247 -0
  40. venv/lib/python3.10/site-packages/torch/distributed/_spmd/config.py +27 -0
  41. venv/lib/python3.10/site-packages/torch/distributed/_spmd/data_parallel.py +824 -0
  42. venv/lib/python3.10/site-packages/torch/distributed/_spmd/distribute.py +783 -0
  43. venv/lib/python3.10/site-packages/torch/distributed/_spmd/experimental_ops.py +455 -0
  44. venv/lib/python3.10/site-packages/torch/distributed/_spmd/gm_transformation.py +51 -0
  45. venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_optimization.py +986 -0
  46. venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_utils.py +145 -0
  47. venv/lib/python3.10/site-packages/torch/distributed/_spmd/iter_graph_module.py +762 -0
  48. venv/lib/python3.10/site-packages/torch/distributed/_spmd/log_utils.py +78 -0
  49. venv/lib/python3.10/site-packages/torch/distributed/_spmd/parallel_mode.py +216 -0
  50. venv/lib/python3.10/site-packages/torch/distributed/_spmd/partial_lower.py +268 -0
ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b76ba44617583c711f86f1de7e4a659c37648ded1f657f897dba30513ef8656
3
+ size 33555612
ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34a40fb1e962c52cede791fa69ed0d037966235db1a663f491aadda732b091ed
3
+ size 33555612
ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b604db8453bd1484546feaeba94c1ea31052848a6de2234e191cf3dfd534717
3
+ size 33555533
ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04f24ac126b0f57c4a53ee91ea64fd799da90b0b6d07ab50b38ab25eb682b804
3
+ size 50332843
ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2eaa8e4a45847d44158859761ec77111585296ae41b4e64dac17284d4bcd62d
3
+ size 50332749
ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7ea30c1e8b0a2b4cb29ab44c993294f6c852a73425f9be0a57a01d9f6478834
3
+ size 415237291
venv/lib/python3.10/site-packages/torch/_prims/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (54.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/_prims/__pycache__/context.cpython-310.pyc ADDED
Binary file (4.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/_prims/__pycache__/debug_prims.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
venv/lib/python3.10/site-packages/torch/_prims/__pycache__/executor.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
venv/lib/python3.10/site-packages/torch/_prims/__pycache__/rng_prims.cpython-310.pyc ADDED
Binary file (8.71 kB). View file
 
venv/lib/python3.10/site-packages/torch/_prims/rng_prims.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Tuple
2
+
3
+ import torch
4
+ import torch.utils._pytree as pytree
5
+ from torch import _prims
6
+ from torch._C import DispatchKey
7
+ from torch._higher_order_ops.utils import autograd_not_implemented
8
+ from torch._ops import HigherOrderOperator
9
+
10
+ from torch._prims_common import CUDARngStateHelper, make_contiguous_strides_for
11
+ from torch._prims_common.wrappers import backwards_not_supported
12
+ from torch._subclasses.fake_tensor import FakeTensorMode
13
+ from torch.fx.experimental.proxy_tensor import (
14
+ disable_proxy_modes_tracing,
15
+ ProxyTorchDispatchMode,
16
+ track_tensor_tree,
17
+ )
18
+ from torch.types import _device, _dtype
19
+
20
+
21
+ rngprim_namespace = "rngprims"
22
+ rngprim = torch.library.Library(rngprim_namespace, "DEF")
23
+ rngprim_impl = torch.library.Library(
24
+ rngprim_namespace, "IMPL", "CompositeExplicitAutograd"
25
+ )
26
+ rngprim_autograd_impl = torch.library.Library(rngprim_namespace, "IMPL", "Autograd")
27
+ rngprim_meta_impl = torch.library.Library(rngprim_namespace, "IMPL", "Meta")
28
+
29
+
30
+ def throw_on_non_cuda(device):
31
+ raise RuntimeError(
32
+ f"You are trying to functionalize a {device.type} RNG operator but {device.type} does not "
33
+ f"use Philox/counter-based RNG. Therefore, functionalizing a {device.type} RNG operator is "
34
+ "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU."
35
+ )
36
+
37
+
38
+ def register_rng_prim(name, schema, impl_aten, impl_meta, doc, tags=None):
39
+ rngprim.define(schema)
40
+ rngprim_impl.impl(name, impl_aten)
41
+ rngprim_meta_impl.impl(name, impl_meta)
42
+
43
+ prim_packet = getattr(torch._ops.ops.rngprims, name)
44
+ prim = prim_packet.default
45
+ if tags:
46
+ prim._tags = tags
47
+
48
+ rngprim_autograd_impl.impl(name, backwards_not_supported(prim))
49
+
50
+ for p in (prim_packet, prim):
51
+ p.__doc__ = doc
52
+ p.return_type = torch._prims_common.RETURN_TYPE.NEW # type: ignore[attr-defined]
53
+
54
+ p.schema = schema
55
+ p.impl_aten = impl_aten
56
+ p.prim_meta_impl = impl_meta
57
+
58
+
59
+ # Philox rand offsets could be shared in future with other philox ops, so
60
+ # keeping these functions in global scope.
61
+ def philox_rand_offset_meta(
62
+ shape: torch.Size,
63
+ ):
64
+ return _prims.TensorLike(torch.tensor(0, dtype=torch.int64))
65
+
66
+
67
+ def philox_rand_offset(
68
+ shape: torch.Size,
69
+ ):
70
+ # For impl, look at the function calc_execution_policy in the file
71
+ # aten/src/ATen/native/cuda/DistributionTemplates.h. The impl was copied at
72
+ # commit hash 72aa0667bd16707d50eb8fa337092a1f5d11dfb6
73
+ numel_scalar = 1
74
+ for dim_size in shape:
75
+ numel_scalar *= dim_size
76
+ numel = torch.scalar_tensor(numel_scalar, dtype=torch.int64)
77
+
78
+ block_size = 256
79
+ unroll = 4
80
+ curand4_engine_calls = 4
81
+ device_property = torch.cuda.get_device_properties(torch.cuda.current_device())
82
+ blocks_per_sm = device_property.max_threads_per_multi_processor // block_size
83
+ grid_size = (numel + block_size - 1) // block_size
84
+ grid_size = min(grid_size, device_property.multi_processor_count * blocks_per_sm)
85
+ offset = (
86
+ (numel - 1) // (block_size * grid_size * unroll) + 1
87
+ ) * curand4_engine_calls
88
+ return offset
89
+
90
+
91
+ def register_philox_rand():
92
+ name = "philox_rand"
93
+ schema = "philox_rand(SymInt[] size, Tensor seed, Tensor offset, int[]? stride, Device? device=None, ScalarType? dtype=None) -> (Tensor, Tensor)" # noqa: B950
94
+
95
+ def _philox_rand_meta(
96
+ shape: torch.Size,
97
+ seed: torch.Tensor,
98
+ offset: torch.Tensor,
99
+ stride: Optional[Tuple[int, ...]],
100
+ device: _device,
101
+ dtype: _dtype,
102
+ ):
103
+ # stride arg will be useful for distributed usecase. Currently, its unused.
104
+ assert stride is None
105
+ stride = make_contiguous_strides_for(shape)
106
+ random_values = _prims.TensorMeta(
107
+ shape=shape, strides=stride, dtype=dtype, device=device
108
+ )
109
+ offset = philox_rand_offset_meta(shape)
110
+ return (random_values, offset)
111
+
112
+ def _philox_rand(
113
+ shape: torch.Size,
114
+ seed: torch.Tensor,
115
+ offset: torch.Tensor,
116
+ stride: Optional[Tuple[int, ...]],
117
+ device: _device,
118
+ dtype: _dtype,
119
+ ):
120
+ # stride arg will be useful for distributed usecase. Currently, its unused.
121
+ assert stride is None
122
+ if device.type == "cpu":
123
+ devices = []
124
+ else:
125
+ devices = [device]
126
+
127
+ if device.type != "cuda":
128
+ raise throw_on_non_cuda(device)
129
+
130
+ with torch.random.fork_rng(devices):
131
+ CUDARngStateHelper.set_torch_state_tensor(seed, offset)
132
+ random_values = torch.rand(shape, device=device, dtype=dtype)
133
+
134
+ return random_values, philox_rand_offset(shape)
135
+
136
+ register_rng_prim(
137
+ name=name,
138
+ schema=schema,
139
+ impl_aten=_philox_rand,
140
+ impl_meta=_philox_rand_meta,
141
+ doc="Philox based stateless rand operator",
142
+ tags=(torch.Tag.nondeterministic_seeded,),
143
+ )
144
+
145
+
146
+ def get_device(args, kwargs):
147
+ if kwargs.get("device"):
148
+ device = kwargs.get("device")
149
+ if isinstance(device, str):
150
+ device = torch.device(device)
151
+ return device.type
152
+
153
+ devices = {arg.device.type for arg in args if isinstance(arg, torch.Tensor)}
154
+ if any(dev == "cuda" for dev in devices):
155
+ return "cuda"
156
+ elif any(dev == "cpu" for dev in devices):
157
+ return "cpu"
158
+ return None
159
+
160
+
161
+ def register_run_and_save_rng_state_op():
162
+ run_and_save_rng_state = HigherOrderOperator("run_and_save_rng_state")
163
+
164
+ run_and_save_rng_state.py_impl(DispatchKey.Autograd)(
165
+ autograd_not_implemented(run_and_save_rng_state, deferred_error=True)
166
+ )
167
+
168
+ @run_and_save_rng_state.py_impl(DispatchKey.CUDA)
169
+ def impl_cuda(op, *args, **kwargs):
170
+ return torch.cuda.get_rng_state(), op(*args, **kwargs)
171
+
172
+ @run_and_save_rng_state.py_impl(DispatchKey.CPU)
173
+ def impl_cpu(op, *args, **kwargs):
174
+ return torch.get_rng_state(), op(*args, **kwargs)
175
+
176
+ @run_and_save_rng_state.py_impl(DispatchKey.BackendSelect)
177
+ def impl_backend_select(op, *args, **kwargs):
178
+ impl_map = {"cuda": impl_cuda, "cpu": impl_cpu}
179
+ device = get_device(args, kwargs)
180
+ assert device in impl_map, f"Backend not supported for {device}"
181
+ impl = impl_map[device]
182
+ return impl(op, *args, **kwargs)
183
+
184
+ @run_and_save_rng_state.py_impl(FakeTensorMode)
185
+ def impl_fake_tensor_mode(mode, op, *args, **kwargs):
186
+ # Check device to call the right impl
187
+ with mode:
188
+ return impl_backend_select(op, *args, **kwargs)
189
+
190
+ @run_and_save_rng_state.py_impl(ProxyTorchDispatchMode)
191
+ def impl_proxy_dispatch_mode(mode, op, *args, **kwargs):
192
+ if mode.enable_tracing:
193
+ out = impl_backend_select(op, *args, **kwargs)
194
+ proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, (op, *args))
195
+ proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)
196
+ out_proxy = mode.tracer.create_proxy(
197
+ "call_function", run_and_save_rng_state, proxy_args, proxy_kwargs
198
+ )
199
+ return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
200
+ else:
201
+ return run_and_save_rng_state(op, *args, **kwargs)
202
+
203
+ return run_and_save_rng_state
204
+
205
+
206
+ def register_run_with_rng_state_op():
207
+ run_with_rng_state = HigherOrderOperator("run_with_rng_state")
208
+
209
+ run_with_rng_state.py_impl(DispatchKey.Autograd)(
210
+ autograd_not_implemented(run_with_rng_state, deferred_error=True)
211
+ )
212
+
213
+ @run_with_rng_state.py_impl(DispatchKey.CUDA)
214
+ def impl_cuda(rng_state, op, *args, **kwargs):
215
+ current_state = torch.cuda.get_rng_state()
216
+ torch.cuda.set_rng_state(rng_state.cpu())
217
+ out = op(*args, **kwargs)
218
+ torch.cuda.set_rng_state(current_state)
219
+ return out
220
+
221
+ @run_with_rng_state.py_impl(DispatchKey.CPU)
222
+ def impl_cpu(rng_state, op, *args, **kwargs):
223
+ current_state = torch.get_rng_state()
224
+ torch.set_rng_state(rng_state)
225
+ out = op(*args, **kwargs)
226
+ torch.set_rng_state(current_state)
227
+ return out
228
+
229
+ @run_with_rng_state.py_impl(ProxyTorchDispatchMode)
230
+ def impl_proxy_dispatch_mode(mode, rng_state, op, *args, **kwargs):
231
+ if mode.enable_tracing:
232
+ with disable_proxy_modes_tracing():
233
+ out = run_with_rng_state(rng_state, op, *args, **kwargs)
234
+ proxy_args = pytree.tree_map(
235
+ mode.tracer.unwrap_proxy, (rng_state, op, *args)
236
+ )
237
+ proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)
238
+ out_proxy = mode.tracer.create_proxy(
239
+ "call_function", run_with_rng_state, proxy_args, proxy_kwargs
240
+ )
241
+ return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
242
+ else:
243
+ return run_with_rng_state(rng_state, op, *args, **kwargs)
244
+
245
+ @run_with_rng_state.py_impl(DispatchKey.BackendSelect)
246
+ def impl_backend_select(rng_state, op, *args, **kwargs):
247
+ impl_map = {"cuda": impl_cuda, "cpu": impl_cpu}
248
+ device = get_device(args, kwargs)
249
+ assert device in impl_map, f"Backend not supported for {device}"
250
+ impl = impl_map[device]
251
+ return impl(rng_state, op, *args, **kwargs)
252
+
253
+ @run_with_rng_state.py_impl(FakeTensorMode)
254
+ def impl_fake_tensor_mode(mode, rng_state, op, *args, **kwargs):
255
+ # Skip setting the set_rng_state as it does not work well with fake tensors.
256
+ # And it does not matter for the fake tensor mode.
257
+ with mode:
258
+ return op(*args, **kwargs)
259
+
260
+ return run_with_rng_state
261
+
262
+
263
+ run_and_save_rng_state = register_run_and_save_rng_state_op()
264
+ run_with_rng_state = register_run_with_rng_state_op()
265
+
266
+
267
+ def register_rng_prims():
268
+ register_philox_rand()
venv/lib/python3.10/site-packages/torch/distributed/__init__.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ from enum import Enum
4
+ import pdb
5
+ import io
6
+
7
+ import torch
8
+
9
+ def is_available() -> bool:
10
+ """
11
+ Return ``True`` if the distributed package is available.
12
+
13
+ Otherwise,
14
+ ``torch.distributed`` does not expose any other APIs. Currently,
15
+ ``torch.distributed`` is available on Linux, MacOS and Windows. Set
16
+ ``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source.
17
+ Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows,
18
+ ``USE_DISTRIBUTED=0`` for MacOS.
19
+ """
20
+ return hasattr(torch._C, "_c10d_init")
21
+
22
+
23
+ if is_available() and not torch._C._c10d_init():
24
+ raise RuntimeError("Failed to initialize torch.distributed")
25
+
26
+ # Custom Runtime Errors thrown from the distributed package
27
+ DistError = torch._C._DistError
28
+ DistBackendError = torch._C._DistBackendError
29
+ DistNetworkError = torch._C._DistNetworkError
30
+ DistStoreError = torch._C._DistStoreError
31
+
32
+ if is_available():
33
+ from torch._C._distributed_c10d import (
34
+ Store,
35
+ FileStore,
36
+ TCPStore,
37
+ ProcessGroup as ProcessGroup,
38
+ Backend as _Backend,
39
+ PrefixStore,
40
+ Reducer,
41
+ Logger,
42
+ BuiltinCommHookType,
43
+ GradBucket,
44
+ Work as _Work,
45
+ _DEFAULT_FIRST_BUCKET_BYTES,
46
+ _register_comm_hook,
47
+ _register_builtin_comm_hook,
48
+ _broadcast_coalesced,
49
+ _compute_bucket_assignment_by_size,
50
+ _verify_params_across_processes,
51
+ _test_python_store,
52
+ DebugLevel,
53
+ get_debug_level,
54
+ set_debug_level,
55
+ set_debug_level_from_env,
56
+ _make_nccl_premul_sum,
57
+ )
58
+
59
+ class _DistributedPdb(pdb.Pdb):
60
+ """
61
+ Supports using PDB from inside a multiprocessing child process.
62
+
63
+ Usage:
64
+ _DistributedPdb().set_trace()
65
+ """
66
+ def interaction(self, *args, **kwargs):
67
+ _stdin = sys.stdin
68
+ try:
69
+ sys.stdin = open('/dev/stdin')
70
+ pdb.Pdb.interaction(self, *args, **kwargs)
71
+ finally:
72
+ sys.stdin = _stdin
73
+
74
+ def breakpoint(rank: int = 0):
75
+ """
76
+ Set a breakpoint, but only on a single rank. All other ranks will wait for you to be
77
+ done with the breakpoint before continuing.
78
+
79
+ Args:
80
+ rank (int): Which rank to break on. Default: ``0``
81
+ """
82
+ if get_rank() == rank:
83
+ pdb = _DistributedPdb()
84
+ pdb.message(
85
+ "\n!!! ATTENTION !!!\n\n"
86
+ f"Type 'up' to get to the frame that called dist.breakpoint(rank={rank})\n"
87
+ )
88
+ pdb.set_trace()
89
+ barrier()
90
+
91
+ if sys.platform != "win32":
92
+ from torch._C._distributed_c10d import (
93
+ HashStore,
94
+ _round_robin_process_groups,
95
+ )
96
+
97
+ from .distributed_c10d import * # noqa: F403
98
+
99
+ # Variables prefixed with underscore are not auto imported
100
+ # See the comment in `distributed_c10d.py` above `_backend` on why we expose
101
+ # this.
102
+
103
+ from .distributed_c10d import (
104
+ _all_gather_base,
105
+ _reduce_scatter_base,
106
+ _create_process_group_wrapper,
107
+ _rank_not_in_group,
108
+ _coalescing_manager,
109
+ _CoalescingManager,
110
+ _get_process_group_name,
111
+ )
112
+
113
+ from .rendezvous import (
114
+ rendezvous,
115
+ _create_store_from_options,
116
+ register_rendezvous_handler,
117
+ )
118
+
119
+ from .remote_device import _remote_device
120
+
121
+ set_debug_level_from_env()
122
+
123
+ else:
124
+ # This stub is sufficient to get
125
+ # python test/test_public_bindings.py -k test_correct_module_names
126
+ # working even when USE_DISTRIBUTED=0. Feel free to add more
127
+ # stubs as necessary.
128
+ # We cannot define stubs directly because they confuse pyre
129
+
130
+ class _ProcessGroupStub:
131
+ pass
132
+ sys.modules["torch.distributed"].ProcessGroup = _ProcessGroupStub # type: ignore[attr-defined]
venv/lib/python3.10/site-packages/torch/distributed/_composable_state.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import cast, Dict, Optional
2
+
3
+ import torch.nn as nn
4
+
5
+
6
+ class _State:
7
+ pass
8
+
9
+
10
+ _module_state_mapping: Dict[nn.Module, _State] = {}
11
+
12
+
13
+ def _insert_module_state(module: nn.Module, state: _State) -> None:
14
+ global _module_state_mapping
15
+ assert module not in _module_state_mapping, f"Inserting {module} more than once."
16
+ _module_state_mapping[module] = state
17
+
18
+
19
+ def _get_module_state(module: nn.Module) -> Optional[_State]:
20
+ """
21
+ Return the ``_State`` in ``model``.
22
+
23
+ Given a ``module``, this API finds out if the module is also a ``_State``
24
+ instance or if the module is managed by a composable API. If the module
25
+ is also a ``_State``, ``module`` will be casted to ``_State` and returned.
26
+ If it is managed by a composable API, the corresponding ``_State`` will
27
+ be returned.
28
+ """
29
+ global _module_state_mapping
30
+ if isinstance(module, _State):
31
+ return cast(_State, module)
32
+ else:
33
+ # https://github.com/pytorch/pytorch/issues/107054
34
+ if module in _module_state_mapping:
35
+ return _module_state_mapping[module]
36
+ else:
37
+ return None
venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py ADDED
@@ -0,0 +1,1084 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import warnings
3
+ from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ import torch.distributed.distributed_c10d as c10d
8
+ from torch._custom_ops import impl_abstract
9
+ from torch.distributed.device_mesh import DeviceMesh
10
+ from torch.fx.experimental.proxy_tensor import get_innermost_proxy_mode
11
+
12
+ from . import _functional_collectives_impl as fun_col_impl
13
+ from ._functional_collectives_impl import ( # noqa: F401
14
+ _register_tensor_wrapper,
15
+ native_funcol_enabled,
16
+ )
17
+
18
+ try:
19
+ from torch.utils._cxx_pytree import tree_map_only
20
+ except ImportError:
21
+ from torch.utils._pytree import tree_map_only # type: ignore[no-redef]
22
+
23
+
24
+ if torch._running_with_deploy():
25
+
26
+ def is_torchdynamo_compiling():
27
+ """Can't import torchdynamo in torchdeploy builds currently."""
28
+ return False
29
+
30
+ else:
31
+ try:
32
+ from torch.compiler import is_dynamo_compiling as is_torchdynamo_compiling
33
+ except Exception:
34
+ warnings.warn(
35
+ "Unable to import torchdynamo util `is_torchdynamo_compiling`, so won't support torchdynamo correctly"
36
+ )
37
+
38
+ def is_torchdynamo_compiling():
39
+ return False
40
+
41
+
42
+ """
43
+ New traceable, functional collectives.
44
+ RFC: https://github.com/pytorch/pytorch/issues/93173
45
+
46
+ compiler: trace these ops with plain-old-data schemas, then choose how to lower them.
47
+ eager: execute these 'functional' ops which in eager return AsyncCollectiveTensor subclasses,
48
+ automatically calling .wait() on underlying/hidden async 'work' obj only when fed to
49
+ a downstream op.
50
+
51
+ Issues:
52
+ * Where should these ops live? Couldn't `import torch` if putting these ops in existing torch.distributed files
53
+ * Proper support for eager requires inplace ops. We should explore having it as an option for the API.
54
+ """
55
+
56
+ """
57
+ Functional collectives are asynchronous only and we perform implicit stream synchronization
58
+ on behalf of the user.
59
+
60
+ We use AsyncCollectiveTensor to wrap the result tensor of a collective and it lets us witness
61
+ first usage of the tensor and insert cross stream sync at the right place.
62
+
63
+ The above are the easy bits, the hard one is how we match the Work object returned by
64
+ c10d and the tensor AsyncCollectiveTensor wraps. We alloc the tensor inside the collective
65
+ op implementation (see ``clone()`` call in ``_all_reduce``) and then it's handled by the
66
+ dispatcher which might call other implementations that are allowed to change the returned
67
+ tensor - even return a tensor with a different shape (see ``torch.vmap``).
68
+
69
+ This means the caller of our ops receives a Tensor that is not guaranteed to be the same
70
+ allocated by our implementations and that makes pairing The AsyncTensor to the original
71
+ tensor a lot harder. This pairing is needed so we can lookup the Work object to use.
72
+
73
+ Originally, we tried WeakKeyDictionary to map from Tensor to Work, but because Tensor's
74
+ identity is not stable across dispatch, the op caller would end up with a different Tensor
75
+ instance that would not match any in the dictionary.
76
+
77
+ With Tensor identity out of the question, we decided use the tensor data pointer, which
78
+ should be stable across all the Tensor changes done during dispatch.
79
+
80
+ We have a dictionary of tensor::data_ptr -> Work that we insert right after we call into c10d.
81
+
82
+ We use this dictionary when AsyncCollectiveTensor is used to invoke Work::wait()
83
+
84
+ Finally, we setup a finalizer against the tensor wrapper to observe it getting collected so we
85
+ can clean up stale entries in the dictionary.
86
+
87
+ To eliminate the possibility of races we have a global version counter that is used by the finalizer.
88
+
89
+ As a wise man said once: Don't cross the streams (https://www.youtube.com/watch?v=wyKQe_i9yyo)
90
+
91
+ """
92
+
93
+ """
94
+ Functional collectives can accept any of these types to describe the ranks participating in collectives.
95
+
96
+ The different types will be desugared to a canonical format
97
+ """
98
+ RANK_TYPES = Union[
99
+ List[int],
100
+ List[List[int]],
101
+ dist.ProcessGroup,
102
+ DeviceMesh,
103
+ Tuple["dist._tensor.DeviceMesh", int],
104
+ str,
105
+ ]
106
+
107
+
108
+ """
109
+ User facing APIs for functional collectives
110
+ -------------------------------------------
111
+
112
+ These apis are called by user code and expected to work both in eager execution and compilation,
113
+ but there are significant differences to how the two modes are implemented underneath.
114
+
115
+ Eager execution is 'optimized' using a tensor subclass that schedules the synchronization (via wait_tensor() op)
116
+ just before the tensor is first used. Compiled tracing currently relies on the compiler to perform this optimization,
117
+ and cannot yet correctly trace the AsyncTensor wrapper class. In the future, these paths may be unified
118
+ if sufficient subclass support is added in dynamo.
119
+
120
+ Example: all_reduce is an entrypoint API, and other collectives follow a similar pattern.
121
+
122
+ Here's how it works under torch.compile/dynamo:
123
+ all_reduce(...)
124
+ |--> _expand_group(...) - desugars processgroup into canonical/traceable format
125
+ |--> c10d_functional.all_reduce(...) - dynamo captures this op call, doesn't trace deeper
126
+ |--> _maybe_wrap_tensor(...) - wait_tensor() op is immediately called, no AsyncTensor subclass needed
127
+
128
+ And under eager execution:
129
+ all_reduce(...)
130
+ |--> _expand_group(...) - same as above, but less critical for eager
131
+ |--> c10d_functional.all_reduce(...) - dispatches to real kernel OR records op in trace
132
+ |--> _maybe_wrap_tensor(...) - AsyncTensor wrapper applied to returned tensor,
133
+ which issues wait_tensor() at the time of first use
134
+ """
135
+
136
+
137
+ def wait_tensor(tensor):
138
+ """
139
+ Wait on a tensor returned by the collectives ops.
140
+
141
+ Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA.
142
+ """
143
+ if native_funcol_enabled():
144
+ return torch.ops._c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined]
145
+ else:
146
+ return torch.ops.c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined]
147
+
148
+
149
+ def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str = ""):
150
+ """
151
+ Broadcasts the tensor to all processes in the given process group.
152
+
153
+ Args:
154
+ src (int): Source rank
155
+ group (ProcessGroup or List[int]): The process group to work on.
156
+ tag (str, optional): A unique identifier for the collective. Default: empty string
157
+ """
158
+ if native_funcol_enabled():
159
+ group_name = _resolve_group_name(group, tag)
160
+ tensor = torch.ops._c10d_functional.broadcast(self, src, group_name)
161
+ else:
162
+ tag, rankset, group_size = _expand_group(group, tag)
163
+ tensor = torch.ops.c10d_functional.broadcast(
164
+ self, src, tag, rankset, group_size
165
+ )
166
+ return _maybe_wrap_tensor(tensor)
167
+
168
+
169
+ def all_reduce(self: torch.Tensor, reduceOp: str, group: RANK_TYPES, tag: str = ""):
170
+ """
171
+ Reduces the tensor data across all machines in such a way that all get
172
+ the final result.
173
+
174
+ The input tensor is left unmodified.
175
+
176
+ Group can be one of:
177
+ List[int]: ranks participating in the collective.
178
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
179
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
180
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
181
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
182
+
183
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
184
+ that information and perform collective algebraic optimization. Use other forms of input for that.
185
+ """
186
+ if native_funcol_enabled():
187
+ group_name = _resolve_group_name(group, tag)
188
+ tensor = torch.ops._c10d_functional.all_reduce(
189
+ self, reduceOp.lower(), group_name
190
+ )
191
+ else:
192
+ tag, rankset, group_size = _expand_group(group, tag)
193
+ tensor = torch.ops.c10d_functional.all_reduce( # type: ignore[attr-defined]
194
+ self,
195
+ reduceOp,
196
+ tag,
197
+ rankset,
198
+ group_size,
199
+ )
200
+ return _maybe_wrap_tensor(tensor)
201
+
202
+
203
+ def all_gather_tensor(
204
+ self: torch.Tensor,
205
+ gather_dim: int,
206
+ group: RANK_TYPES,
207
+ tag: str = "",
208
+ ):
209
+ """
210
+ Gather tensor data across from all machines and concatenate over ``gather_dim``.
211
+
212
+ Note that it currently only supports gather_dim = 0.
213
+
214
+ The input tensor is left unmodified.
215
+ Group can be one of:
216
+ List[int]: ranks participating in the collective.
217
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
218
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
219
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
220
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
221
+
222
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
223
+ that information and perform collective algebraic optimization. Use other forms of input for that.
224
+ """
225
+ assert self.is_contiguous()
226
+ if native_funcol_enabled():
227
+ group_name = _resolve_group_name(group, tag)
228
+ group_size = c10d._get_group_size_by_name(group_name)
229
+ tensor = torch.ops._c10d_functional.all_gather_into_tensor(
230
+ self, group_size, group_name
231
+ )
232
+ else:
233
+ tag, rankset, group_size = _expand_group(group, tag)
234
+ tensor = torch.ops.c10d_functional.all_gather_into_tensor( # type: ignore[attr-defined]
235
+ self,
236
+ tag,
237
+ rankset,
238
+ group_size,
239
+ )
240
+ res = _maybe_wrap_tensor(tensor)
241
+ # TODO this should be done inside AsyncCollectiveTensor to delay the wait() call
242
+ if gather_dim != 0:
243
+ # torch.cat access the data so we already need to wait here, first do wait
244
+ # and then chunk + cat avoid us going through ACT dispatching logic again
245
+ if isinstance(res, AsyncCollectiveTensor):
246
+ res = res.wait() # type: ignore[attr-defined]
247
+ res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim)
248
+ return res
249
+
250
+
251
+ def reduce_scatter_tensor(
252
+ self: torch.Tensor,
253
+ reduceOp: str,
254
+ scatter_dim: int,
255
+ group: RANK_TYPES,
256
+ tag: str = "",
257
+ ):
258
+ """
259
+ Reduces the tensor data across all machines in such a way that all get
260
+ the final result, then scatter the results to corresponding ranks.
261
+
262
+
263
+ The input tensor is left unmodified.
264
+ Group can be one of:
265
+ List[int]: ranks participating in the collective.
266
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
267
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
268
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
269
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
270
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
271
+ that information and perform collective algebraic optimization. Use other forms of input for that.
272
+ """
273
+ if native_funcol_enabled():
274
+ group_name = _resolve_group_name(group, tag)
275
+ group_size = c10d._get_group_size_by_name(group_name)
276
+ else:
277
+ tag, rankset, group_size = _expand_group(group, tag)
278
+
279
+ assert (
280
+ self.size(scatter_dim) % group_size == 0
281
+ ), f"input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size}"
282
+ if scatter_dim != 0:
283
+ tensor_list = torch.chunk(self, group_size, dim=scatter_dim)
284
+ self = torch.cat(tensor_list)
285
+
286
+ if native_funcol_enabled():
287
+ tensor = torch.ops._c10d_functional.reduce_scatter_tensor(
288
+ self,
289
+ reduceOp.lower(),
290
+ group_size,
291
+ group_name, # type: ignore[possibly-undefined]
292
+ )
293
+ else:
294
+ tensor = torch.ops.c10d_functional.reduce_scatter_tensor( # type: ignore[attr-defined]
295
+ self,
296
+ reduceOp,
297
+ tag,
298
+ rankset, # type: ignore[possibly-undefined]
299
+ group_size,
300
+ )
301
+ res = _maybe_wrap_tensor(tensor)
302
+ return res
303
+
304
+
305
+ def all_reduce_coalesced(
306
+ self: List[torch.Tensor], reduceOp: str, group: RANK_TYPES, tag: str = ""
307
+ ) -> List[torch.Tensor]:
308
+ """
309
+ Reduces a list of tensors across all machines in such a way that all get
310
+ the final result.
311
+
312
+ The all tensors in the input list are left unmodified.
313
+
314
+ Group can be one of:
315
+ List[int]: ranks participating in the collective.
316
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
317
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
318
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
319
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
320
+
321
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
322
+ that information and perform collective algebraic optimization. Use other forms of input for that.
323
+ """
324
+ if native_funcol_enabled():
325
+ group_name = _resolve_group_name(group, tag)
326
+ tensor_list = torch.ops._c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined]
327
+ self,
328
+ reduceOp.lower(),
329
+ group_name,
330
+ )
331
+ else:
332
+ tag, rankset, group_size = _expand_group(group, tag)
333
+ tensor_list = torch.ops.c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined]
334
+ self,
335
+ reduceOp,
336
+ tag,
337
+ rankset,
338
+ group_size,
339
+ )
340
+ return list(map(_maybe_wrap_tensor, tensor_list))
341
+
342
+
343
+ def all_gather_into_tensor_coalesced(
344
+ self: List[torch.Tensor], group: RANK_TYPES, tag: str = ""
345
+ ) -> List[torch.Tensor]:
346
+ """
347
+ Gather a list of tensors across from all machines.
348
+
349
+ Note that it currently only supports gather_dim = 0.
350
+
351
+ The input tensor is left unmodified.
352
+ Group can be one of:
353
+ List[int]: ranks participating in the collective.
354
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
355
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
356
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
357
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
358
+
359
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
360
+ that information and perform collective algebraic optimization. Use other forms of input for that.
361
+ """
362
+ if native_funcol_enabled():
363
+ group_name = _resolve_group_name(group, tag)
364
+ group_size = c10d._get_group_size_by_name(group_name)
365
+ tensor_list = torch.ops._c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined]
366
+ self,
367
+ group_size,
368
+ group_name,
369
+ )
370
+ else:
371
+ tag, rankset, group_size = _expand_group(group, tag)
372
+ tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined]
373
+ self,
374
+ tag,
375
+ rankset,
376
+ group_size,
377
+ )
378
+ return list(map(_maybe_wrap_tensor, tensor_list))
379
+
380
+
381
+ def reduce_scatter_tensor_coalesced(
382
+ inputs: List[torch.Tensor],
383
+ reduceOp: str,
384
+ scatter_dim: List[int],
385
+ group: RANK_TYPES,
386
+ tag: str = "",
387
+ ) -> List[torch.Tensor]:
388
+ """
389
+ Reduces a list of tensors across all machines in such a way that all get
390
+ the final result, then scatter the results to corresponding ranks.
391
+
392
+ The input tensors are left unmodified.
393
+ Group can be one of:
394
+ List[int]: ranks participating in the collective.
395
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
396
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
397
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
398
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
399
+
400
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
401
+ that information and perform collective algebraic optimization. Use other forms of input for that.
402
+ """
403
+ if native_funcol_enabled():
404
+ group_name = _resolve_group_name(group, tag)
405
+ group_size = c10d._get_group_size_by_name(group_name)
406
+ else:
407
+ tag, rankset, group_size = _expand_group(group, tag)
408
+
409
+ assert len(scatter_dim) == len(inputs)
410
+ for idx, (dim, tensor) in enumerate(zip(scatter_dim, inputs)):
411
+ assert (
412
+ tensor.size(dim) % group_size == 0
413
+ ), f"input dimension {dim} ({tensor.size(dim)} must be a multiple of group_size {group_size} for tensor at index {idx}"
414
+ if dim != 0:
415
+ tensor_list = torch.chunk(tensor, group_size, dim=dim)
416
+ inputs[idx] = torch.cat(tensor_list)
417
+
418
+ if native_funcol_enabled():
419
+ tensor_list = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined]
420
+ inputs,
421
+ reduceOp.lower(),
422
+ group_size,
423
+ group_name, # type: ignore[possibly-undefined]
424
+ )
425
+ else:
426
+ tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined]
427
+ inputs,
428
+ reduceOp,
429
+ tag,
430
+ rankset, # type: ignore[possibly-undefined]
431
+ group_size,
432
+ )
433
+
434
+ return list(map(_maybe_wrap_tensor, tensor_list))
435
+
436
+
437
+ # This is a bit unsafe: it checks if the first argument in the schema reports as a non-mutable alias.
438
+ # Today, this maps 1:1 with "aten ops that are views".
439
+ def _is_view_op(tgt):
440
+ assert isinstance(tgt, torch._ops.OpOverload)
441
+ schema = tgt._schema
442
+ if len(schema.arguments) > 0:
443
+ first_arg = schema.arguments[0]
444
+ # check if op is a view
445
+ return first_arg.alias_info is not None and not first_arg.alias_info.is_write
446
+
447
+
448
+ def all_to_all_single(
449
+ self: torch.Tensor,
450
+ output_split_sizes: Optional[List[int]],
451
+ input_split_sizes: Optional[List[int]],
452
+ group: RANK_TYPES,
453
+ tag: str = "",
454
+ ) -> torch.Tensor:
455
+ """
456
+ Each process splits input tensor and then scatters the split list
457
+ to all processes in a group. Then concatenate the received tensors from all
458
+ the processes in the group and return single output tensor.
459
+
460
+ Group can be one of:
461
+ List[int]: ranks participating in the collective.
462
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
463
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
464
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
465
+ (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
466
+
467
+ :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
468
+ that information and perform collective algebraic optimization. Use other forms of input for that.
469
+ """
470
+ if output_split_sizes is not None:
471
+ assert all(
472
+ isinstance(size, (int, torch.SymInt)) for size in output_split_sizes
473
+ ), output_split_sizes
474
+ if input_split_sizes is not None:
475
+ assert all(
476
+ isinstance(size, (int, torch.SymInt)) for size in input_split_sizes
477
+ ), input_split_sizes
478
+ if native_funcol_enabled():
479
+ group_name = _resolve_group_name(group, tag)
480
+ group_size = c10d._get_group_size_by_name(group_name)
481
+ if output_split_sizes is None or input_split_sizes is None:
482
+ assert output_split_sizes is None and input_split_sizes is None, (
483
+ "output_split_sizes and input_split_sizes must either be "
484
+ "specified together or both set to None"
485
+ )
486
+ output_split_sizes = [self.shape[0] // group_size] * group_size
487
+ input_split_sizes = output_split_sizes
488
+ tensor = torch.ops._c10d_functional.all_to_all_single( # type: ignore[attr-defined]
489
+ self,
490
+ output_split_sizes,
491
+ input_split_sizes,
492
+ group_name,
493
+ )
494
+ else:
495
+ tag, rankset, group_size = _expand_group(group, tag)
496
+ tensor = torch.ops.c10d_functional.all_to_all_single( # type: ignore[attr-defined]
497
+ self,
498
+ output_split_sizes,
499
+ input_split_sizes,
500
+ tag,
501
+ rankset,
502
+ group_size,
503
+ )
504
+ return _maybe_wrap_tensor(tensor)
505
+
506
+
507
+ def permute_tensor(
508
+ self: torch.Tensor,
509
+ src_dst: List[int],
510
+ group: RANK_TYPES,
511
+ tag: str = "",
512
+ ) -> torch.Tensor:
513
+ """
514
+ Permutes the elements of the tensor according to the given source/destination pairs. `src_dst` should
515
+ be defined such that src_dst[m] == n means m sends to n.
516
+
517
+ Group can be one of:
518
+ List[int]: ranks participating in the collective.
519
+ List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
520
+ ProcessGroup: Will perform a collective using the ranks and tag of the PG.
521
+ DeviceMesh: Do a SPMD collective over all ranks of the mesh
522
+ (DeviceMesh, int): Do a MPMD collective over one
523
+ """
524
+ t, rankset, group_size = _expand_group(group, tag)
525
+ local_pg = c10d._find_or_create_pg_by_ranks_and_tag(t, rankset, group_size)
526
+
527
+ output_split_sizes = [0] * group_size
528
+ input_split_sizes = [0] * group_size
529
+ for src, dst in enumerate(src_dst):
530
+ if src == dist.get_rank(local_pg):
531
+ input_split_sizes[dst] = self.numel()
532
+ if dst == dist.get_rank(local_pg):
533
+ output_split_sizes[src] = self.numel()
534
+
535
+ return all_to_all_single(self, output_split_sizes, input_split_sizes, group, tag)
536
+
537
+
538
+ class AsyncCollectiveTensor(torch.Tensor):
539
+ r"""
540
+ A Tensor wrapper subclass that is used to trigger a call to wait
541
+ prior to first use of the underlying tensor.
542
+ Use it inside functional collective pytorch wrappers like the following:
543
+ def functional_collective(self, group, tag):
544
+ tag, rankset, group_size = _expand_group(group, tag)
545
+ tensor = torch.ops.c10d_functional.{collective}(self, tag, rankset, group_size)
546
+ return _maybe_wrap_tensor(tensor)
547
+ """
548
+ elem: torch.Tensor
549
+ completed: bool
550
+
551
+ __slots__ = ["elem", "completed"]
552
+
553
+ @staticmethod
554
+ def __new__(cls, elem: torch.Tensor):
555
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
556
+ cls,
557
+ elem.size(),
558
+ strides=elem.stride(),
559
+ storage_offset=elem.storage_offset(),
560
+ dtype=elem.dtype,
561
+ layout=elem.layout,
562
+ device=elem.device,
563
+ requires_grad=False,
564
+ )
565
+ r.elem = elem
566
+ r.completed = False
567
+ return r
568
+
569
+ def __tensor_flatten__(self):
570
+ return ["elem"], None
571
+
572
+ def tolist(self):
573
+ self.trigger_wait()
574
+ return self.elem.tolist()
575
+
576
+ @staticmethod
577
+ def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
578
+ assert meta is None
579
+ elem = inner_tensors["elem"]
580
+ return AsyncCollectiveTensor(elem)
581
+
582
+ def __repr__(self):
583
+ self.trigger_wait()
584
+ return f"AsyncCollectiveTensor({self.elem})"
585
+
586
+ def trigger_wait(self):
587
+ if not self.completed:
588
+ wait_tensor(self.elem)
589
+ self.completed = True
590
+ return self.elem
591
+
592
+ def wait(self) -> torch.Tensor:
593
+ wait_tensor(self.elem)
594
+ return self.elem
595
+
596
+ def _get_acs_underlying_tensor(self):
597
+ """This method enables _functional_collectives_impl to test if a tensor is an ACS"""
598
+ return self.elem
599
+
600
+ @classmethod
601
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
602
+ if func == torch.ops.aten.view.default:
603
+ # Fast handle aten.view as a lot of view related op goes to aten.view
604
+ # eventually, this avoids pytree slowdown
605
+ res = func(args[0].elem, args[1])
606
+ wrapper_res = AsyncCollectiveTensor(res)
607
+ _register_tensor_wrapper(wrapper_res)
608
+ return wrapper_res
609
+
610
+ is_view_op = _is_view_op(func)
611
+
612
+ def unwrap(e: AsyncCollectiveTensor):
613
+ # wait_tensor is idepotent and will do stream sync only once
614
+ if not is_view_op:
615
+ e.trigger_wait()
616
+ return e.elem
617
+
618
+ def wrap(e: torch.Tensor):
619
+ # wait_tensor is idepotent and will do stream sync only once
620
+ assert not isinstance(e, AsyncCollectiveTensor)
621
+ res = AsyncCollectiveTensor(e)
622
+ _register_tensor_wrapper(res)
623
+ return res
624
+
625
+ unwrapped_args = tree_map_only(AsyncCollectiveTensor, unwrap, args)
626
+ unwrapped_kwargs = tree_map_only(AsyncCollectiveTensor, unwrap, kwargs)
627
+
628
+ # we don't wrap the result as it doesn't need to be waited on.
629
+ out = func(*unwrapped_args, **unwrapped_kwargs)
630
+
631
+ # View ops dont require a sync, so we should re-wrap the outputs.
632
+ if is_view_op:
633
+ out = tree_map_only(torch.Tensor, wrap, out)
634
+
635
+ return out
636
+
637
+ def numpy(self):
638
+ return self.wait().numpy()
639
+
640
+
641
+ """
642
+ Utils and infrastructure for tracing support
643
+ """
644
+
645
+
646
+ def _expand_group(group: RANK_TYPES, tag: str = "") -> Tuple[str, List[int], int]:
647
+ """
648
+ _expand_group desugars the different RANK_TYPES types into a canonical format that is traceable.
649
+
650
+ By having this be part of the explicit eager codepath, we avoid having to specialize behavior inside
651
+ torchdynamo and can still interoperate with processgroup objects or other untraceable forms.
652
+ """
653
+ # had to define this hack _inside_ expand_group to avoid
654
+ # graph_break [('torch.* op returned non-Tensor int
655
+ # caused by 'cast_*` functions being treated as 'torch.*' ops (iiuc)
656
+ if TYPE_CHECKING:
657
+
658
+ def cast_listlistint(x):
659
+ return cast(List[List[int]], x)
660
+
661
+ def cast_listint(x):
662
+ return cast(List[int], x)
663
+
664
+ else:
665
+ # fake cast op for use at runtime since dynamo doesn't support real cast
666
+ # also, dynamo didn't like encountering 'typing' objects ()
667
+ # NotImplementedError: argument of type: <class 'typing._GenericAlias'>
668
+ def cast_listlistint(x):
669
+ return x
670
+
671
+ def cast_listint(x):
672
+ return x
673
+
674
+ rankset: List[int]
675
+ if isinstance(group, list):
676
+ if isinstance(group[0], list):
677
+ nested_list = cast_listlistint(group)
678
+ rankset = []
679
+ group_size = -1
680
+ for rs in nested_list:
681
+ rankset.extend(rs)
682
+ if group_size != -1 and group_size != len(rs):
683
+ raise ValueError(
684
+ f"group sizes must be identical found {group_size} and {len(rs)}"
685
+ )
686
+ group_size = len(rs)
687
+ else:
688
+ rankset = cast_listint(group)
689
+ group_size = len(rankset)
690
+ elif isinstance(group, dist.ProcessGroup):
691
+ rankset = dist.get_process_group_ranks(group)
692
+ group_size = len(rankset)
693
+ tag = tag or c10d._get_group_tag(group)
694
+ elif isinstance(group, DeviceMesh):
695
+ assert (
696
+ group.ndim == 1
697
+ ), "Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D"
698
+ # TODO: it should run collective in the whole mesh instead of dim 0
699
+ tag, rankset, _ = group._dim_group_infos[0]
700
+ group_size = len(rankset)
701
+ elif isinstance(group, tuple):
702
+ if (
703
+ len(group) == 2
704
+ and isinstance(group[0], DeviceMesh)
705
+ and isinstance(group[1], int)
706
+ ):
707
+ dmesh = group[0]
708
+ dim = group[1]
709
+ tag, rankset, _ = dmesh._dim_group_infos[dim]
710
+ group_size = len(rankset)
711
+ else:
712
+ raise ValueError("Invalid tuple for group must be (DeviceMesh, int)")
713
+ else:
714
+ raise ValueError(
715
+ "Invalid type for group, must be one of List, Processgroup, DeviceMesh or (DeviceMesh, int)."
716
+ )
717
+
718
+ return (tag, rankset, group_size)
719
+
720
+
721
+ def _resolve_group_name(group: RANK_TYPES, tag: str = "") -> str:
722
+ """
723
+ Given group in RANK_TYPES, return the group name.
724
+ """
725
+ # `tag` will be deprecated. See details in:
726
+ # https://github.com/pytorch/pytorch/issues/93173#issuecomment-1907095208
727
+ if isinstance(group, dist.ProcessGroup):
728
+ return group.group_name
729
+ elif isinstance(group, str):
730
+ return group
731
+ elif isinstance(group, DeviceMesh):
732
+ assert (
733
+ group.ndim == 1
734
+ ), "Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D"
735
+ return group._dim_group_infos[0][2]
736
+ elif isinstance(group, tuple):
737
+ if (
738
+ len(group) == 2
739
+ and isinstance(group[0], DeviceMesh)
740
+ and isinstance(group[1], int)
741
+ ):
742
+ dmesh = group[0]
743
+ dim = group[1]
744
+ return dmesh._dim_group_infos[dim][2]
745
+ else:
746
+ raise ValueError("Invalid tuple for group must be (DeviceMesh, int)")
747
+ elif isinstance(group, list):
748
+ if not is_torchdynamo_compiling():
749
+ warnings.warn(
750
+ "The combination of ranks + tag as process group "
751
+ "identifier has been deprecated. Please switch to "
752
+ "using ProcessGroup, DeviceMesh, or group name instead."
753
+ )
754
+ return c10d._resolve_group_name_by_ranks_and_tag(cast(List[int], group), tag)
755
+ else:
756
+ raise ValueError(f"Unsupported group type: {type(group)}, {group}")
757
+
758
+
759
+ def _are_we_tracing() -> bool:
760
+ if is_torchdynamo_compiling():
761
+ return True
762
+ # If functionalization is turned on, we are almost definitely compiling/tracing.
763
+ # (In particular, AOTAutograd traces a model once with functionalization on
764
+ # but proxy tracing turned of, so this is how we detect it).
765
+ if (
766
+ torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
767
+ is not None
768
+ ):
769
+ return True
770
+ mode = get_innermost_proxy_mode()
771
+ if mode is None:
772
+ return False
773
+ return mode.tracer is not None
774
+
775
+
776
+ def _maybe_wrap_tensor(self) -> torch.Tensor:
777
+ if _are_we_tracing():
778
+ return wait_tensor(self)
779
+ res = AsyncCollectiveTensor(self)
780
+ _register_tensor_wrapper(res)
781
+ return cast(torch.Tensor, res)
782
+
783
+
784
+ def _all_gather_into_tensor_coalesced_meta(self, tag, rankset, group_size):
785
+ def mk_out_tensor(shard):
786
+ out_size = list(shard.size())
787
+ out_size[0] *= group_size
788
+ out_tensor = shard.new_empty(out_size)
789
+ return out_tensor
790
+
791
+ return [mk_out_tensor(t) for t in self]
792
+
793
+
794
+ # We now register meta kernels to deal with tracing
795
+ def _broadcast_meta(self, *args):
796
+ return torch.empty_like(self)
797
+
798
+
799
+ def _all_reduce_meta(self, *args):
800
+ return torch.empty_like(self)
801
+
802
+
803
+ def _wait_tensor_meta(self, *args):
804
+ return torch.empty_like(self)
805
+
806
+
807
+ def _all_gather_into_tensor_meta(shard, tag, rankset, group_size):
808
+ out_size = list(shard.size())
809
+ out_size[0] *= group_size
810
+ return shard.new_empty(out_size)
811
+
812
+
813
+ def _reduce_scatter_tensor_meta(input, reduce_op, tag, rankset, group_size):
814
+ out_size = list(input.size())
815
+ out_size[0] //= group_size
816
+ return input.new_empty(out_size)
817
+
818
+
819
+ def _all_reduce_coalesced_meta(self, *args):
820
+ return [torch.empty_like(t) for t in self]
821
+
822
+
823
+ def _all_reduce__meta(inp, *args):
824
+ return inp
825
+
826
+
827
+ def _broadcast__meta(inp, *args):
828
+ return inp
829
+
830
+
831
+ def _all_reduce_coalesced__meta(inputs, *args):
832
+ return inputs
833
+
834
+
835
+ def _reduce_scatter_tensor_coalesced_meta(inputs, reduceOp, tag, rankset, group_size):
836
+ def mk_out_tensor(input):
837
+ out_size = list(input.size())
838
+ out_size[0] //= group_size
839
+ out_tensor = input.new_empty(out_size)
840
+ return out_tensor
841
+
842
+ return [mk_out_tensor(t) for t in inputs]
843
+
844
+
845
+ # NB: We often say all_to_all has dynamic output size, but this is not
846
+ # technically true: instead, what typically happens is you manually
847
+ # communicate the output_split_sizes ahead of time (which is dynamic),
848
+ # but then you pass those sizes explicitly, and the all_to_all itself
849
+ # isn't dynamic, it just follows the specified output splits
850
+ def _all_to_all_single_meta(
851
+ input, output_split_sizes, input_split_sizes, *args, **kwargs
852
+ ):
853
+ if output_split_sizes is None:
854
+ return input.new_empty(input.size())
855
+ else:
856
+ for s in output_split_sizes:
857
+ torch._check_is_size(s)
858
+ out_size = list(input.size())
859
+ out_size[0] = sum(output_split_sizes)
860
+ return input.new_empty(out_size)
861
+
862
+
863
+ def _all_gather_into_tensor_native_meta(input, group_size, group_name):
864
+ shape = list(input.size())
865
+ shape[0] *= group_size
866
+ return input.new_empty(shape)
867
+
868
+
869
+ def _all_gather_into_tensor_coalesced_native_meta(inputs, group_size, group_name):
870
+ return [
871
+ _all_gather_into_tensor_native_meta(input, group_size, group_name)
872
+ for input in inputs
873
+ ]
874
+
875
+
876
+ def _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name):
877
+ shape = list(inp.size())
878
+ shape[0] //= group_size
879
+ return inp.new_empty(shape)
880
+
881
+
882
+ def _reduce_scatter_tensor_coalesced_native_meta(
883
+ inputs, reduce_op, group_size, group_name
884
+ ):
885
+ return [
886
+ _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name)
887
+ for inp in inputs
888
+ ]
889
+
890
+
891
+ def _register_ops():
892
+ ops_defs = [
893
+ "broadcast(Tensor self, int src, str tag, int[] ranks, int group_size) -> Tensor",
894
+ "all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
895
+ "all_reduce_coalesced(Tensor[] self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]",
896
+ "wait_tensor(Tensor self) -> Tensor",
897
+ "all_gather_into_tensor(Tensor shard, str tag, int[] ranks, int group_size) -> Tensor",
898
+ "all_gather_into_tensor_coalesced(Tensor[] input, str tag, int[] ranks, int group_size) -> Tensor[]",
899
+ "reduce_scatter_tensor(Tensor input, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
900
+ "reduce_scatter_tensor_coalesced(Tensor[] inputs, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]",
901
+ "all_to_all_single(Tensor input, SymInt[]? output_split_sizes, SymInt[]? input_split_sizes, str tag, int[] ranks, int group_size) -> Tensor", # noqa: B950
902
+ ]
903
+
904
+ my_module = sys.modules[__name__]
905
+ for op_def in ops_defs:
906
+ op_name = op_def[0 : op_def.index("(")]
907
+ backend_impl = getattr(fun_col_impl, f"_{op_name}")
908
+ meta_impl = getattr(my_module, f"_{op_name}_meta")
909
+ c10_lib.define(op_def, tags=torch.Tag.pt2_compliant_tag)
910
+ c10_lib_impl.impl(op_name, backend_impl, "CompositeExplicitAutograd")
911
+ impl_abstract(f"c10d_functional::{op_name}")(meta_impl)
912
+
913
+
914
+ if not torch._running_with_deploy():
915
+ # Library MUST be defined at module scope or it doesn't work
916
+ # Creating a "DEF" Library always crashes torch::deploy so we create our Library instances here
917
+ # guarded against running inside it
918
+ c10_lib = torch.library.Library("c10d_functional", "DEF")
919
+ c10_lib_impl = torch.library.Library("c10d_functional", "IMPL")
920
+ _register_ops()
921
+
922
+ _c10_lib_impl = torch.library.Library("_c10d_functional", "IMPL")
923
+ _c10_lib_impl.impl("all_reduce", _all_reduce_meta, "Meta")
924
+ _c10_lib_impl.impl("all_reduce_", _all_reduce__meta, "Meta")
925
+ _c10_lib_impl.impl("all_reduce_coalesced", _all_reduce_coalesced_meta, "Meta")
926
+ _c10_lib_impl.impl("all_reduce_coalesced_", _all_reduce_coalesced__meta, "Meta")
927
+ _c10_lib_impl.impl("wait_tensor", _wait_tensor_meta, "Meta")
928
+ _c10_lib_impl.impl(
929
+ "all_gather_into_tensor", _all_gather_into_tensor_native_meta, "Meta"
930
+ )
931
+ _c10_lib_impl.impl(
932
+ "all_gather_into_tensor_coalesced",
933
+ _all_gather_into_tensor_coalesced_native_meta,
934
+ "Meta",
935
+ )
936
+ _c10_lib_impl.impl(
937
+ "reduce_scatter_tensor", _reduce_scatter_tensor_native_meta, "Meta"
938
+ )
939
+ _c10_lib_impl.impl(
940
+ "reduce_scatter_tensor_coalesced",
941
+ _reduce_scatter_tensor_coalesced_native_meta,
942
+ "Meta",
943
+ )
944
+ _c10_lib_impl.impl("all_to_all_single", _all_to_all_single_meta, "Meta")
945
+ _c10_lib_impl.impl("broadcast", _broadcast_meta, "Meta")
946
+ _c10_lib_impl.impl("broadcast_", _broadcast__meta, "Meta")
947
+ else:
948
+ warnings.warn(
949
+ "PyTorch Distributed functional collectives do not work with torch::deploy."
950
+ )
951
+
952
+
953
+ """
954
+ Dynamo Remappings allow seamless translation from non-functional collectives of a supported form into
955
+ functional collective calls followed by inplace copy ops, allowing them to be traced into a functional graph.
956
+
957
+ We implement this by writing a decomposition and teaching dynamo how to associate it with a corresponding op via
958
+ the mapping dict below.
959
+
960
+ These schemas intentionally match torch.distributed.distributed_c10d.* ops that we are trying to remap from
961
+ """
962
+
963
+
964
+ def all_gather_tensor_inplace(
965
+ output_tensor: torch.Tensor,
966
+ input_tensor: torch.Tensor,
967
+ group, # TODO add a type,
968
+ async_op: bool = False,
969
+ tag: str = "",
970
+ gather_dim: int = 0,
971
+ ):
972
+ assert (
973
+ not async_op
974
+ ), "Can't remap async version of inplace op to functional collective"
975
+ return output_tensor.copy_(all_gather_tensor(input_tensor, gather_dim, group, tag))
976
+
977
+
978
+ def reduce_scatter_tensor_inplace(
979
+ output: torch.Tensor,
980
+ input: torch.Tensor,
981
+ op: str = "sum", # TODO type is actually c10d ReduceOp. is this ok?
982
+ group=None, # TODO add a type
983
+ async_op: bool = False,
984
+ scatter_dim: int = 0,
985
+ tag: str = "",
986
+ ):
987
+ assert (
988
+ not async_op
989
+ ), "Can't remap async version of inplace op to functional collective"
990
+ return output.copy_(reduce_scatter_tensor(input, op, scatter_dim, group, tag))
991
+
992
+
993
+ REDUCE_OP_TO_STR = {
994
+ dist.ReduceOp.SUM: "sum",
995
+ dist.ReduceOp.AVG: "avg",
996
+ dist.ReduceOp.PRODUCT: "product",
997
+ dist.ReduceOp.MIN: "min",
998
+ dist.ReduceOp.MAX: "max",
999
+ dist.ReduceOp.BAND: "band",
1000
+ dist.ReduceOp.BOR: "bor",
1001
+ dist.ReduceOp.BXOR: "bxor",
1002
+ }
1003
+
1004
+
1005
+ def all_reduce_inplace(
1006
+ tensor: torch.Tensor,
1007
+ op: str = "sum",
1008
+ group=None,
1009
+ async_op: bool = False,
1010
+ tag: str = "",
1011
+ ):
1012
+ assert (
1013
+ not async_op
1014
+ ), "Can't remap async version of inplace op to functional collective"
1015
+
1016
+ return tensor.copy_(all_reduce(tensor, op, group, tag))
1017
+
1018
+
1019
+ def all_to_all_inplace(
1020
+ output: torch.Tensor,
1021
+ input: torch.Tensor,
1022
+ output_split_sizes=None,
1023
+ input_split_sizes=None,
1024
+ group=None,
1025
+ async_op=False,
1026
+ tag: str = "",
1027
+ ):
1028
+ assert (
1029
+ not async_op
1030
+ ), "Can't remap async version of inplace op to functional collective"
1031
+ return output.copy_(
1032
+ all_to_all_single(input, output_split_sizes, input_split_sizes, group, tag)
1033
+ )
1034
+
1035
+
1036
+ def all_gather_inplace(
1037
+ tensor_list: List[torch.Tensor],
1038
+ tensor: torch.Tensor,
1039
+ group=None,
1040
+ async_op=False,
1041
+ tag: str = "",
1042
+ ):
1043
+ assert (
1044
+ not async_op
1045
+ ), "Can't remap async version of inplace op to functional collective"
1046
+ assert all(
1047
+ t.size(0) == tensor.size(0) for t in tensor_list
1048
+ ), "Remapping variable size all_gather is not yet supported"
1049
+
1050
+ output = all_gather_tensor(tensor, 0, group, tag)
1051
+
1052
+ # Use aten.slice instead of aten.split because the latter causes
1053
+ # tensor.size(0) to be unnecessarily baked in when it's a SymInt.
1054
+ output_splits = []
1055
+ offset = 0
1056
+ for t in tensor_list:
1057
+ output_splits.append(output[offset : offset + t.size(0)])
1058
+ offset += t.size(0)
1059
+ for dst, src in zip(tensor_list, output_splits):
1060
+ dst.copy_(src)
1061
+ return tensor_list
1062
+
1063
+
1064
+ from torch.distributed.distributed_c10d import (
1065
+ _all_gather_base as legacy_all_gather_base,
1066
+ _reduce_scatter_base as legacy_reduce_scatter_base,
1067
+ all_gather as legacy_all_gather,
1068
+ all_gather_into_tensor as legacy_allgather,
1069
+ all_reduce as legacy_allreduce,
1070
+ all_to_all_single as legacy_all_to_all_single,
1071
+ reduce_scatter_tensor as legacy_reducescatter,
1072
+ )
1073
+
1074
+ # This dict should contain sets of functions that dynamo is allowed to remap.
1075
+ # Functions in this set should accept the same args/kwargs 1:1 as their mapping.
1076
+ traceable_collective_remaps = {
1077
+ legacy_allgather: all_gather_tensor_inplace,
1078
+ legacy_reducescatter: reduce_scatter_tensor_inplace,
1079
+ legacy_allreduce: all_reduce_inplace,
1080
+ legacy_all_to_all_single: all_to_all_inplace,
1081
+ legacy_all_gather: all_gather_inplace,
1082
+ legacy_reduce_scatter_base: reduce_scatter_tensor_inplace,
1083
+ legacy_all_gather_base: all_gather_tensor_inplace,
1084
+ }
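
As a brief illustration of the remapping table above, here is a minimal, hypothetical sketch contrasting a legacy in-place collective with the functional form this file exposes. It assumes a default process group has already been initialized; the tensor shape and the choice of "sum" are placeholders, not anything prescribed by the file.

import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol

def legacy_style(t: torch.Tensor) -> torch.Tensor:
    # In-place c10d collective; under torch.compile, dynamo remaps this call
    # (via traceable_collective_remaps) to all_reduce_inplace above.
    dist.all_reduce(t)
    return t

def functional_style(t: torch.Tensor) -> torch.Tensor:
    # Functional collective: returns a new (async) tensor instead of mutating
    # the input; wait_tensor synchronizes it explicitly.
    out = funcol.all_reduce(t, "sum", group=dist.group.WORLD)
    return funcol.wait_tensor(out)
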
venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py ADDED
@@ -0,0 +1,409 @@
1
+ import logging
2
+ import os
3
+ import warnings
4
+ import weakref
5
+ from typing import cast, Dict, List, Optional
6
+
7
+ import torch
8
+ import torch.distributed as dist
9
+ import torch.distributed.distributed_c10d as c10d
10
+
11
+ """
12
+ Moved eager kernel implementations to a separate file partly for readability and partly as it is currently
13
+ easier in dynamo to set tracing policy on a file-by-file level.
14
+
15
+ Do not put code in this file that Dynamo is expected to trace into, as dynamo may disallow this whole file.
16
+
17
+ DEBUG/TESTING HELPERS:
18
+
19
+ This module includes some helpers that are quite useful when debugging or testing functional collectives:
20
+
21
+ _tensor_needs_wait
22
+ _outstanding_wait_count
23
+ _wait_all
24
+
25
+ """
26
+
27
+ _use_native_funcol: Optional[bool] = None
28
+
29
+
30
+ if torch._running_with_deploy():
31
+
32
+ def native_funcol_enabled():
33
+ return False
34
+
35
+ else:
36
+ from torch._dynamo import assume_constant_result
37
+
38
+ @assume_constant_result
39
+ def native_funcol_enabled():
40
+ global _use_native_funcol
41
+ if _use_native_funcol is None:
42
+ try:
43
+ # Disable native funcol when torch_xla is installed. This check
44
+ # will be removed once torch_xla adopts the native_funcol IR.
45
+ import torch_xla # noqa: F401
46
+
47
+ _use_native_funcol = False
48
+ except Exception:
49
+ # When TORCH_DISABLE_NATIVE_FUNCOL is set, fall back to py funcol
50
+ _use_native_funcol = (
51
+ os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1"
52
+ )
53
+
54
+ return _use_native_funcol
55
+
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+ data_ptr_to_work: Dict[int, "_WaitRegistration"] = dict()
60
+ work_version = 0
61
+
62
+
63
+ class _WaitRegistration:
64
+ def __init__(self, work):
65
+ global work_version
66
+ self.work = work
67
+ self.version = work_version
68
+ self.ptrs = set()
69
+ self.ptr_alias_count = {}
70
+ self.cleanup_count = 0
71
+ work_version += 1
72
+
73
+ def _register_tensor_ptr(self, data_ptr):
74
+ global data_ptr_to_work
75
+ data_ptr_to_work[data_ptr] = self
76
+ self.ptrs.add(data_ptr)
77
+
78
+ def _record_wrapper(self, ptr):
79
+ self._register_tensor_ptr(ptr)
80
+ self.ptr_alias_count.setdefault(ptr, 0)
81
+ self.ptr_alias_count[ptr] += 1
82
+ self.cleanup_count += 1
83
+
84
+ def wait(self):
85
+ if self.work is not None:
86
+ self.work.wait()
87
+ self.work = None
88
+ self.cleanup()
89
+
90
+ def decrement_live_tensor(self, ptr):
91
+ self.cleanup_count -= 1
92
+ if self.cleanup_count == 0:
93
+ self.wait()
94
+ else:
95
+ self.ptr_alias_count[ptr] -= 1
96
+ if (
97
+ self.ptr_alias_count[ptr] < 1
98
+ and data_ptr_to_work.get(ptr, None) == self
99
+ ):
100
+ del data_ptr_to_work[ptr]
101
+
102
+ def cleanup(self):
103
+ for ptr in self.ptrs:
104
+ if data_ptr_to_work.get(ptr, None) == self:
105
+ del data_ptr_to_work[ptr]
106
+
107
+
108
+ def _register_tensor_work(tensor_or_list, work_or_list):
109
+ if not isinstance(tensor_or_list, list):
110
+ tensor_or_list = [tensor_or_list]
111
+ if not isinstance(work_or_list, list):
112
+ reg = _WaitRegistration(work_or_list)
113
+ for tensor in tensor_or_list:
114
+ reg._register_tensor_ptr(tensor.data_ptr())
115
+ else:
116
+ for tensor, work in zip(tensor_or_list, work_or_list):
117
+ reg = _WaitRegistration(work)
118
+ reg._register_tensor_ptr(tensor.data_ptr())
119
+
120
+
121
+ def _wait_reg_dec(ptr, wait_reg):
122
+ wait_reg.decrement_live_tensor(ptr)
123
+
124
+
125
+ def _register_tensor_wrapper(tensor) -> None:
126
+ if native_funcol_enabled():
127
+ # Tensor storage -> work mapping is maintained in C++
128
+ return
129
+ global data_ptr_to_work
130
+ data_ptr = tensor.elem.data_ptr()
131
+ # Note: we should NEVER try to trace this, bc it registers runtime stuff during trace.
132
+ # Instead, backends must call this themselves when implementing traced collectives.
133
+ wait_reg = data_ptr_to_work.get(data_ptr, None)
134
+ if wait_reg is None:
135
+ warnings.warn(
136
+ "Trying to register finalizer to AsyncCollectiveTensor but the inner tensor is already gone"
137
+ )
138
+ else:
139
+ # We force the collective to be waited on if this tensor goes away, to reduce the chance of deadlocks.
140
+ # NOTE: we register the callback to the ACT wrapper class, for the following reasons:
141
+ # 1. The inner tensor is referenced by the associated Work object, so it's uncollectable until we release the
142
+ # associated work object
143
+ # 2. There's an n-to-1 relationship between wrappers and the inner tensor due to non-waitable ops like view()
144
+ wait_reg._record_wrapper(data_ptr)
145
+ weakref.finalize(tensor, _wait_reg_dec, data_ptr, wait_reg)
146
+
147
+
148
+ def _wait_tensor(tensor: torch.Tensor) -> torch.Tensor:
149
+ global data_ptr_to_work
150
+ data_ptr = tensor.data_ptr()
151
+ wait_reg = data_ptr_to_work.get(data_ptr)
152
+ if wait_reg is not None:
153
+ wait_reg.wait()
154
+ return tensor
155
+
156
+
157
+ def _tensor_needs_wait(tensor: torch.Tensor) -> bool:
158
+ """Returns true if ```tensor``` needs to be waited. Works with ACS and inner tensors."""
159
+ if hasattr(tensor, "_get_acs_underlying_tensor"):
160
+ tensor = tensor._get_acs_underlying_tensor()
161
+ data_ptr = tensor.data_ptr()
162
+ wait_reg = data_ptr_to_work.get(data_ptr)
163
+ return wait_reg is not None and wait_reg.work is not None
164
+
165
+
166
+ def _outstanding_wait_count() -> int:
167
+ """Returns the number of outstanding work objects waiting to be waited (sic)."""
168
+ return len(data_ptr_to_work)
169
+
170
+
171
+ def _wait_all() -> None:
172
+ """Wait for all outstanding collectives."""
173
+ for work_reg in list(data_ptr_to_work.values()):
174
+ work_reg.wait()
175
+
176
+
177
+ def _str_to_reduce_op(reduceOp: str) -> dist.ReduceOp:
178
+ reduceOp = reduceOp.upper()
179
+ op = dist.ReduceOp.RedOpType.__members__.get(reduceOp)
180
+ if op is None:
181
+ raise ValueError(f"Invalid reduce operation {reduceOp}")
182
+ return cast(dist.ReduceOp, op)
183
+
184
+
185
+ """
186
+ Kernel implementations (for eager runtime only) - should never be traced by torch.compile
187
+
188
+ These functions should all be bound to dispatcher ops. During tracing, the op itself should be
189
+ captured in the graph and the backend should implement the op however it prefers.
190
+ """
191
+
192
+
193
+ def _broadcast(self, src, tag, ranks, group_size):
194
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
195
+ assert group is not None
196
+
197
+ inplace_tensor = self.clone(memory_format=torch.contiguous_format)
198
+ work = dist.broadcast(inplace_tensor, src, group=group, async_op=True)
199
+ _register_tensor_work(inplace_tensor, work)
200
+
201
+ return inplace_tensor
202
+
203
+
204
+ # TODO assert if ranks has duplicated entries
205
+ def _all_reduce(self, reduceOp, tag, ranks, group_size):
206
+ op = _str_to_reduce_op(reduceOp)
207
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
208
+ assert group is not None
209
+
210
+ inplace_tensor = self.clone(memory_format=torch.contiguous_format)
211
+ work = dist.all_reduce(inplace_tensor, op=op, group=group, async_op=True)
212
+ _register_tensor_work(inplace_tensor, work)
213
+
214
+ return inplace_tensor
215
+
216
+
217
+ def _all_reduce_coalesced(self, reduceOp, tag, ranks, group_size):
218
+ op = _str_to_reduce_op(reduceOp)
219
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
220
+ assert group is not None
221
+
222
+ inplace_tensor_list = [t.clone(memory_format=torch.contiguous_format) for t in self]
223
+ work = dist.all_reduce_coalesced(
224
+ inplace_tensor_list, op=op, group=group, async_op=True
225
+ )
226
+ _register_tensor_work(inplace_tensor_list, work)
227
+
228
+ return inplace_tensor_list
229
+
230
+
231
+ def _all_gather_into_tensor(shard, tag, ranks, group_size):
232
+ # TODO add dim support?
233
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
234
+ assert group is not None
235
+ out_size = list(shard.size())
236
+ out_size[0] *= group_size
237
+ out_tensor = shard.new_empty(out_size)
238
+ assert out_tensor.is_contiguous()
239
+ # FIXME gloo doesn't support _allgather_base
240
+ if dist.get_backend(group) == dist.Backend.GLOO or shard.is_cpu:
241
+ tensor_list = list(torch.chunk(out_tensor, group_size))
242
+ work = dist.all_gather(tensor_list, shard, group=group, async_op=True)
243
+ else:
244
+ work = dist.all_gather_into_tensor(
245
+ out_tensor, shard, group=group, async_op=True
246
+ )
247
+ _register_tensor_work(out_tensor, work)
248
+
249
+ return out_tensor
250
+
251
+
252
+ def _all_gather_into_tensor_coalesced(self, tag, rankset, group_size):
253
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, rankset, group_size)
254
+ assert group is not None
255
+
256
+ def mk_out_tensor(shard):
257
+ out_size = list(shard.size())
258
+ out_size[0] *= group_size
259
+ out_tensor = shard.new_empty(out_size)
260
+ assert out_tensor.is_contiguous()
261
+ return out_tensor
262
+
263
+ out_tensors = [mk_out_tensor(t) for t in self]
264
+
265
+ work_list = _all_gather_into_tensor_coalesced_fallback(
266
+ output_tensors=out_tensors, input_tensors=self, group=group, async_op=True
267
+ )
268
+
269
+ _register_tensor_work(out_tensors, work_list)
270
+ return out_tensors
271
+
272
+
273
+ def _reduce_scatter_tensor(
274
+ input: torch.Tensor,
275
+ reduceOp: str,
276
+ tag: str,
277
+ ranks: List[int],
278
+ group_size: int,
279
+ ):
280
+ # TODO add dim support?
281
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
282
+ assert group is not None
283
+ op = _str_to_reduce_op(reduceOp)
284
+
285
+ if dist.get_backend(group) == dist.Backend.GLOO or input.is_cpu:
286
+ # cpu::gloo backend does not have reduce_scatter; we fall back to all_reduce
287
+ # + local chunk
288
+ logger.warning(
289
+ "ProcessGroupGloo does not support reduce_scatter, falling back with all reduce!"
290
+ )
291
+ reduction_input = input.clone()
292
+ group_rank = dist.get_rank(group)
293
+ work = dist.all_reduce(reduction_input, op=op, group=group, async_op=True)
294
+ out_tensor = reduction_input.chunk(group_size, dim=0)[group_rank]
295
+ _register_tensor_work(out_tensor, work)
296
+ else:
297
+ out_size = list(input.size())
298
+ out_size[0] //= group_size
299
+ out_tensor = input.new_empty(out_size)
300
+ work = dist.reduce_scatter_tensor(
301
+ out_tensor, input, op=op, group=group, async_op=True
302
+ )
303
+ _register_tensor_work(out_tensor, work)
304
+
305
+ return out_tensor
306
+
307
+
308
+ def _reduce_scatter_tensor_coalesced(
309
+ inputs: List[torch.Tensor],
310
+ reduce_op: str,
311
+ tag: str,
312
+ ranks: List[int],
313
+ group_size: int,
314
+ ):
315
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
316
+ assert group is not None
317
+ op = _str_to_reduce_op(reduce_op)
318
+
319
+ def mk_out_tensor(shard):
320
+ out_size = list(shard.size())
321
+ out_size[0] //= group_size
322
+ out_tensor = shard.new_empty(out_size)
323
+ assert out_tensor.is_contiguous()
324
+ return out_tensor
325
+
326
+ out_tensors = [mk_out_tensor(t) for t in inputs]
327
+
328
+ work_list = _reduce_scatter_tensor_coalesced_fallback(
329
+ output_tensors=out_tensors,
330
+ input_tensors=inputs,
331
+ op=op,
332
+ group=group,
333
+ async_op=False,
334
+ )
335
+
336
+ _register_tensor_work(out_tensors, work_list)
337
+ return out_tensors
338
+
339
+
340
+ def _all_gather_into_tensor_coalesced_fallback(
341
+ output_tensors, input_tensors, group, async_op=False
342
+ ):
343
+ # all_gather_coalesced is useless, it doesn't work under NCCL and does lots of copies under Gloo
344
+ # all_gather is useless too because it's single tensor
345
+ # NCCL's PG::all_gather with multiple tensors is broken, it only works for the multi-device setting
346
+ # and fails if you mix same-size with different-size tensor lists.
347
+ # _coalescing_manager crashed NCCL when used with all_gather_into_tensor.
348
+ if input_tensors[0].is_cpu or not async_op:
349
+ work_list = []
350
+ out_tensors_sliced = [
351
+ list(torch.chunk(out_tensor, dist.get_world_size(group)))
352
+ for out_tensor in output_tensors
353
+ ]
354
+ for shard, out_tensor in zip(input_tensors, out_tensors_sliced):
355
+ work = c10d.all_gather(out_tensor, shard, group=group, async_op=async_op)
356
+ work_list.append(work)
357
+ return work_list
358
+ else:
359
+ with c10d._coalescing_manager(group=group, async_ops=True) as cm:
360
+ for in_t, out_t in zip(input_tensors, output_tensors):
361
+ dist.all_gather_into_tensor(out_t, in_t, group=group, async_op=True)
362
+ return cm
363
+
364
+
365
+ def _reduce_scatter_tensor_coalesced_fallback(
366
+ output_tensors, input_tensors, op, group, async_op=False
367
+ ):
368
+ # All the same reasons as the all_gather fallback
369
+ work_list = []
370
+ for shard, out_tensor in zip(input_tensors, output_tensors):
371
+ work = c10d.reduce_scatter_tensor(
372
+ out_tensor, shard, op=op, group=group, async_op=async_op
373
+ )
374
+ work_list.append(work)
375
+ return work_list
376
+
377
+
378
+ def _all_to_all_single(
379
+ input: torch.Tensor,
380
+ output_split_sizes: Optional[List[int]],
381
+ input_split_sizes: Optional[List[int]],
382
+ tag: str,
383
+ ranks: List[int],
384
+ group_size: int,
385
+ ):
386
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
387
+
388
+ if output_split_sizes is not None:
389
+ torch._check(
390
+ input.dim() >= 1,
391
+ lambda: f"Expected input to have at least 1 dim but got {input.dim()} dim",
392
+ )
393
+ out_size = list(input.size())
394
+ out_size[0] = sum(output_split_sizes)
395
+ out_tensor = input.new_empty(out_size)
396
+ else:
397
+ out_tensor = input.new_empty(input.size())
398
+
399
+ work = c10d.all_to_all_single(
400
+ out_tensor,
401
+ input,
402
+ output_split_sizes=output_split_sizes,
403
+ input_split_sizes=input_split_sizes,
404
+ group=group,
405
+ async_op=True,
406
+ )
407
+ _register_tensor_work(out_tensor, work)
408
+
409
+ return out_tensor
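
The debug/testing helpers named in this module's docstring can be exercised directly in eager mode. Below is a rough sketch, assuming an initialized process group and the legacy (non-native) funcol path; with the native funcol path, work tracking lives in C++ and these helpers may report nothing.

import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.distributed._functional_collectives_impl as funcol_impl

t = torch.ones(4)
out = funcol.all_reduce(t, "sum", group=dist.group.WORLD)

print(funcol_impl._outstanding_wait_count())  # registrations still awaiting a wait()
print(funcol_impl._tensor_needs_wait(out))    # True while the collective is outstanding
funcol_impl._wait_all()                       # force-wait every outstanding collective
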
venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ # Keep old package for BC purposes; this file should be removed once
2
+ # everything moves to the `torch.distributed._shard` package.
3
+ import sys
4
+ import torch
5
+ import warnings
6
+
7
+ from torch.distributed._shard.sharded_tensor import * # noqa: F403
8
+ warnings.warn(
9
+ "torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead",
10
+ DeprecationWarning
11
+ )
12
+ sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor
venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (561 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ # Keep old package for BC purposes; this file should be removed once
2
+ # everything moves to the `torch.distributed._shard` package.
3
+ import sys
4
+ import torch
5
+ import warnings
6
+
7
+ from torch.distributed._shard.sharding_spec import * # noqa: F403
8
+ warnings.warn(
9
+ "torch.distributed._sharding_spec will be deprecated, use torch.distributed._shard.sharding_spec instead",
10
+ DeprecationWarning
11
+ )
12
+
13
+ import torch.distributed._shard.sharding_spec as _sharding_spec
14
+ sys.modules['torch.distributed._sharding_spec'] = _sharding_spec
venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (591 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/api.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/batch_dim_utils.cpython-310.pyc ADDED
Binary file (5.38 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/comm_tensor.cpython-310.pyc ADDED
Binary file (6.89 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/config.cpython-310.pyc ADDED
Binary file (963 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/data_parallel.cpython-310.pyc ADDED
Binary file (17.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/distribute.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/experimental_ops.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/gm_transformation.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_optimization.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_utils.cpython-310.pyc ADDED
Binary file (4.96 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/iter_graph_module.cpython-310.pyc ADDED
Binary file (22.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/log_utils.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/parallel_mode.cpython-310.pyc ADDED
Binary file (7.08 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/partial_lower.cpython-310.pyc ADDED
Binary file (8.97 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/_spmd/api.py ADDED
@@ -0,0 +1,575 @@
1
+ from abc import ABC, abstractmethod
2
+ from contextlib import contextmanager, nullcontext
3
+ from copy import copy
4
+ from dataclasses import dataclass
5
+ from functools import partial, wraps
6
+ from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Union
7
+
8
+ from functorch import make_fx
9
+
10
+ import torch
11
+ import torch.distributed as dist
12
+
13
+ # We need to import _functional_collectives to trigger op registration
14
+ import torch.distributed._functional_collectives
15
+ import torch.nn as nn
16
+ import torch.utils._pytree as pytree
17
+
18
+ from torch import fx
19
+ from torch._decomp.decompositions import native_layer_norm_backward
20
+
21
+ from torch._subclasses.fake_tensor import FakeTensorMode
22
+ from torch.distributed._spmd.data_parallel import gradients_tagging
23
+ from torch.distributed._spmd.parallel_mode import (
24
+ DataParallel,
25
+ DTensorExpandMode,
26
+ ParallelMode,
27
+ )
28
+ from torch.distributed._tensor import Placement
29
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo, CodeGen
30
+ from torch.nn.utils import stateless
31
+ from torch.nn.utils._named_member_accessor import NamedMemberAccessor
32
+
33
+
34
+ class Override(ABC):
35
+ r"""Override the tracing and transformation behavior of :meth:`~torch.distributed._spmd.compile`.
36
+
37
+ This is useful when any part of the model is not traceable or if you prefer
38
+ not to trace it for any reason. More specifically, users can implement
39
+ :meth:`torch.distributed._spmd.Override.replacement` to replace an original
40
+ submodule with the returned new submodule. The new submodule contains
41
+ operations that users prefer to be traced, which can simply be a dummy
42
+ placeholder operator. After tracing, users can implement
43
+ :meth:`torch.distributed._spmd.Override.transform` to transform the traced
44
+ graph, where the dummy placeholder operator serves as an anchor to insert
45
+ new sub-graphs.
46
+ """
47
+
48
+ @abstractmethod
49
+ def replacement(self, fqn: str, orig_submodule: torch.nn.Module) -> torch.nn.Module:
50
+ r"""Implement this method to return a new :class:`nn.Module` instance to replace the ``orig_submodule``
51
+ argument in the model.
52
+
53
+ This helps if ``orig_submodule`` is not traceable or should not be traced.
54
+
55
+ Args:
56
+ fqn (str): fully qualified name of the submodule.
57
+ orig_submodule (:class:`nn.Module`): original submodule instance to replace.
58
+
59
+ Returns:
60
+ A new :class:`nn.Module` instance to replace the original one.
61
+
62
+ """
63
+ pass
64
+
65
+ @abstractmethod
66
+ def transform(
67
+ self,
68
+ gm: fx.GraphModule,
69
+ flat_state: List[torch.Tensor],
70
+ ) -> fx.GraphModule:
71
+ r"""
72
+ Given a DTensor-expanded graph and sharding schema for every node,
73
+ conduct additional transformation for the sub-graph from the :class:`nn.Module`
74
+ returned by :meth:`torch.distributed._spmd.Override.replacement` if
75
+ necessary.
76
+
77
+ Args:
78
+ gm (:class:`fx.GraphModule`): a DTensor-expanded graph.
79
+ flat_state (List[:class:`Tensor`]): a reference to the list of
80
+ flattened state. The elements in ``flat_state`` map to the first
81
+ ``len(flat_state)`` placeholders in the graph. The transformation
82
+ can add state to or remove state from ``flat_state`` as long as
83
+ it keeps ``flat_state`` and the placeholders consistent.
84
+
85
+ Returns:
86
+ The :class:`fx.GraphModule` after transformation.
87
+
88
+ """
89
+ pass
90
+
91
+
92
+ class _PyTreeCodeGenOutputsOnly(_PyTreeCodeGen):
93
+ # pyre-ignore[3]
94
+ def process_inputs(self, *args: Any) -> Any:
95
+ return args
96
+
97
+ # pyre-ignore[2, 3]
98
+ def gen_fn_def(self, free_vars, maybe_return_annotation):
99
+ return CodeGen.gen_fn_def(self, free_vars, maybe_return_annotation)
100
+
101
+
102
+ def _to_caller_flattened_graph_module(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
103
+ """Move the responsibility of flattening the input arguments from the graph module to the caller.
104
+
105
+ Example:
106
+
107
+ output = gm(my_struct)
108
+
109
+ gm = _to_caller_flattened_graph_module(gm)
110
+
111
+ output = gm(*pytree.flatten(my_struct)[0])
112
+
113
+ """
114
+ # pyre-ignore[16]
115
+ gm._graph._codegen = _PyTreeCodeGenOutputsOnly(
116
+ pytree_info=_PyTreeInfo(
117
+ # pyre-ignore[6]
118
+ orig_args=None, # type: ignore[arg-type]
119
+ # pyre-ignore[6]
120
+ in_spec=None, # type: ignore[arg-type]
121
+ # pyre-ignore[16]
122
+ out_spec=gm._graph._codegen.pytree_info.out_spec,
123
+ )
124
+ )
125
+ gm.recompile()
126
+ return gm
127
+
128
+
129
+ # Use a dtensor expand mode for now to preserve the old behavior
130
+ # and avoid breaking existing code
131
+ dtensor_expand_mode = DTensorExpandMode()
132
+
133
+
134
+ def _override_placements(t: torch.Tensor, placements: List[Placement]):
135
+ global dtensor_expand_mode
136
+ dtensor_expand_mode._placements_override[id(t)] = placements
137
+
138
+
139
+ @contextmanager
140
+ def _rematerialize_optimizer(
141
+ opt: torch.optim.Optimizer,
142
+ named_states: Dict[str, Any],
143
+ params: Dict[str, nn.Parameter],
144
+ ):
145
+ assert opt is not None
146
+
147
+ # update opt.state with proxy tensors
148
+ orig_states = copy(opt.state)
149
+ for n in named_states:
150
+ # named_states uses strings as keys, while the optimizer state uses Parameters as keys
151
+ opt.state[params[n]] = named_states[n] # type: ignore[index]
152
+
153
+ # FIXME: support multiple parameter groups
154
+ param_group = opt.param_groups[0]
155
+ orig_params = param_group["params"]
156
+ param_group["params"] = params.values()
157
+
158
+ try:
159
+ yield
160
+ finally:
161
+ param_group["params"] = orig_params
162
+ opt.state = orig_states
163
+
164
+
165
+ aten = torch.ops.aten # pyre-ignore
166
+
167
+
168
+ @contextmanager
169
+ def _enable_compile():
170
+ # The return value of torch._utils.is_compiling changes optimizer behavior.
171
+ # We need that function to return True to include optimizer in the graph.
172
+ # See: https://github.com/pytorch/pytorch/blob/a524123c91ab399c9dd6882c1189596dd77e7734/torch/optim/optimizer.py#L41
173
+ def f_true():
174
+ return True
175
+
176
+ orig_is_compiling_code = torch._utils.is_compiling.__code__
177
+ torch._utils.is_compiling.__code__ = f_true.__code__
178
+ try:
179
+ yield
180
+ finally:
181
+ torch._utils.is_compiling.__code__ = orig_is_compiling_code
182
+
183
+
184
+ def _foreach_add_decomp(self, other, alpha=1):
185
+ self_updated = aten._foreach_add.List(self, other, alpha=alpha)
186
+ for s, s_u in zip(self, self_updated):
187
+ s.copy_(s_u)
188
+
189
+
190
+ def _foreach_unaop_decomp(op, self):
191
+ self_updated = op(self)
192
+ for s, s_u in zip(self, self_updated):
193
+ s.copy_(s_u)
194
+
195
+
196
+ def _foreach_binop_list_decomp(op, self, other):
197
+ self_updated = op(self, other)
198
+ for s, s_u in zip(self, self_updated):
199
+ s.copy_(s_u)
200
+
201
+
202
+ def _foreach_binop_scalar_decomp(op, self, scalar=1):
203
+ self_updated = op(self, scalar)
204
+ for s, s_u in zip(self, self_updated):
205
+ s.copy_(s_u)
206
+
207
+
208
+ def _foreach_addcop_scalar_decomp(op, self, tensor1, tensor2, scalar=1):
209
+ self_updated = op(self, tensor1, tensor2, scalar)
210
+ for s, s_u in zip(self, self_updated):
211
+ s.copy_(s_u)
212
+
213
+
214
+ def _fused_adam_decomp(
215
+ self,
216
+ grads,
217
+ exp_avgs,
218
+ exp_avg_sqs,
219
+ max_exp_avg_sqs,
220
+ state_steps,
221
+ *,
222
+ lr=1,
223
+ beta1=1,
224
+ beta2=1,
225
+ weight_decay=1,
226
+ eps=1,
227
+ amsgrad=True,
228
+ maximize=True,
229
+ grad_scale=None,
230
+ found_inf=None,
231
+ ):
232
+ orig_tuple = (self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs)
233
+ updated_tuple = aten._fused_adam.default(
234
+ self,
235
+ grads,
236
+ exp_avgs,
237
+ exp_avg_sqs,
238
+ max_exp_avg_sqs,
239
+ state_steps,
240
+ lr=lr,
241
+ beta1=beta1,
242
+ beta2=beta2,
243
+ weight_decay=weight_decay,
244
+ eps=eps,
245
+ amsgrad=amsgrad,
246
+ maximize=maximize,
247
+ grad_scale=grad_scale,
248
+ found_inf=found_inf,
249
+ )
250
+
251
+ for idx, (orig, updated) in enumerate(zip(orig_tuple, updated_tuple)):
252
+ if idx == 1:
253
+ # skip gradient copying as we don't need to copy gradients back
254
+ continue
255
+ for o, u in zip(orig, updated):
256
+ o.copy_(u)
257
+
258
+
259
+ SPMD_DECOMP_TABLE = {
260
+ aten._foreach_add_.List: _foreach_add_decomp,
261
+ aten._foreach_add_.Scalar: partial(
262
+ _foreach_binop_scalar_decomp, aten._foreach_add.Scalar
263
+ ),
264
+ aten._foreach_addcdiv_.Scalar: partial(
265
+ _foreach_addcop_scalar_decomp, aten._foreach_addcdiv.Scalar
266
+ ),
267
+ aten._foreach_addcmul_.Scalar: partial(
268
+ _foreach_addcop_scalar_decomp, aten._foreach_addcmul.Scalar
269
+ ),
270
+ aten._foreach_div_.List: partial(
271
+ _foreach_binop_list_decomp, aten._foreach_div.List
272
+ ),
273
+ aten._foreach_mul_.Scalar: partial(
274
+ _foreach_binop_scalar_decomp, aten._foreach_mul.Scalar
275
+ ),
276
+ aten._foreach_div_.Scalar: partial(
277
+ _foreach_binop_scalar_decomp, aten._foreach_div.Scalar
278
+ ),
279
+ aten._foreach_neg_.default: partial(
280
+ _foreach_unaop_decomp, aten._foreach_neg.default
281
+ ),
282
+ aten._foreach_reciprocal_.default: partial(
283
+ _foreach_unaop_decomp, aten._foreach_reciprocal.default
284
+ ),
285
+ aten._foreach_sqrt_.default: partial(
286
+ _foreach_unaop_decomp, aten._foreach_sqrt.default
287
+ ),
288
+ aten._foreach_sub_.Scalar: partial(
289
+ _foreach_binop_scalar_decomp, aten._foreach_sub.Scalar
290
+ ),
291
+ aten._fused_adam_.default: _fused_adam_decomp,
292
+ aten.native_layer_norm_backward.default: native_layer_norm_backward,
293
+ }
294
+
295
+
296
+ DEDUP_TARGETS: Set[torch._ops.OpOverload] = {
297
+ torch.ops.c10d_functional.all_reduce.default,
298
+ torch.ops.c10d_functional.wait_tensor.default,
299
+ }
300
+
301
+
302
+ def _dedup_collectives(gm: fx.GraphModule) -> fx.GraphModule:
303
+ args_to_node: Dict[Tuple[Any, ...], fx.Node] = {}
304
+
305
+ for node in gm.graph.nodes:
306
+ # replace all args with the results from the first unique comm op
307
+ args = pytree.arg_tree_leaves(*node.args)
308
+
309
+ if node.target in DEDUP_TARGETS:
310
+ args_key = (node.target, *args)
311
+ unique_node = args_to_node.get(args_key, None)
312
+ if unique_node is None:
313
+ # first time seeing this combination, remember it
314
+ args_to_node[args_key] = node
315
+ else:
316
+ # the current node is a duplicate, replace it
317
+ node.replace_all_uses_with(unique_node)
318
+ gm.graph.erase_node(node)
319
+
320
+ gm.recompile()
321
+
322
+ return gm
323
+
324
+
325
+ @dataclass
326
+ class _CompiledResult:
327
+ gm: fx.GraphModule
328
+ mod: nn.Module
329
+ opt: Optional[torch.optim.Optimizer]
330
+ flat_state: List[torch.Tensor]
331
+
332
+
333
+ def _compile(
334
+ func: Callable,
335
+ module_override: Optional[List[Override]],
336
+ parallel_mode: ParallelMode,
337
+ *args: Any,
338
+ **kwargs: Any,
339
+ ) -> _CompiledResult:
340
+ # 1. Extract nn.Module and Optimizer from args and kwargs
341
+ # FIXME(@mrshenli): support multiple nn.Module instances
342
+ # FIXME(@mrshenli): support multiple Optimizer instances
343
+ # FIXME(@mrshenli): need to broadcast model to sync parameters
344
+ mod, opt = None, None
345
+ for arg in pytree.arg_tree_leaves(*args, **kwargs):
346
+ if isinstance(arg, nn.Module):
347
+ assert mod is None, "Only support single nn.Module for now"
348
+ mod = arg
349
+ if isinstance(arg, torch.optim.Optimizer):
350
+ assert opt is None, "Only support single Optimizer for now"
351
+ opt = arg
352
+
353
+ assert mod is not None, "Couldn't find nn.Module instances from the arguments."
354
+
355
+ # 2. Override target submodules (e.g., MoE) with dummy replacements
356
+ if module_override:
357
+ accessor = NamedMemberAccessor(mod)
358
+
359
+ def swap(fqn_prefix: str, module: torch.nn.Module) -> None:
360
+ for override in module_override: # type: ignore[union-attr]
361
+ for name, child in module.named_children():
362
+ if len(name) == 0:
363
+ continue
364
+ fqn = fqn_prefix + "." + name if fqn_prefix != "" else name
365
+ new_child = override.replacement(fqn, child)
366
+ if id(new_child) == id(child):
367
+ swap(fqn, new_child)
368
+ else:
369
+ accessor.swap_submodule(fqn, new_child)
370
+
371
+ swap("", mod)
372
+
373
+ # 3. Trace the stateless version of the train_step
374
+ params = dict(mod.named_parameters(remove_duplicate=False))
375
+ buffers = dict(mod.named_buffers(remove_duplicate=False))
376
+
377
+ named_states = {}
378
+ if opt is not None:
379
+ # Pass named_states instead of opt.state to stateless_func, because
380
+ # the latter uses nn.Parameter as key. During tracing, we need to
381
+ # make sure optimizers can find the states using proxy tensors.
382
+ for n, p in params.items():
383
+ if p in opt.state:
384
+ # named_states uses strings as keys, while the optimizer uses
385
+ # Parameter as keys
386
+ named_states[n] = opt.state[p] # type: ignore[index]
387
+
388
+ is_data_parallel_mode = isinstance(parallel_mode, DataParallel)
389
+
390
+ # Lift states and parameters as function arguments so that make_fx
391
+ # can trace operations applied to them.
392
+ def stateless_func(func, params, buffers, named_states, args, kwargs):
393
+ with stateless._reparametrize_module(
394
+ mod, {**params, **buffers}
395
+ ), _rematerialize_optimizer(
396
+ opt, named_states, params
397
+ ) if opt else nullcontext():
398
+ # For DataParallel mode, install hooks first to tag the gradients
399
+ with gradients_tagging(params) if is_data_parallel_mode else nullcontext():
400
+ ret = func(*args, **kwargs)
401
+
402
+ # make sure updated parameters are returned
403
+ return ret, list(mod.parameters()), list(named_states.values()) # type: ignore[union-attr]
404
+
405
+ # FIXME: Using symbolic tracing as a workaround in DTensor expand mode.
406
+ # Otherwise it hits shape mismatch error, as we use local inputs to
407
+ # trace local graph and use DTensor to expand operators, where
408
+ # DTensor's shape is the global shape.
409
+ tracing_mode = "fake" if is_data_parallel_mode else "symbolic"
410
+
411
+ if is_data_parallel_mode:
412
+ fake_mode = FakeTensorMode()
413
+ data_parallel_mode = cast(DataParallel, parallel_mode)
414
+
415
+ def _get_full_batch_arg(arg: torch.Tensor) -> torch.Tensor:
416
+ # since compilation happens in the first iteration and we
417
+ # receive mini-batch input, convert it to full-batch
418
+ # fake tensor input first for data parallel sharding
419
+ # propagations
420
+ fake_arg = fake_mode.from_tensor(arg)
421
+ arg_dims = [1] * arg.ndim
422
+ # expand the tensor to full batch size on its batch dim
423
+ arg_dims[data_parallel_mode.input_batch_dim] *= dist.get_world_size()
424
+ return fake_arg.repeat(arg_dims)
425
+
426
+ args = pytree.tree_map_only(
427
+ torch.Tensor,
428
+ _get_full_batch_arg,
429
+ args,
430
+ )
431
+ kwargs = pytree.tree_map_only(
432
+ torch.Tensor,
433
+ _get_full_batch_arg,
434
+ kwargs,
435
+ )
436
+
437
+ with _enable_compile(), torch.autograd.detect_anomaly(check_nan=False):
438
+ # FIXME(@mrshenli): functionalization does not work for our use
439
+ # case yet. Use explicit decompositions for foreach ops.
440
+ # Remove this when the following issue is addressed.
441
+ # Issue: https://github.com/pytorch/pytorch/issues/97852
442
+ gm = make_fx(
443
+ partial(stateless_func, func),
444
+ tracing_mode=tracing_mode,
445
+ decomposition_table=SPMD_DECOMP_TABLE,
446
+ _allow_non_fake_inputs=False,
447
+ )(params, buffers, named_states, args, kwargs)
448
+
449
+ params_and_buffers: Dict[str, Union[torch.Tensor, nn.Parameter]] = {
450
+ **params,
451
+ **buffers,
452
+ }
453
+
454
+ # 4. parallel mode to expand a single device graph to a distributed graph
455
+ gm = parallel_mode.partition(
456
+ gm,
457
+ mod,
458
+ opt,
459
+ params_and_buffers,
460
+ named_states,
461
+ args,
462
+ kwargs,
463
+ )
464
+
465
+ # 5. Move the responsibility of flattening the input arguments from the
466
+ # graph module to the caller. This serves two purposes:
467
+ # - Transformations that add/remove state need to manipulate a state
468
+ # container that maintains the state tensors in the same order as they
469
+ # appear in graph placeholders.
470
+ # - Reduced runtime cost. The state container is only flattened once upfront.
471
+ flat_state = pytree.tree_leaves([params_and_buffers, named_states])
472
+ gm = _to_caller_flattened_graph_module(gm)
473
+
474
+ # 6. dedup comm operators.
475
+ # The duplication could come from DTensor args and kwargs redistribution.
476
+ # Suppose one operator produces a Partial gradient tensor and model
477
+ # parameters are replicated. In this case, every optimizer operation using
478
+ # that Partial gradient tensor would trigger an allreduce. This is because
479
+ # DTensor only has local information on individual tensor/operator, which is
480
+ # not sufficient to detect duplications in the graph. This situation can
481
+ # also happen when inserting FSDP allgather if a parameter is used multiple
482
+ # times in the forward method.
483
+ # TODO(@mrshenli): @yifuwang has a suggestion of conducting expansion and
484
+ # dedup at tracer-level to avoid multiple graph passes.
485
+ gm = _dedup_collectives(gm)
486
+
487
+ # 7. Replace the previously inserted dummy submodules with real sub-graphs.
488
+ if module_override:
489
+ for override in module_override:
490
+ gm = override.transform(gm, flat_state)
491
+
492
+ return _CompiledResult(gm, mod, opt, flat_state)
493
+
494
+
495
+ # Note that the Python convention of __dict__ requires the key to be str.
496
+ # TODO: ensure the key is unique.
497
+ COMPILED_OBJECT_KEY = "_compiled_obj"
498
+
499
+
500
+ def compile(
501
+ module_override: Optional[List[Override]] = None,
502
+ gm_transformation: Optional[Callable[[fx.GraphModule], fx.GraphModule]] = None,
503
+ parallel_mode: Optional[ParallelMode] = None,
504
+ ):
505
+ r"""Compile and optimize a callable, which can be a train step within a training loop.
506
+
507
+ This method will extract :class:`nn.Module` and :class:`torch.optim.Optimizer`
508
+ instances from the input arguments and trace operations applied to their
509
+ parameters and states.
510
+
511
+ Args:
512
+ module_override (Optional[List[Override]]): a list of Override instances
513
+ that will be applied to the module in order. The :class:`Override`
514
+ objects provide :class:`nn.Module` replacements during tracing and a
515
+ graph transformation function after tracing. (Default: ``None``)
516
+ gm_transformation (Optional[Callable[fx.GraphModule, fx.GraphModule]]):
517
+ a callback that will be called after the original callable is
518
+ compiled and distributed (usually after the first iteration) to
519
+ transform the compiled GraphModule into a new optimized one.
520
+ parallel_mode (Optional[ParallelMode]): a :class:`ParallelMode` object
521
+ that specifies how to parallelize the callable. Each ParallelMode
522
+ would have its own strategy to partition the model and the captured
523
+ graph (Default: ``None``)
524
+
525
+ """
526
+
527
+ def inner(func: Callable):
528
+ @wraps(func)
529
+ def wrapper(*args, **kwargs):
530
+ last_train_step = kwargs.pop("last_train_step", False) if kwargs else False
531
+ first_iter = False
532
+ # Put the COMPILED_OBJECT_KEY in ``wrapper`` instead of ``func`` as
533
+ # ``wrapper`` is the one that users will get.
534
+ compiled_obj = wrapper.__dict__.get(COMPILED_OBJECT_KEY, None)
535
+ if compiled_obj is None:
536
+ first_iter = True
537
+ global dtensor_expand_mode
538
+ mode: ParallelMode = (
539
+ dtensor_expand_mode if parallel_mode is None else parallel_mode
540
+ )
541
+
542
+ compiled_obj = _compile(func, module_override, mode, *args, **kwargs)
543
+ wrapper.__dict__[COMPILED_OBJECT_KEY] = compiled_obj
544
+
545
+ flat_inps = compiled_obj.flat_state + pytree.arg_tree_leaves(
546
+ *args, **kwargs
547
+ )
548
+
549
+ with torch.no_grad():
550
+ # N.B.: we don't need autograd as backward has already been
551
+ # captured in the graph.
552
+ if first_iter and gm_transformation:
553
+ # TODO: SPMD should provide a default and configurable
554
+ # transformation.
555
+ compiled_obj.gm = gm_transformation(compiled_obj.gm)
556
+ if not last_train_step:
557
+ output = compiled_obj.gm(*flat_inps)[0]
558
+ else:
559
+ # This is the last train step. Call IterGraphModule.forward()
560
+ # with the `last_iter` argument and catch the exception in
561
+ # case the compiled_obj is not wrapped with IterGraphModule.
562
+ try:
563
+ output = compiled_obj.gm(*flat_inps, last_iter=last_train_step)[
564
+ 0
565
+ ]
566
+ except TypeError as e:
567
+ if "last_iter" not in str(e):
568
+ raise e
569
+ output = compiled_obj.gm(*flat_inps)[0]
570
+
571
+ return output
572
+
573
+ return wrapper
574
+
575
+ return inner
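
For context, a minimal usage sketch of the `compile` decorator defined above; `model`, `opt`, and `inp` are placeholders supplied by the caller, and an initialized process group is assumed.

import torch
import torch.nn as nn
from torch.distributed._spmd.api import compile

@compile()  # optionally pass module_override / gm_transformation / parallel_mode
def train_step(model: nn.Module, opt: torch.optim.Optimizer, inp: torch.Tensor):
    # forward, backward, and the optimizer step are all captured into one graph
    model(inp).sum().backward()
    opt.step()

# The first call traces, expands, and caches the compiled graph under
# wrapper.__dict__[COMPILED_OBJECT_KEY]; subsequent calls reuse it:
#   train_step(model, opt, inp)
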
venv/lib/python3.10/site-packages/torch/distributed/_spmd/batch_dim_utils.py ADDED
@@ -0,0 +1,179 @@
1
+ from typing import Callable, Dict, List, Set
2
+
3
+ import torch
4
+
5
+ import torch.fx as fx
6
+
7
+ import torch.utils._pytree as pytree
8
+
9
+ from torch import Tensor
10
+
11
+ from torch.distributed._tensor import DeviceMesh, Replicate, Shard
12
+ from torch.distributed._tensor.ops.view_ops import (
13
+ DimSpec,
14
+ InputDim,
15
+ ops as view_op_rules,
16
+ )
17
+ from torch.distributed._tensor.placement_types import _Partial, DTensorSpec
18
+
19
+ aten = torch.ops.aten
20
+
21
+
22
+ class BatchDimAnalyzer:
23
+ """This class is used to analyze the batch dimension of each tensor/node in the graph.
24
+
25
+ We need to know the batch dimension of each tensor/node so that we know
26
+ exactly the sharding layout of intermediate tensors.
27
+
28
+ We possibly should evaluate using symbolic shapes to track the batch dimension.
29
+ We can experiment with this later via dynamo integration (as dynamo has a mark_dynamic
30
+ API which allows marking only the batch dimension) or try to use FakeTensorMode to
31
+ mark the batch dimension. For now, let's just use the batch dimension of the first
32
+ input tensor as the hint to track the batch dimension of all tensors/nodes in
33
+ the graph.
34
+ """
35
+
36
+ def __init__(self, batch_dim: int = 0) -> None:
37
+ self.batch_dim = batch_dim
38
+
39
+ self.batch_dim_map: Dict[fx.Node, int] = {}
40
+ # batch dim size is used to track the batch dim size of the input tensor
41
+ self.batch_dim_size = -1
42
+
43
+ self.dim_rule_map: Dict[torch._ops.OpOverload, Callable[..., torch.Tensor]] = {
44
+ aten.squeeze.default: torch.squeeze,
45
+ aten.squeeze.dim: torch.squeeze,
46
+ aten.view.default: Tensor.view,
47
+ aten.reshape.default: torch.reshape,
48
+ aten._unsafe_view.default: Tensor.view,
49
+ aten.unsqueeze.default: torch.unsqueeze,
50
+ aten.expand.default: Tensor.expand,
51
+ aten.permute.default: torch.permute,
52
+ aten.repeat.default: Tensor.repeat,
53
+ aten.transpose.int: torch.transpose,
54
+ }
55
+
56
+ def init_batch_dim_size(self, batch_dim_size: int) -> None:
57
+ """Initialize batch dim size base on the first input batch size."""
58
+ if self.batch_dim_size != -1 and self.batch_dim_size != batch_dim_size:
59
+ raise RuntimeError(
60
+ f"batch dim size is already initialized! "
61
+ f"Found new batch size: {batch_dim_size} not "
62
+ f"matching existing batch dim size: {self.batch_dim_size}!"
63
+ )
64
+ self.batch_dim_size = batch_dim_size
65
+
66
+ def set_batch_dim(self, node: fx.Node, batch_dim: int) -> None:
67
+ self.batch_dim_map[node] = batch_dim
68
+
69
+ def get_batch_dim(self, node: fx.Node) -> int:
70
+ if node not in self.batch_dim_map:
71
+ raise RuntimeError(f"batch dim analysis failed on node: {node}!")
72
+ return self.batch_dim_map[node]
73
+
74
+ def compute_batch_dim(self, node: fx.Node, full_reduction=False) -> int:
75
+ """Compute the batch dimension for the `node`."""
76
+ assert self.batch_dim_size != -1, "batch dim size is not initialized!"
77
+
78
+ if node in self.batch_dim_map:
79
+ # if batch dim already computed, simply return it
80
+ return self.batch_dim_map[node]
81
+
82
+ if node.target in self.dim_rule_map:
83
+ view_op_rule = view_op_rules[self.dim_rule_map[node.target]] # type: ignore[index]
84
+ args_val = pytree.tree_map_only(fx.Node, lambda n: n.meta["val"], node.args)
85
+ kwargs_val = pytree.tree_map_only(
86
+ fx.Node, lambda n: n.meta["val"], node.kwargs
87
+ )
88
+ output_dim_rules = view_op_rule.dim_map(*args_val, **kwargs_val)
89
+
90
+ def collect_input_dim(cmd: DimSpec, input_dims: Set[int]):
91
+ if isinstance(cmd, InputDim):
92
+ input_dims.add(cmd.input_dim)
93
+ for inp in cmd.inputs():
94
+ collect_input_dim(inp, input_dims)
95
+
96
+ output_dim_to_input_dims: List[Set[int]] = []
97
+ for inp in output_dim_rules:
98
+ input_dims: Set[int] = set()
99
+ collect_input_dim(inp, input_dims=input_dims)
100
+ output_dim_to_input_dims.append(input_dims)
101
+
102
+ operand = node.all_input_nodes[0]
103
+ operand_batch_dim = self.get_batch_dim(operand)
104
+ for output_dim, input_dims in enumerate(output_dim_to_input_dims):
105
+ if operand_batch_dim in input_dims:
106
+ self.set_batch_dim(node, output_dim)
107
+ # update batch dim size before return
108
+ # this is because batch dim size might change during the middle
109
+ self.batch_dim_size = node.meta["val"].shape[output_dim]
110
+ return output_dim
111
+
112
+ # if there are no hints from the output_dim_rules, we infer from the output
113
+ # shape to see if there's batch dim, and shard correspondingly
114
+ node_val = node.meta["val"]
115
+ if isinstance(node_val, (list, tuple)):
116
+ shapes = [val.shape for val in node_val]
117
+ else:
118
+ shapes = [node_val.shape]
119
+
120
+ # for reduction op that reduces over the sharded batch dim
121
+ # we don't generate partial, but rather, we generate shard
122
+ # This is because the intention of data parallel is to never
123
+ # do a full reduction across the batch dimension; it would still
124
+ # keep the reduction activation as sharded.
125
+ full_reduction = False
126
+ # loop through the dim size to find the output batch dim
127
+ for shape in shapes:
128
+ if len(shape) == 0:
129
+ full_reduction = True
130
+
131
+ for i, dim_size in enumerate(shape):
132
+ if dim_size == self.batch_dim_size:
133
+ self.set_batch_dim(node, i)
134
+ return i
135
+
136
+ operands = node.all_input_nodes
137
+ if not operands:
138
+ # if there are no operands, it must be a factory op producing a tensor
139
+ # for computation, and it should be marked as replicated
140
+ self.set_batch_dim(node, -1)
141
+ # -1 means replicated
142
+ return -1
143
+ else:
144
+ # if there are operands, check whether any operand has a batch dim; if an
145
+ # operand has a batch dim but the output does not, it's either a full reduction,
146
+ # where we should stay sharded, or a reduction over the batch dim only,
147
+ # where we should produce partial
148
+ operand_batch_dim = -1
149
+ for operand in operands:
150
+ if operand in self.batch_dim_map:
151
+ operand_batch_dim = self.get_batch_dim(operand)
152
+ # self.get_batch_dim(operands[0])
153
+ if operand_batch_dim < 0:
154
+ # if the operand does not have a batch dim, this node doesn't either
155
+ self.set_batch_dim(node, operand_batch_dim)
156
+ return operand_batch_dim
157
+ elif full_reduction:
158
+ self.set_batch_dim(node, operand_batch_dim)
159
+ return operand_batch_dim
160
+ else:
161
+ # if the operand has a batch dim but the output does not, the output
162
+ # should be partial; we use -2 to indicate partial
163
+ self.set_batch_dim(node, -2)
164
+ return -2
165
+
166
+ def compute_act_spec(self, node: fx.Node, mesh: DeviceMesh) -> DTensorSpec:
167
+ """Compute the batch dimension for the current node, then generate the sharding spec that shards on the batch dimension."""
168
+ node_batch_dim = self.compute_batch_dim(node)
169
+ if node_batch_dim == -1:
170
+ # indicate this activation is replicated
171
+ act_spec = DTensorSpec(mesh=mesh, placements=(Replicate(),))
172
+ elif node_batch_dim == -2:
173
+ # indicate this activation is partial
174
+ act_spec = DTensorSpec(mesh=mesh, placements=(_Partial(),))
175
+ else:
176
+ # indicate this activation is Shard
177
+ act_spec = DTensorSpec(mesh=mesh, placements=(Shard(node_batch_dim),))
178
+
179
+ return act_spec
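(Illustrative note, not part of the diffed file.) The analyzer above encodes replication as batch dim -1, a partial result as -2, and any non-negative value as the sharded dimension. A minimal sketch of how it is typically driven; the mesh, graph, and batch size of 32 are assumptions:

from torch.distributed._spmd.batch_dim_utils import BatchDimAnalyzer

analyzer = BatchDimAnalyzer(0)                     # batch dim 0
analyzer.init_batch_dim_size(32)                   # batch size of the first activation
# for node in graph_module.graph.nodes:            # hypothetical fx graph walk
#     spec = analyzer.compute_act_spec(node, mesh) # Shard(d) / Replicate() / _Partial()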
venv/lib/python3.10/site-packages/torch/distributed/_spmd/comm_tensor.py ADDED
@@ -0,0 +1,247 @@
1
+ from dataclasses import dataclass
2
+ from functools import partial
3
+ from typing import Any, List, Optional, Tuple
4
+
5
+ import torch
6
+ from torch._C import _disabled_torch_function_impl
7
+ from torch.fx.experimental.proxy_tensor import (
8
+ _ProxyTensor,
9
+ fetch_object_proxy,
10
+ get_innermost_proxy_mode,
11
+ get_proxy_slot,
12
+ set_proxy_slot,
13
+ track_tensor_tree,
14
+ )
15
+ from torch.utils import _pytree as pytree
16
+ from torch.utils._mode_utils import no_dispatch
17
+ from torch.utils._pytree import tree_flatten, tree_map, tree_map_only
18
+
19
+
20
+ @dataclass
21
+ class _CommResult:
22
+ # a custom type wrapping both inplace output tensor and work handle
23
+ _tensor: torch.Tensor
24
+ _work: torch.distributed._Work
25
+
26
+
27
+ def _wait_comm(comm_result: _CommResult):
28
+ # This function is only used by tracing mode as a call_function node right
29
+ # before consuming a collective result tensor.
30
+ comm_result._work.wait()
31
+ return comm_result._tensor
32
+
33
+
34
+ def _wrap_comm_result(result: Tuple[Any, Any]) -> Tuple[Any, Any]:
35
+ def wrap(work, e):
36
+ assert isinstance(e, torch.Tensor), (
37
+ "Expecting a collection of tensors as the first element in the "
38
+ "return value of communication operations."
39
+ )
40
+
41
+ return _CommResult(e, work)
42
+
43
+ # E.g.,
44
+ # allreduce_ returns ([tensor], work)
45
+ # allgather_ returns ([[tensor1, tensor2]], work)
46
+ work = result[1]
47
+ return (tree_map(partial(wrap, work), result[0]), work)
48
+
49
+
50
+ def _get_tracer() -> Optional[torch.fx.Tracer]:
51
+ mode = get_innermost_proxy_mode()
52
+ if mode is None:
53
+ return None
54
+ return mode.tracer
55
+
56
+
57
+ class CommTensor(torch.Tensor):
58
+ r"""
59
+ A Tensor subclass to wrap input tensors for collective communications.
60
+
61
+ This Tensor subclass works for both eager and tracing mode.
62
+ In eager mode, it will record whether the inplace collective communication
63
+ has been launched using this Tensor and remember the corresponding work
64
+ handle. If yes, it will explicitly call wait() in the ``__torch_dispatch__``
65
+ function before subsequent operations consuming the value of the Tensor.
66
+
67
+ In tracing mode, ``CommTensor`` inserts two nodes into the graph using the
68
+ ``__torch_dispatch__`` function.
69
+ 1. The first node is inserted right after the
70
+ communication, wrapping both the inplace output tensor and the returned
71
+ work handle into a custom ``_CommResult`` type. We have to do this because
72
+ ``ProxyTorchDispatchMode`` only handles ``torch.Tensor``, ``_ProxyTensor``,
73
+ and ``torch.nn.Parameter`` objects and will treat the work handle
74
+ as a constant and embed that into the graph. As a result, during execution,
75
+ it would use the work handle created during tracing, leading to wrong
76
+ results. The solution here is to manually create a proxy on the
77
+ return value of ``allreduce_`` which is ``([tensor], work)``, and wrap that
78
+ to ``[(_CommResult(tensor, work)), work]``. In this way, subsequent nodes can
79
+ directly consume ``_CommResult``.
80
+ 2. The second node is inserted right before any subsequent node reads from
81
+ ``_CommResult``. It will call ``wait()`` on the stashed work handle to ensure
82
+ that computation waits for communication.
83
+ """
84
+
85
+ _supported_comms: List[str] = [
86
+ "_allgather_base_",
87
+ "_reduce_scatter_base_",
88
+ "allreduce_",
89
+ "allgather_",
90
+ "alltoall_",
91
+ "broadcast_",
92
+ "reduce_scatter_",
93
+ "scatter_",
94
+ ]
95
+
96
+ _tensor: torch.Tensor
97
+ _work: Optional[torch.distributed._Work]
98
+
99
+ @staticmethod
100
+ def __new__(cls, tensor: torch.Tensor):
101
+ t = tensor._tensor if isinstance(tensor, CommTensor) else tensor
102
+ if get_innermost_proxy_mode() is None:
103
+ # noop for eager mode
104
+ return tensor
105
+
106
+ # Use the non-CommTensor tensor to avoid nested CommTensor wrapping
107
+ r = torch.Tensor._make_subclass(cls, t, require_grad=t.requires_grad)
108
+ # The tensor object wrapped by this CommTensor
109
+ # NB: THIS CAN BE A CommTensor; see test_nested_comm_tensor_wrapping
110
+ r._tensor = tensor # type: ignore[attr-defined]
111
+ # Record the LAST `work` object returned by collective communication
112
+ # operations. If this is None, it means no collectives have been called
113
+ # since the last time a tensor was wrapped by CommTensor
114
+ r._work = None # type: ignore[attr-defined]
115
+ return r
116
+
117
+ def __repr__(self):
118
+ return f"CommTensor({self._tensor}, work={self._work})"
119
+
120
+ # disable __torch_function__ so that CommTensor can recursively dispatch
121
+ # with ProxyTorchDispatchMode in make_fx
122
+ __torch_function__ = _disabled_torch_function_impl
123
+
124
+ @classmethod
125
+ def _is_supported(cls, op_name):
126
+ return any(comm in op_name for comm in cls._supported_comms)
127
+
128
+ @classmethod
129
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
130
+ # shared states when unwrapping args
131
+ tracer: Optional[torch.fx.Tracer] = None
132
+ work: Optional[torch.distributed._Work] = None
133
+
134
+ # unwrap to ._tensor if this is a CommTensor, and insert/call wait()
135
+ # if communication has been launched on this tensor.
136
+ def unwrap(e: Any):
137
+ if isinstance(e, CommTensor):
138
+ nonlocal tracer, work
139
+
140
+ work = e._work
141
+ # TODO(ezyang): I don't really understand what's going on
142
+ # here, but it seems that tracer doesn't reflect whether or
143
+ # not there is ambient tracing going on, but rather, whether
144
+ # or not we will trace THIS particular invocation. If we
145
+ # have a nested CommTensor, the outer layer doesn't actually
146
+ # trace and we only trace the inner layer
147
+ if not isinstance(e._tensor, CommTensor):
148
+ tracer = _get_tracer()
149
+
150
+ if work is not None:
151
+ if tracer is not None:
152
+ # insert a node to the traced graph.
153
+ proxy_res = tracer.create_proxy( # type: ignore[union-attr]
154
+ "call_function",
155
+ _wait_comm,
156
+ (get_proxy_slot(e._tensor, tracer).proxy,),
157
+ {},
158
+ name="wait_comm",
159
+ )
160
+ # HACK: update the proxy for the inplace output
161
+ set_proxy_slot(e._tensor, tracer, proxy_res)
162
+ # For eager mode, simply wait.
163
+ # During tracing, still need to wait here, to make sure the
164
+ # execution during tracing is correct.
165
+ work.wait()
166
+
167
+ # the communication has been waited on; stop propagating CommTensor
168
+ return e._tensor
169
+ else:
170
+ return e
171
+
172
+ def wrap(e: Any):
173
+ return CommTensor(e) if isinstance(e, torch.Tensor) else e
174
+
175
+ def set_work(work: torch.distributed._Work, e: Any):
176
+ if isinstance(e, CommTensor):
177
+ e._work = work # type: ignore[attr-defined]
178
+ elif isinstance(e, torch.Tensor):
179
+ raise RuntimeError(
180
+ "Type of output tensors from collective communication during "
181
+ "tracing should always be CommTensor instead of torch.Tensor"
182
+ )
183
+ return e
184
+
185
+ unwrapped_args = tree_map(unwrap, args)
186
+ unwrapped_kwargs = tree_map(unwrap, kwargs)
187
+
188
+ if cls._is_supported(func.__name__):
189
+ if tracer is not None:
190
+ # in tracing mode, get proxies for args
191
+ proxy_args, proxy_kwargs = tree_map_only(
192
+ _ProxyTensor,
193
+ lambda e: e.proxy,
194
+ tree_map_only(
195
+ torch.Tensor,
196
+ fetch_object_proxy(tracer),
197
+ (unwrapped_args, unwrapped_kwargs),
198
+ ),
199
+ )
200
+
201
+ # get proxy for output tuple
202
+ proxy_res = func(*proxy_args, **proxy_kwargs)
203
+ assert isinstance(proxy_res, torch.fx.Proxy)
204
+ # insert a node that wraps the output tuple into
205
+ # _CommResult(tensor, work)
206
+ comm_result_proxy = tracer.create_proxy( # type: ignore[union-attr]
207
+ "call_function",
208
+ _wrap_comm_result,
209
+ (proxy_res,),
210
+ {},
211
+ name="comm_result",
212
+ )
213
+
214
+ with no_dispatch():
215
+ # disable dispatch to avoid trigger ProxyTorchDispatchMode logic
216
+ out = func(*unwrapped_args, **unwrapped_kwargs)
217
+
218
+ # wrap output with the proxy of _CommResult, so that subsequent
219
+ # ops can link to it.
220
+ track_tensor_tree(out, comm_result_proxy, constant=None, tracer=tracer)
221
+
222
+ # N.B.: we still need to remember the work handle here, and wait
223
+ # for it later to make sure the execution during tracing is
224
+ # correct. Also, remember comm is already launched
225
+ # args[0] is always the collection of output tensors
226
+ pytree.tree_map_(partial(set_work, out[1]), args[0])
227
+
228
+ # HACK: update the proxy on the input argument as this is an
229
+ # inplace collective communication.
230
+ flat_args, args_spec = tree_flatten(unwrapped_args[0])
231
+ flat_out, out_spec = tree_flatten(out[0])
232
+ for a, o in zip(flat_args, flat_out):
233
+ set_proxy_slot(a, tracer, get_proxy_slot(o, tracer))
234
+
235
+ return out
236
+ else:
237
+ # in eager mode, simply remember work handle as an attribute
238
+ out = func(*unwrapped_args, **unwrapped_kwargs)
239
+ pytree.tree_map_(partial(set_work, out[1]), args[0])
240
+ return out
241
+ else:
242
+ if work is not None:
243
+ return func(*unwrapped_args, **unwrapped_kwargs)
244
+ else:
245
+ # we need to propagate CommTensor wrapping until the first
246
+ # subsequent operation has waited for it.
247
+ return tree_map(wrap, func(*unwrapped_args, **unwrapped_kwargs))
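(Illustrative note, not part of the diffed file.) A rough sketch of how CommTensor is exercised under make_fx tracing, loosely following the docstring above; the function body and shapes are assumptions, and the commented-out trace call needs an already-initialized default process group:

import torch
import torch.distributed as dist
from torch.fx.experimental.proxy_tensor import make_fx
from torch.distributed._spmd.comm_tensor import CommTensor

def allreduce_then_use(x: torch.Tensor) -> torch.Tensor:
    t = CommTensor(x + 1)   # no-op in eager mode, wraps the tensor under tracing
    dist.all_reduce(t)      # inplace collective; the work handle is stashed on t
    return t * 2            # consuming t lets __torch_dispatch__ insert wait_comm

# traced = make_fx(allreduce_then_use)(torch.ones(4))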
venv/lib/python3.10/site-packages/torch/distributed/_spmd/config.py ADDED
@@ -0,0 +1,27 @@
1
+ import logging
2
+ import sys
3
+ from types import ModuleType
4
+ from typing import Set
5
+
6
+ # log level (levels print what it says + all levels listed below it)
7
+ # DEBUG print full traces <-- lowest level + print tracing of every instruction
8
+ # INFO print compiler functions + distributed graphs
9
+ # WARN print warnings
10
+ # ERROR print exceptions
11
+ log_level: int = logging.DEBUG
12
+ # Verbose will print full stack traces on warnings and errors
13
+ verbose = False
14
+
15
+ # the name of a file to write the logs to
16
+ log_file_name: None = None
17
+
18
+
19
+ class _AccessLimitingConfig(ModuleType):
20
+ def __setattr__(self, name, value) -> None:
21
+ if name not in _allowed_config_names:
22
+ raise AttributeError(f"{__name__}.{name} does not exist")
23
+ return object.__setattr__(self, name, value)
24
+
25
+
26
+ _allowed_config_names: Set[str] = {*globals().keys()}
27
+ sys.modules[__name__].__class__ = _AccessLimitingConfig
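(Illustrative note, not part of the diffed file.) Because the module's class is swapped to _AccessLimitingConfig, only names that existed at import time can be assigned; a small sketch:

from torch.distributed._spmd import config

config.verbose = True        # allowed: `verbose` is in _allowed_config_names
try:
    config.unknown_flag = 1  # hypothetical name, rejected by _AccessLimitingConfig
except AttributeError as exc:
    print(exc)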
venv/lib/python3.10/site-packages/torch/distributed/_spmd/data_parallel.py ADDED
@@ -0,0 +1,824 @@
1
+ import operator
2
+ from contextlib import contextmanager
3
+ from enum import Enum
4
+
5
+ from typing import Any, cast, Dict, List, Optional, Tuple
6
+
7
+ import torch
8
+
9
+ import torch.distributed.distributed_c10d as c10d
10
+ import torch.fx as fx
11
+ import torch.library
12
+ import torch.nn as nn
13
+
14
+ import torch.utils._pytree as pytree
15
+
16
+ from torch.distributed._spmd.batch_dim_utils import BatchDimAnalyzer
17
+ from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard
18
+
19
+ from torch.distributed._tensor._utils import compute_local_shape
20
+ from torch.distributed._tensor.op_schema import (
21
+ OpStrategy,
22
+ PlacementStrategy,
23
+ StrategyType,
24
+ TupleStrategy,
25
+ )
26
+ from torch.distributed._tensor.placement_types import _Partial, DTensorSpec, Placement
27
+ from torch.distributed._tensor.redistribute import redistribute_local_tensor
28
+ from torch.fx import GraphModule
29
+ from torch.fx.experimental.proxy_tensor import make_fx
30
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata
31
+ from torch.nn.utils._named_member_accessor import NamedMemberAccessor
32
+
33
+ aten = torch.ops.aten
34
+
35
+ # Dummy op used by data parallel to tag gradients.
36
+ _spmd_lib_def = torch.library.Library("_spmd", "DEF")
37
+ _spmd_lib_def.define("tag_grad(Tensor self) -> Tensor")
38
+
39
+ _spmd_lib_impl = torch.library.Library("_spmd", "IMPL")
40
+ _spmd_lib_impl.impl("tag_grad", lambda x: x, "CompositeExplicitAutograd")
41
+
42
+
43
+ class DataParallelStyle(Enum):
44
+ """This enum represents the style of the data-parallel operation.
45
+
46
+ We have three types of Data Parallel style:
47
+ 1. DEFAULT: the default data parallel style, which is to represent a mixed
48
+ replicate and fully shard behavior. For each parameter that is able
49
+ to be sharded evenly, we shard it, otherwise we would replicate the
50
+ parameter. This style avoids potential padding if the parameters
51
+ cannot be sharded evenly, but it would generate a mixed of all_reduce
52
+ and reduce_scatter.
53
+ 2. REPLICATE: the data parallel style that replicates all model parameters.
54
+ This is similar to the behavior of DistributedDataParallel.
55
+ 3. FULLY_SHARD: the data parallel style that shards all model parameters. This
56
+ is similar to the behavior of FullyShardedDataParallel, the
57
+ difference is that FullyShardedDataParallel (ZERO-3), which
58
+ shards the model using FlatParameter based sharding,
59
+ while this style shards each parameter into DTensor.
60
+ """
61
+
62
+ DEFAULT = 0
63
+ REPLICATE = 1
64
+ FULLY_SHARD = 2
65
+
66
+
67
+ class NodeType(Enum):
68
+ """NodeType is an enum that records the type of the tensors in the graph.
69
+
70
+ This is used to determine the data parallel strategy.
71
+ """
72
+
73
+ PARAM = 0
74
+ ACT = 1
75
+ GRAD = 2
76
+ STATE = 3
77
+ NON_TENSOR = 4 # NON_TENSOR is to tag non tensor node (i.e. graph output)
78
+
79
+
80
+ class DataParallelStrategy(OpStrategy):
81
+ """DataParallelStrategy is a special case of OpStrategy that only records the "data parallel style" placement
82
+ strategy for each fx Node.
83
+
84
+ It takes a list of PlacementStrategy, where each PlacementStrategy describes
85
+ one way to distribute the tensor and computation. In the DataParallel case,
86
+ there're two possible ways to distribute the parameters:
87
+ 1. replicate the parameter over a set of devices (DDP like behavior)
88
+ 2. shard the parameter on its tensor dimension 0 over a set of devices
89
+ (FSDP like behavior).
90
+
91
+ In addition to the strategy list, we also need to:
92
+ 1. `node_type`: record the type of each node in the graph, so that we can
93
+ determine how to propagate in a data parallel fashion.
94
+ 2. `reduce_over_batch` is specifically tied to data parallel as the loss
95
+ calculation usually results in scalar tensor where it comes from a
96
+ reduction over the batch dimension. We need to know this information
97
+ so that we could keep the output as sharded.
98
+ """
99
+
100
+ def __init__(
101
+ self,
102
+ node_type: NodeType,
103
+ strategy_list: List[PlacementStrategy],
104
+ reduction_over_batch: bool = False,
105
+ ):
106
+ super().__init__(strategy_list)
107
+ self.node_type = node_type
108
+ self.reduction_over_batch = reduction_over_batch
109
+
110
+ def __str__(self) -> str:
111
+ return f"type: {self.node_type}, {super().__str__()}"
112
+
113
+
114
+ @contextmanager
115
+ def gradients_tagging(params: Dict[str, torch.Tensor]):
116
+ """Tag the gradient of the parameters with a special tag, so that we can identify them during SPMD expansion.
117
+
118
+ It's safe to trace those hooks and we would remove those nodes later.
119
+ """
120
+ tagging_hooks = []
121
+ try:
122
+ for p in params.values():
123
+ h = p.register_hook(torch.ops._spmd.tag_grad)
124
+ tagging_hooks.append(h)
125
+ yield
126
+ finally:
127
+ # remove those hooks after tracing
128
+ for h in tagging_hooks:
129
+ h.remove()
130
+
131
+
132
+ def _gen_shard_strategy(
133
+ mesh: DeviceMesh, shard_dim: int, input_specs: Optional[List[DTensorSpec]] = None
134
+ ) -> PlacementStrategy:
135
+ """Util function to generate a shard strategy on shard_dim."""
136
+ return PlacementStrategy(
137
+ output_specs=DTensorSpec(mesh=mesh, placements=(Shard(shard_dim),)),
138
+ input_specs=input_specs,
139
+ )
140
+
141
+
142
+ def _gen_replicate_strategy(
143
+ mesh: DeviceMesh, input_specs: Optional[List[DTensorSpec]] = None
144
+ ) -> PlacementStrategy:
145
+ """Util function to generate a replicate strategy."""
146
+ return PlacementStrategy(
147
+ output_specs=DTensorSpec(mesh=mesh, placements=(Replicate(),)),
148
+ input_specs=input_specs,
149
+ )
150
+
151
+
152
+ def _gen_partial_strategy(mesh: DeviceMesh) -> PlacementStrategy:
153
+ """Util function to generate a partial strategy."""
154
+ # NOTE: we use AVG by default, avg reduction is needed depending on
155
+ # the loss function, for most loss function it should do
156
+ # gradient averaging. There might be certain cases it should
157
+ # not do gradient averaging (i.e. sum) but it's pretty rare.
158
+ # TODO: Only NCCL supports AVG so using backend like Gloo would
159
+ # crash, we should figure out a way to support avg reduction
160
+ # for non-NCCL backend
161
+ reduce_op = c10d.ReduceOp.AVG # type: ignore[attr-defined]
162
+ return PlacementStrategy(
163
+ output_specs=DTensorSpec(mesh=mesh, placements=(_Partial(reduce_op),)),
164
+ )
165
+
166
+
167
+ def build_data_parallel_strategies(
168
+ train_step_graph: GraphModule,
169
+ num_params: int,
170
+ num_states: int,
171
+ mesh: DeviceMesh,
172
+ batch_dim: int = 0,
173
+ ) -> Dict[fx.Node, StrategyType]:
174
+ """Loop through the train step graph and build the data parallel strategy for each fx Node."""
175
+ activation_idx = num_params + num_states
176
+ non_compute_ops = [
177
+ aten.clone.default,
178
+ aten.detach.default,
179
+ aten.ones_like.default,
180
+ aten.reshape.default,
181
+ aten.t.default,
182
+ aten.view.default,
183
+ torch.ops._spmd.tag_grad.default,
184
+ operator.getitem,
185
+ ]
186
+
187
+ tuple_strategy_ops = [aten._fused_adam.default]
188
+
189
+ dp_strategy_map: Dict[fx.Node, StrategyType] = {}
190
+ batch_dim_analyzer = BatchDimAnalyzer(batch_dim)
191
+ placeholder_idx = 0
192
+ num_param_grad = 0
193
+
194
+ # first we backward propagate to mark the param gradients sharding
195
+ # with tag_grad node helps and then delete the tag_grad nodes
196
+ for node in reversed(list(train_step_graph.graph.nodes)):
197
+ # find a param_grad node via the tagging
198
+ if node.target == torch.ops._spmd.tag_grad.default:
199
+ cur_node = node
200
+ while cur_node.target in non_compute_ops:
201
+ cur_node = cur_node.args[0]
202
+ partial_strategy = _gen_partial_strategy(mesh)
203
+ dp_strategy_map[cur_node] = DataParallelStrategy(
204
+ NodeType.GRAD, [partial_strategy]
205
+ )
206
+ num_param_grad += 1
207
+ # remove the tag_grad node from graph
208
+ node.replace_all_uses_with(node.args[0])
209
+ train_step_graph.graph.erase_node(node)
210
+
211
+ if num_param_grad == num_params:
212
+ # early break if we have already processed all param_grads
213
+ break
214
+
215
+ # next we forward propagate to mark all the sharding
216
+ for node in train_step_graph.graph.nodes:
217
+ if node.op == "placeholder":
218
+ if "val" not in node.meta:
219
+ # NOTE: There're certain cases where the placeholder nodes do
220
+ # not have real tensor values:
221
+ # 1. optimizer states can be None sometimes, i.e. SGD with
222
+ # no momentum, optimizer states populate `momentum` state
223
+ # as None, the full graph we get from `compile` would have
224
+ # None as the placeholder value
225
+ # 2. function args might not only contain params or activations,
226
+ # but also contain other non-tensor inputs, i.e. the model
227
+ # and optimizer instances baked in as a placeholder, there might
228
+ # also be some scalar argument which is not a tensor
229
+ #
230
+ # For the above cases, we create a NON_TENSOR stratgy so that we
231
+ # know it's not a tensor and we don't need to shard it
232
+ dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, [])
233
+
234
+ elif placeholder_idx < num_params:
235
+ # during compilation there's an assumption that the first num_params
236
+ # placeholders should be parameters
237
+ shard_strategy = _gen_shard_strategy(mesh, 0)
238
+ replica_strategy = _gen_replicate_strategy(mesh)
239
+ dp_strategy_map[node] = DataParallelStrategy(
240
+ NodeType.PARAM, [replica_strategy, shard_strategy]
241
+ )
242
+
243
+ elif placeholder_idx < activation_idx:
244
+ # optimizer states follow the same strategy as
245
+ # the corresponding parameters
246
+ replica_strategy = _gen_replicate_strategy(mesh)
247
+ shard_strategy = _gen_shard_strategy(mesh, 0)
248
+
249
+ dp_strategy_map[node] = DataParallelStrategy(
250
+ NodeType.STATE, [replica_strategy, shard_strategy]
251
+ )
252
+ else:
253
+ activation_batch_dim_size = node.meta["val"].shape[batch_dim]
254
+ # find the first activation node and use its batch dim size
255
+ if batch_dim_analyzer.batch_dim_size == -1:
256
+ batch_dim_analyzer.init_batch_dim_size(activation_batch_dim_size)
257
+
258
+ batch_dim_analyzer.set_batch_dim(node, batch_dim)
259
+ shard_strategy = _gen_shard_strategy(mesh, batch_dim)
260
+ dp_strategy_map[node] = DataParallelStrategy(
261
+ NodeType.ACT, [shard_strategy]
262
+ )
263
+ placeholder_idx += 1
264
+ elif node.op == "call_function":
265
+ # Annotate node types for the computation graph
266
+ # Data Parallel node propagation logic:
267
+ # param (non-compute) -> out: param
268
+ # grad (non-compute before/after) -> out: grad
269
+ # state -> output: state
270
+ #
271
+ # param + activation (param must be replicate, act be sharded) -> out: activation
272
+ # param/state + grad (param/state/grad be the same spec) -> out: param/state
273
+ # param + state -> out: param
274
+
275
+ if node.target in non_compute_ops:
276
+ # At this point, we should have removed all the `tag_grad` nodes in the graph
277
+ assert node.target != torch.ops._spmd.tag_grad.default
278
+
279
+ input_nodes = node.all_input_nodes
280
+ assert (
281
+ len(input_nodes) == 1
282
+ ), f"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}"
283
+ arg_strategy = dp_strategy_map[input_nodes[0]]
284
+
285
+ if node.target == operator.getitem:
286
+ # for getitem call, just forward the strategy from the input
287
+ getitem_idx = node.args[1]
288
+ if isinstance(arg_strategy, TupleStrategy):
289
+ # for tuple strategy, we need to get the child strategy from the tuple
290
+ dp_strategy_map[node] = arg_strategy.childs[getitem_idx]
291
+ else:
292
+ # if it's not a tuple strategy, we just forward the arg strategy
293
+ dp_strategy_map[node] = arg_strategy
294
+ else:
295
+ assert isinstance(arg_strategy, DataParallelStrategy)
296
+ arg_node_type = arg_strategy.node_type
297
+ if arg_node_type == NodeType.PARAM:
298
+ replica_strategy = _gen_replicate_strategy(mesh)
299
+ dp_strategy_map[node] = DataParallelStrategy(
300
+ NodeType.PARAM, [replica_strategy]
301
+ )
302
+ elif arg_node_type == NodeType.GRAD:
303
+ partial_sig = _gen_partial_strategy(mesh)
304
+ dp_strategy_map[node] = DataParallelStrategy(
305
+ NodeType.GRAD, [partial_sig]
306
+ )
307
+ elif arg_node_type == NodeType.ACT:
308
+ arg_node_spec = batch_dim_analyzer.compute_act_spec(
309
+ input_nodes[0], mesh
310
+ )
311
+
312
+ output_spec = batch_dim_analyzer.compute_act_spec(node, mesh)
313
+
314
+ shard_strategy = PlacementStrategy(
315
+ output_specs=output_spec, input_specs=[arg_node_spec]
316
+ )
317
+ dp_strategy_map[node] = DataParallelStrategy(
318
+ NodeType.ACT, [shard_strategy]
319
+ )
320
+ else:
321
+ raise RuntimeError(
322
+ f"non compute op not supporting {arg_node_type}! "
323
+ )
324
+
325
+ # finished processing this non-compute node
326
+ continue
327
+
328
+ # for computatation nodes, we need to check all the inputs
329
+ input_args = node.all_input_nodes
330
+ input_specs = []
331
+ if node in dp_strategy_map:
332
+ # found a param_grad node that already have output pre-filled spec
333
+ # fill in the expected input specs for the pre-filled strategy
334
+ node_strategy = dp_strategy_map[node]
335
+ assert isinstance(node_strategy, DataParallelStrategy)
336
+ node_type = node_strategy.node_type
337
+ assert node_type == NodeType.GRAD
338
+ produce_param_grad_strat = node_strategy.strategies
339
+ has_activation = False
340
+ for arg in input_args:
341
+ arg_strategy = dp_strategy_map[arg]
342
+ assert isinstance(arg_strategy, DataParallelStrategy)
343
+ arg_node_type = arg_strategy.node_type
344
+ if arg_node_type == NodeType.ACT:
345
+ # activation sharded
346
+ has_activation = True
347
+ act_spec = batch_dim_analyzer.compute_act_spec(arg, mesh)
348
+
349
+ input_specs.append(act_spec)
350
+
351
+ if has_activation:
352
+ assert len(produce_param_grad_strat) == 1
353
+ produce_param_grad_strat[0].input_specs = input_specs
354
+ elif node.target in tuple_strategy_ops:
355
+ # ops that need to build tuple strategy instead of normal strategy
356
+ # This should happen rarely and only needed when we need to generate
357
+ # different node strategy for multiple outputs (i.e. fused_adam op)
358
+ # TODO: Currently this specializes to fused optimizer ops, but we need
359
+ # to see how to generalize this strategy building logic
360
+ output_strategy_len = len(node.args) - 1
361
+ tuple_strategies = []
362
+ for i in range(output_strategy_len):
363
+ if not isinstance(node.args[i], list):
364
+ raise RuntimeError(
365
+ f"Expecting list as arg to build Tuple Strategy, but found type {type(node.args[i])}!"
366
+ )
367
+ # for list/tuple arg, use the first one to find out the node type
368
+ if len(node.args[i]) > 0:
369
+ arg_strategy = dp_strategy_map[node.args[i][0]]
370
+ assert isinstance(arg_strategy, DataParallelStrategy)
371
+ assert arg_strategy.node_type in [
372
+ NodeType.PARAM,
373
+ NodeType.GRAD,
374
+ NodeType.STATE,
375
+ ], "Expecting param/grad/state as arg to build Tuple Strategy!"
376
+ replica_strategy = _gen_replicate_strategy(mesh)
377
+ shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)
378
+ out_node_strategy: StrategyType = DataParallelStrategy(
379
+ arg_strategy.node_type, [replica_strategy, shard_strategy]
380
+ )
381
+
382
+ tuple_strategies.append(out_node_strategy)
383
+
384
+ output_tuple_strategy = TupleStrategy(tuple(tuple_strategies))
385
+ dp_strategy_map[node] = output_tuple_strategy
386
+ else:
387
+ # NOTE: This is the common region for all regular computation ops
388
+
389
+ input_node_types = [
390
+ cast(DataParallelStrategy, dp_strategy_map[arg]).node_type
391
+ for arg in input_args
392
+ if isinstance(dp_strategy_map[arg], DataParallelStrategy)
393
+ ]
394
+ if NodeType.GRAD in input_node_types:
395
+ # param/state + grad, build up acceptable strategy
396
+ # the strategy should be the same for all the inputs/outputs
397
+ # TODO: optimizer parts should follow the dtensor prop logic
398
+ # to support more general cases that allows optimizer states
399
+ # to have different shardings compare to the params
400
+ replica_strategy = _gen_replicate_strategy(mesh)
401
+ shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)
402
+ output_node_type = NodeType.PARAM
403
+
404
+ non_grad_types = [t for t in input_node_types if t != NodeType.GRAD]
405
+
406
+ output_node_type = non_grad_types[0]
407
+ for non_grad_type in non_grad_types:
408
+ assert (
409
+ non_grad_type == output_node_type
410
+ ), f"Found more than one non grad types! Expect {output_node_type} but found {non_grad_type}!"
411
+ assert output_node_type in [
412
+ NodeType.PARAM,
413
+ NodeType.STATE,
414
+ ], f"Expecting output node type to be either state or param, but found {output_node_type}!"
415
+
416
+ dp_strategy_map[node] = DataParallelStrategy(
417
+ output_node_type, [replica_strategy, shard_strategy]
418
+ )
419
+ elif NodeType.STATE in input_node_types:
420
+ # either param + state or state + state
421
+ replica_strategy = _gen_replicate_strategy(mesh)
422
+ shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)
423
+ output_node_type = (
424
+ NodeType.PARAM
425
+ if NodeType.PARAM in input_node_types
426
+ else NodeType.STATE
427
+ )
428
+
429
+ dp_strategy_map[node] = DataParallelStrategy(
430
+ output_node_type, [replica_strategy, shard_strategy]
431
+ )
432
+ elif NodeType.PARAM in input_node_types:
433
+ if NodeType.ACT in input_node_types:
434
+ # param + activation, build up acceptable strategy
435
+ # param must be replicated, activation must be sharded
436
+ for arg in input_args:
437
+ arg_strategy = dp_strategy_map[arg]
438
+ assert isinstance(arg_strategy, DataParallelStrategy)
439
+ node_type = arg_strategy.node_type
440
+ if node_type == NodeType.ACT:
441
+ # compute activation spec
442
+ act_spec = batch_dim_analyzer.compute_act_spec(
443
+ arg, mesh
444
+ )
445
+
446
+ input_specs.append(act_spec)
447
+ elif node_type == NodeType.PARAM:
448
+ # param must be replicated
449
+ input_specs.append(
450
+ DTensorSpec(mesh=mesh, placements=(Replicate(),))
451
+ )
452
+ else:
453
+ raise RuntimeError(
454
+ f"Expecting node with parameter and activation, but found {input_node_types}! "
455
+ )
456
+ # produce activation type sharding for output
457
+ output_spec = batch_dim_analyzer.compute_act_spec(node, mesh)
458
+
459
+ act_strategy = PlacementStrategy(
460
+ output_specs=output_spec, input_specs=input_specs
461
+ )
462
+
463
+ dp_strategy_map[node] = DataParallelStrategy(
464
+ NodeType.ACT, [act_strategy]
465
+ )
466
+ else:
467
+ # If inputs only have parameters, the
468
+ # strategy of this node should follow input
469
+ dp_strategy_map[node] = dp_strategy_map[input_args[0]]
470
+ else:
471
+ # If input nodes does not have PARAM/GRAD/STATE, then
472
+ # it should be a pure activation computation, it should
473
+ # produce activation output.
474
+ # Activations are usually sharded unless model creates
475
+ # new tensors during computation, which depend on whether
476
+ # the new tensor associate with a batch dim or not, it could
477
+ # be shard/replicate/partial, batch dim analyzer should tell
478
+ # us the correct sharding.
479
+ for arg in input_args:
480
+ arg_strategy = dp_strategy_map[arg]
481
+ assert isinstance(arg_strategy, DataParallelStrategy)
482
+ input_spec = batch_dim_analyzer.compute_act_spec(arg, mesh)
483
+
484
+ input_specs.append(input_spec)
485
+
486
+ act_spec = batch_dim_analyzer.compute_act_spec(node, mesh)
487
+ op_strategy = PlacementStrategy(
488
+ output_specs=act_spec, input_specs=input_specs
489
+ )
490
+ dp_strategy_map[node] = DataParallelStrategy(
491
+ NodeType.ACT, [op_strategy]
492
+ )
493
+
494
+ elif node.op == "output":
495
+ dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, [])
496
+ else:
497
+ raise RuntimeError(f"op code {node.op} not supported")
498
+
499
+ return dp_strategy_map # type: ignore[return-value]
500
+
501
+
502
+ def mark_data_parallel_shardings(
503
+ train_step_graph: GraphModule,
504
+ num_parameters: int,
505
+ num_states: int,
506
+ dp_strategy_map: Dict[fx.Node, StrategyType],
507
+ parallel_mode: DataParallelStyle = DataParallelStyle.FULLY_SHARD,
508
+ ) -> None:
509
+ """Mark the sharding for the nodes in the train_step_graph."""
510
+ activation_idx = num_parameters + num_states
511
+ placeholder_idx = 0
512
+ for node in train_step_graph.graph.nodes:
513
+ node_strategy = dp_strategy_map[node]
514
+ if node.op == "placeholder":
515
+ assert isinstance(node_strategy, DataParallelStrategy)
516
+ node_type = node_strategy.node_type
517
+ node_strategies = node_strategy.strategies
518
+ if node_type == NodeType.NON_TENSOR:
519
+ # set node sharding to None
520
+ node_sharding = None
521
+ elif placeholder_idx < activation_idx:
522
+ assert len(node_strategies) > 0, "node_strategies should not be empty"
523
+ if parallel_mode == DataParallelStyle.REPLICATE:
524
+ # set to replicate for replicate style
525
+ node_sharding = node_strategies[0]
526
+ elif parallel_mode == DataParallelStyle.FULLY_SHARD:
527
+ # set to shard for fully shard style
528
+ if len(node_strategies) == 1:
529
+ # only one strategy, use that instead
530
+ # i.e. optimizer state steps can only be replicate
531
+ node_sharding = node_strategies[0]
532
+ else:
533
+ # use the full sharding strategy
534
+ node_sharding = node_strategies[1]
535
+ elif parallel_mode == DataParallelStyle.DEFAULT:
536
+ # TODO: add support for default mode
537
+ # default mode would generate either replicate or shard
538
+ raise NotImplementedError("default mode not implemented")
539
+ else:
540
+ assert len(node_strategies) > 0, "node_strategies should not be empty"
541
+ # mark activation as sharded on batch dim
542
+ node_sharding = node_strategies[0]
543
+
544
+ node.meta["sharding"] = node_sharding # type: ignore[possibly-undefined]
545
+
546
+ placeholder_idx += 1
547
+ elif node.op == "call_function":
548
+ if isinstance(node_strategy, TupleStrategy):
549
+ # For tuple strategy in the data parallel mode, it should have the same strategy
550
+ # for all tuple elements, assert that then use the first element's strategy as sharding
551
+ first_strategy = cast(DataParallelStrategy, node_strategy.childs[0])
552
+ for child_strategy in node_strategy.childs:
553
+ assert isinstance(child_strategy, DataParallelStrategy)
554
+ assert child_strategy.strategies == first_strategy.strategies
555
+
556
+ node_strategies = first_strategy.strategies
557
+ else:
558
+ assert isinstance(node_strategy, DataParallelStrategy)
559
+ node_strategies = node_strategy.strategies
560
+
561
+ assert (
562
+ len(node_strategies) <= 2
563
+ ), "data parallel should have at most 2 strategies"
564
+ if len(node_strategies) == 1:
565
+ node.meta["sharding"] = node_strategies[0]
566
+ elif len(node_strategies) == 2:
567
+ if parallel_mode == DataParallelStyle.REPLICATE:
568
+ # set to replicate for replicate style
569
+ node.meta["sharding"] = node_strategies[0]
570
+ elif parallel_mode == DataParallelStyle.FULLY_SHARD:
571
+ # set to shard for fully shard style
572
+ node.meta["sharding"] = node_strategies[1]
573
+ else:
574
+ raise RuntimeError("default mode not supported yet!")
575
+ else:
576
+ raise RuntimeError(
577
+ f"node {node} strategy length {len(node_strategies)} is not expected!"
578
+ )
579
+ elif node.op == "output":
580
+ assert (
581
+ isinstance(node_strategy, DataParallelStrategy)
582
+ and node_strategy.node_type == NodeType.NON_TENSOR
583
+ ), "output node should not be tensor"
584
+ node.meta["sharding"] = None
585
+ else:
586
+ raise RuntimeError(f"op code {node.op} not supported")
587
+
588
+
589
+ def _partition_val(val: Any, spec: DTensorSpec) -> Any:
590
+ """Util function to convert a full tensor val to its local component."""
591
+ if isinstance(val, torch.Tensor):
592
+ local_shard = val
593
+ if val.ndim == 0:
594
+ # If it's already a scalar tensor, it is already local, we don't
595
+ # need to do anything
596
+ return local_shard
597
+
598
+ for idx, placement in enumerate(spec.placements):
599
+ if placement.is_shard():
600
+ placement = cast(Shard, placement)
601
+ num_chunks = spec.mesh.size(mesh_dim=idx)
602
+ my_coord = spec.mesh.get_coordinate()
603
+ assert my_coord is not None, "current rank not in mesh!"
604
+ my_coord_on_mesh_dim = my_coord[idx]
605
+ local_shard = placement._split_tensor(
606
+ local_shard, num_chunks, with_padding=False, contiguous=False
607
+ )[0][my_coord_on_mesh_dim]
608
+ return local_shard
609
+ elif isinstance(val, (tuple, list)):
610
+ return val.__class__(_partition_val(v, spec) for v in val)
611
+ else:
612
+ raise RuntimeError(f"val type {type(val)} not supported")
613
+
614
+
615
+ def partitioner(graph: GraphModule) -> GraphModule:
616
+ """Graph partitioner that partitions the single device graph to distributed graph."""
617
+ shape_adjustment_ops = {
618
+ aten._unsafe_view.default: 1,
619
+ aten.expand.default: 1,
620
+ aten.new_zeros.default: 1,
621
+ aten.ones.default: 0,
622
+ aten.reshape.default: 1,
623
+ aten.view.default: 1,
624
+ aten.zeros.default: 0,
625
+ }
626
+ # partition the graph to distributed
627
+ for node in graph.graph.nodes:
628
+ node_sharding = node.meta["sharding"]
629
+ # None sharding means this node don't need sharding
630
+ if node_sharding is None:
631
+ continue
632
+
633
+ if node.op == "placeholder":
634
+ out_spec = node_sharding.output_spec
635
+ if not hasattr(out_spec, "from_local"):
636
+ local_val = _partition_val(node.meta["val"], out_spec)
637
+ # update node value
638
+ node.meta["val"] = local_val
639
+ elif node.op == "call_function":
640
+ out_spec = node_sharding.output_spec
641
+
642
+ # check if there's misaligned sharding, insert reshard if there is
643
+ expected_input_specs = node_sharding.input_specs
644
+ for idx, input_arg in enumerate(node.all_input_nodes):
645
+ input_arg_sharding = input_arg.meta["sharding"]
646
+
647
+ input_arg_spec = input_arg_sharding.output_spec
648
+ desired_spec = (
649
+ out_spec
650
+ if expected_input_specs is None
651
+ else expected_input_specs[idx]
652
+ )
653
+ if input_arg_spec != desired_spec:
654
+ input_arg_spec.tensor_meta = input_arg.meta["tensor_meta"]
655
+ desired_spec.tensor_meta = input_arg.meta["tensor_meta"]
656
+ input_arg_tensor = input_arg.meta["val"]
657
+
658
+ # insert reshard operation
659
+ def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor:
660
+ return redistribute_local_tensor(
661
+ local_tensor,
662
+ input_arg_spec,
663
+ desired_spec,
664
+ )
665
+
666
+ reshard_gm = make_fx(reshard_fn)(input_arg_tensor)
667
+ reshard_gm_nodes = list(reshard_gm.graph.nodes)
668
+ input_node = reshard_gm_nodes[0]
669
+ with graph.graph.inserting_before(node):
670
+ output_node = graph.graph.graph_copy(
671
+ reshard_gm.graph,
672
+ val_map={
673
+ input_node: input_arg,
674
+ },
675
+ )
676
+ node.replace_input_with(input_arg, output_node)
677
+
678
+ output_val = node.meta["val"]
679
+
680
+ if node.target == torch.ops.aten.repeat.default:
681
+ # for repeat op, we need to infer the repeat sizes
682
+ assert isinstance(output_val, torch.Tensor)
683
+ local_shape = compute_local_shape(
684
+ output_val.shape, out_spec.mesh, out_spec.placements
685
+ )
686
+ input_shape = node.args[0].meta["val"].shape
687
+
688
+ def infer_repeat_sizes(repeated_shape, input_shape):
689
+ repeated_size = [1] * len(repeated_shape)
690
+ padded_length = len(repeated_shape) - len(input_shape)
691
+ for i in range(len(repeated_shape)):
692
+ if i < padded_length:
693
+ repeated_size[i] = repeated_shape[i]
694
+ else:
695
+ repeated_size[i] = (
696
+ repeated_shape[i] // input_shape[i - padded_length]
697
+ )
698
+
699
+ return repeated_size
700
+
701
+ node.update_arg(1, infer_repeat_sizes(local_shape, input_shape))
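# (Editorial worked example, not part of the file.) For a local output shape of
# (8, 6) and an input shape of (3,), infer_repeat_sizes computes padded_length = 1,
# so repeated_size = [8, 6 // 3] = [8, 2]: leading new dims keep their full local
# size, while trailing dims are divided by the corresponding input sizes.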
702
+
703
+ elif node.target in shape_adjustment_ops:
704
+ # for view related op that needs shape, adjust shape to local shape if needed
705
+ assert isinstance(output_val, torch.Tensor)
706
+ local_shape = compute_local_shape(
707
+ output_val.shape, out_spec.mesh, out_spec.placements
708
+ )
709
+ shape_arg_num = shape_adjustment_ops[node.target]
710
+ node.update_arg(shape_arg_num, local_shape)
711
+
712
+ # convert output val to its local component
713
+ node.meta["val"] = _partition_val(output_val, out_spec)
714
+
715
+ elif node.op == "output":
716
+ break
717
+ else:
718
+ raise RuntimeError(f"op code {node} not supported")
719
+
720
+ # clean up the graph by removing sharding and partitioning related metadata
721
+ for node in graph.graph.nodes:
722
+ if "sharding" in node.meta:
723
+ del node.meta["sharding"]
724
+ if "val" in node.meta and isinstance(node.meta["val"], torch.Tensor):
725
+ local_tensor_meta = _extract_tensor_metadata(node.meta["val"])
726
+ node.meta["tensor_meta"] = local_tensor_meta
727
+
728
+ graph.graph.lint()
729
+ graph.recompile()
730
+ return graph
731
+
732
+
733
+ def partition_data_parallel(
734
+ graph: GraphModule,
735
+ model: nn.Module,
736
+ optimizer: Optional[torch.optim.Optimizer],
737
+ params_buffers: Dict[str, torch.Tensor],
738
+ named_states: Dict[str, Any],
739
+ args: Tuple[Any, ...],
740
+ kwargs: Dict[str, Any],
741
+ mesh: DeviceMesh,
742
+ parallel_style: DataParallelStyle,
743
+ input_batch_dim: int,
744
+ ) -> GraphModule:
745
+ """Partition the graph to into a data parallel graph.
746
+
747
+ This function also shards/replicates the model parameters and optimizer states to DTensors.
748
+ """
749
+ num_params_buffers = len(params_buffers)
750
+ flattened_states = pytree.tree_leaves(named_states)
751
+ num_states = len(flattened_states)
752
+
753
+ changed = graph.graph.eliminate_dead_code()
754
+ if changed:
755
+ graph.recompile()
756
+
757
+ # 1. First build up data parallel strategies for the whole graph
758
+ strategy_map = build_data_parallel_strategies(
759
+ graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim
760
+ )
761
+
762
+ # 2. Next we mark the data parallel strategy for each node base on
763
+ # the parallel_style
764
+ mark_data_parallel_shardings(
765
+ graph,
766
+ num_parameters=num_params_buffers,
767
+ num_states=num_states,
768
+ dp_strategy_map=strategy_map,
769
+ parallel_mode=parallel_style,
770
+ )
771
+
772
+ # 3. Partition the single machine graph to the distribute graph
773
+ partitioned_graph = partitioner(graph)
774
+
775
+ # preserve node types for the expanded graph
776
+ for node in partitioned_graph.graph.nodes:
777
+ if node in strategy_map:
778
+ node_strategy = strategy_map[node]
779
+ if isinstance(node_strategy, DataParallelStrategy):
780
+ node.meta["node_type"] = node_strategy.node_type
781
+ elif isinstance(node_strategy, TupleStrategy):
782
+ node.meta["node_type"] = NodeType.NON_TENSOR
783
+ else:
784
+ raise RuntimeError(f"Unknown node strategy {node_strategy}")
785
+ else:
786
+ # if the nodes are expanded nodes (collectives), we mark them
787
+ # the same type as the input node.
788
+ input_node = node.all_input_nodes[0]
789
+ node.meta["node_type"] = input_node.meta["node_type"]
790
+
791
+ # 4. Last, inplace partition the weights and optim states to
792
+ # DTensors base on the parallel style
793
+ accessor = NamedMemberAccessor(model)
794
+ for param_key, param in params_buffers.items():
795
+ placement: Placement = Replicate()
796
+ if parallel_style == DataParallelStyle.FULLY_SHARD:
797
+ placement = Shard(0)
798
+ elif parallel_style != DataParallelStyle.REPLICATE:
799
+ raise RuntimeError(f"parallel style {parallel_style} not supported yet")
800
+
801
+ dtensor_param = distribute_tensor(param, mesh, [placement])
802
+ # update re-parameterized module param dict and optim states dict to DTensor
803
+ params_buffers[param_key] = dtensor_param.to_local()
804
+ # update module parameters to DTensor
805
+ accessor.set_tensor(param_key, dtensor_param)
806
+
807
+ # update the optimizer state key and values to DTensor
808
+ if optimizer is not None and param in optimizer.state:
809
+ param_states = named_states[param_key]
810
+ param_dtensor_states = {}
811
+ for state_key, state_val in param_states.items():
812
+ if isinstance(state_val, torch.Tensor) and state_val.ndim > 0:
813
+ # shard/replicate non-scalar tensors, for scalar tensor, we
814
+ # don't do anything
815
+ dtensor_state = distribute_tensor(state_val, mesh, [placement])
816
+ param_dtensor_states[state_key] = dtensor_state
817
+ param_states[state_key] = dtensor_state.to_local()
818
+ else:
819
+ param_dtensor_states[state_key] = state_val
820
+
821
+ optimizer.state.pop(param) # type: ignore[call-overload]
822
+ optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index]
823
+
824
+ return partitioned_graph
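(Illustrative note, not part of the diffed file.) A minimal sketch of the parameter handling in step 4 above, assuming a 1-D DeviceMesh over an already-initialized process group; the mesh construction and shapes are illustrative:

import torch
from torch.distributed._tensor import DeviceMesh, Replicate, Shard, distribute_tensor

# mesh = DeviceMesh("cuda", list(range(torch.distributed.get_world_size())))
# weight = torch.randn(1024, 1024)
# sharded = distribute_tensor(weight, mesh, [Shard(0)])         # FULLY_SHARD style
# replicated = distribute_tensor(weight, mesh, [Replicate()])   # REPLICATE style
# params_buffers["weight"] = sharded.to_local()                 # rank-local shard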
venv/lib/python3.10/site-packages/torch/distributed/_spmd/distribute.py ADDED
@@ -0,0 +1,783 @@
1
+ import logging
2
+ import operator
3
+ from dataclasses import dataclass
4
+ from enum import auto, Enum
5
+ from functools import partial
6
+ from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, Union
7
+
8
+ import torch
9
+ import torch.distributed._spmd.experimental_ops
10
+ import torch.fx as fx
11
+
12
+ from torch.distributed._spmd.comm_tensor import _get_tracer
13
+ from torch.distributed._spmd.graph_utils import OP
14
+ from torch.distributed._spmd.log_utils import get_logger
15
+
16
+ from torch.distributed._tensor import DeviceMesh, DTensor
17
+ from torch.distributed._tensor.op_schema import OpSchema
18
+ from torch.distributed._tensor.placement_types import (
19
+ _Partial,
20
+ DTensorSpec,
21
+ Placement,
22
+ Replicate,
23
+ Shard,
24
+ TensorMeta,
25
+ )
26
+ from torch.distributed._tensor.redistribute import redistribute_local_tensor
27
+ from torch.fx.experimental.proxy_tensor import make_fx, proxy_slot
28
+ from torch.utils import _pytree as pytree
29
+ from torch.utils._pytree import tree_flatten, tree_map, tree_map_only, tree_unflatten
30
+
31
+
32
+ logger: Optional[logging.Logger] = None
33
+
34
+ aten = torch.ops.aten
35
+
36
+
37
+ class TrainingPhase(Enum):
38
+ FORWARD = auto()
39
+ BACKWARD = auto()
40
+
41
+
42
+ @dataclass
43
+ class Schema:
44
+ mesh: DeviceMesh
45
+ placements: List[Placement]
46
+
47
+
48
+ @dataclass
49
+ class DSymInt:
50
+ """DSymInt represents a value retrieved by a SymInt op from a DTensor.
51
+
52
+ DSymInt helps View and Factory ops to determine the placement and shape of the
53
+ output tensor, as those operators either do not have an input DTensor or
54
+ the input DTensor is insufficient to determine the output tensor's placement.
55
+ """
56
+
57
+ global_value: int # value that the SymInt evaluates to
58
+ local_value: int # vaue that this SymInt evaluates to on the local shard
59
+ mesh: DeviceMesh # device mesh of the DTensor where this SymInt is retrieved from
60
+
61
+ def is_shard(self) -> bool:
62
+ return self.local_value != self.global_value
63
+
64
+ @classmethod
65
+ def from_node(cls, node: fx.Node, dtensor: DTensor) -> "DSymInt":
66
+ dim: int = 0
67
+ if node.target == aten.sym_size:
68
+ dim = cast(int, node.args[1])
69
+ return cls(
70
+ global_value=dtensor.size(dim),
71
+ local_value=dtensor.to_local().size(dim),
72
+ mesh=dtensor.device_mesh,
73
+ )
74
+ elif node.target == aten.sym_numel:
75
+ return cls(
76
+ global_value=dtensor.numel(),
77
+ local_value=dtensor.to_local().numel(),
78
+ mesh=dtensor.device_mesh,
79
+ )
80
+ elif node.target == aten.sym_stride:
81
+ dim = cast(int, node.args[1])
82
+ return cls(
83
+ global_value=dtensor.stride(dim),
84
+ local_value=dtensor.to_local().stride(dim),
85
+ mesh=dtensor.device_mesh,
86
+ )
87
+ else:
88
+ raise NotImplementedError(f"DSymInt does not support {node.target}")
89
+
90
+
91
+ def _is_partial_dtensor(obj: Any) -> bool:
92
+ """Check if object is 1) DTensor and 2) with any placement of _Partial."""
93
+ if not isinstance(obj, DTensor):
94
+ return False
95
+
96
+ is_partial = False
97
+ for placement in obj.placements:
98
+ if isinstance(placement, _Partial):
99
+ is_partial = True
100
+ break
101
+
102
+ return is_partial
103
+
104
+
105
+ def _dispatch_with_local_tensors(
106
+ op: torch._ops.OpOverload,
107
+ local_args: Tuple[Any, ...],
108
+ kwargs: Optional[Dict[str, Any]] = None,
109
+ specs: Optional[
110
+ Dict[
111
+ torch.Tensor,
112
+ Tuple[torch.Size, DeviceMesh, Sequence[Placement], Sequence[Placement]],
113
+ ]
114
+ ] = None,
115
+ ) -> Any:
116
+ if kwargs is None:
117
+ kwargs = {}
118
+ if specs is None:
119
+ specs = {}
120
+
121
+ def redistribute(arg: Any) -> Any:
122
+ tensor_shape, mesh, current_placement, target_placement = specs[arg]
123
+ tensor_meta = TensorMeta(
124
+ tensor_shape,
125
+ stride=arg.stride(),
126
+ dtype=arg.dtype,
127
+ )
128
+ current_spec = DTensorSpec(
129
+ mesh, tuple(current_placement), tensor_meta=tensor_meta
130
+ )
131
+ target_spec = DTensorSpec(
132
+ mesh, tuple(target_placement), tensor_meta=tensor_meta
133
+ )
134
+
135
+ return (
136
+ redistribute_local_tensor(arg, current_spec, target_spec) # type: ignore[index]
137
+ if isinstance(arg, torch.Tensor) and arg in specs # type: ignore[operator]
138
+ else arg
139
+ )
140
+
141
+ # TODO: this is broken because it won't redistributed potential tensors on the kwargs
142
+ return op(*tree_map(redistribute, local_args), **kwargs)
143
+
144
+
145
+ # Figure out how to specify a type spec for the return specs value
146
+ # without the entire structure.
147
+ # pyre-fixme
148
+ def _update_specs_for_redistribute(args, target_schema, redistribute):
149
+ # Code adapted from pack_args_kwargs_with_local_tensor
150
+ flatten_args, args_tree_spec = tree_flatten(args)
151
+ flatten_args_schema = pytree.tree_leaves(target_schema.args_schema)
152
+
153
+ specs: Dict[
154
+ torch.Tensor,
155
+ Tuple[
156
+ torch.Size,
157
+ DeviceMesh,
158
+ Sequence[Placement],
159
+ Sequence[Placement],
160
+ ],
161
+ ] = {}
162
+ for i, arg in enumerate(flatten_args):
163
+ if isinstance(arg, DTensor):
164
+ if redistribute:
165
+ specs[arg._local_tensor] = (
166
+ arg.size(),
167
+ flatten_args_schema[i].mesh,
168
+ arg.placements,
169
+ flatten_args_schema[i].placements,
170
+ )
171
+ flatten_args_schema[i] = arg._local_tensor
172
+
173
+ unflattened_args = tree_unflatten(flatten_args_schema, args_tree_spec)
174
+ return specs, unflattened_args
175
+
176
+
177
+ # When no tensor redistribution is required, we only need to update non-tensor args
178
+ # of the node according to op_schema and avoid building a GraphModule just for the
179
+ # node.
180
+ def _update_node_from_op_schema(node: torch.fx.Node, op_schema: OpSchema) -> None:
181
+ flat_args, args_tree_spec = tree_flatten(node.args)
182
+ flat_args_schema = pytree.tree_leaves(op_schema.args_schema)
183
+
184
+ def is_sym_int_or_int(arg: Union[int, torch.fx.Node]) -> bool:
185
+ if isinstance(arg, torch.fx.Node):
186
+ return arg.target in [
187
+ aten.sym_size,
188
+ aten.sym_numel,
189
+ aten.sym_stride,
190
+ ]
191
+ return isinstance(arg, int)
192
+
193
+ assert len(flat_args) == len(flat_args_schema)
194
+ for i, (arg, arg_schema) in enumerate(zip(flat_args, flat_args_schema)):
195
+ if is_sym_int_or_int(arg) and isinstance(arg_schema, int):
196
+ flat_args[i] = arg_schema
197
+
198
+ args = tree_unflatten(flat_args, args_tree_spec)
199
+ for idx, arg in enumerate(args):
200
+ node.update_arg(idx, arg)
201
+ return None
202
+
203
+
204
+ def _remap_arg(node_to_obj: Dict[fx.Node, Any], arg: Any) -> Any:
205
+ if isinstance(arg, torch.fx.Node):
206
+ obj = node_to_obj[arg]
207
+ if _get_tracer():
208
+ # This is a shared arg, already has a tracer from previous
209
+ # tracing. Delete the tracer.
210
+ del cast(Dict[Any, Any], obj.__dict__)[proxy_slot]
211
+ return obj
212
+ else:
213
+ return arg
214
+
215
+
216
+ def unpack_sizes_and_dims(
217
+ sizes: List[Union[DSymInt, int]], mesh: DeviceMesh
218
+ ) -> Tuple[List[int], List[Placement]]:
219
+ local_sizes: List[int] = [
220
+ s.local_value if isinstance(s, DSymInt) else s for s in sizes
221
+ ]
222
+ placements: List[Placement] = [
223
+ Shard(i)
224
+ for i, a in enumerate(sizes)
225
+ if (isinstance(a, DSymInt) and a.is_shard())
226
+ ] or [Replicate()]
227
+
228
+ assert len(placements) == mesh.ndim, (
229
+ f"The number of sharded dimensions ({len(placements)}) must "
230
+ f"match number of dimensions in device mesh ({mesh.ndim})."
231
+ )
232
+
233
+ return local_sizes, placements
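# (Editorial worked example, not part of the file.) On a 1-D mesh of 2 ranks with
# sizes = [DSymInt(global_value=8, local_value=4, mesh=mesh), 16], this returns
# local_sizes == [4, 16] and placements == [Shard(0)], since only the first entry
# is a sharded DSymInt; with no sharded entries it falls back to [Replicate()].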
234
+
235
+
236
+ def binop_sym_int_consumer_rule(node: fx.Node, args: Tuple[Any, ...]) -> DTensor:
237
+ assert len(args) == 2, f"Expect two args but got op {node.target} with args {args}"
238
+ assert isinstance(
239
+ args[0], DTensor
240
+ ), f"Expect 1st argument to be DTensor but got {args[0]}"
241
+ assert isinstance(args[1], list), f"Expect 2nd argument as list but got {args[1]}"
242
+
243
+ # extract sharded dimensions in the size list, the output DTensor should
244
+ # follow these placements.
245
+ local_sizes, placements = unpack_sizes_and_dims(args[1], args[0].device_mesh)
246
+
247
+ # set node args to real int sizes.
248
+ node.args = (node.args[0], local_sizes)
249
+ op = cast(torch._ops.OpOverload, node.target)
250
+ return DTensor.from_local(
251
+ local_tensor=op(args[0]._local_tensor, local_sizes),
252
+ device_mesh=args[0].device_mesh,
253
+ placements=placements,
254
+ run_check=False,
255
+ )
256
+
257
+
258
+ def slice_backwad_sym_int_consumer_rule(
259
+ node: fx.Node, args: Tuple[Any, ...]
260
+ ) -> DTensor:
261
+ grad_output, input_sizes, dim, start, end, step = args
262
+
263
+ local_sizes: List[int] = [
264
+ s.local_value if isinstance(s, DSymInt) else s for s in input_sizes
265
+ ]
266
+
267
+ input_tensor = torch.zeros(
268
+ local_sizes, device=grad_output.device, dtype=grad_output.dtype
269
+ )
270
+ return DTensor.from_local(
271
+ local_tensor=torch.slice_scatter(
272
+ input_tensor, grad_output.to_local(), dim, start, end, step
273
+ ),
274
+ device_mesh=grad_output.device_mesh,
275
+ placements=grad_output.placements,
276
+ run_check=False,
277
+ )
278
+
279
+
280
+ def factory_with_sizes_rule(
281
+ node: fx.Node,
282
+ args: Tuple[Any, ...],
283
+ kwargs: Dict[str, Any],
284
+ default_mesh: DeviceMesh,
285
+ ) -> DTensor:
286
+ flat_args = pytree.arg_tree_leaves(*args)
287
+ assert not any(isinstance(a, DTensor) for a in flat_args), (
288
+ f"Not expect DTensor argument for factory op, but got {node.target} "
289
+ f"with arguments {args}."
290
+ )
291
+ assert isinstance(args[0], list), f"Expect 2nd argument as list but got {args[1]}"
292
+
293
+ local_sizes, placements = unpack_sizes_and_dims(args[0], default_mesh)
294
+ node.args = (local_sizes, *args[1:])
295
+ op = cast(torch._ops.OpOverload, node.target)
296
+ return DTensor.from_local(
297
+ local_tensor=op(*node.args, **kwargs),
298
+ device_mesh=default_mesh,
299
+ placements=placements,
300
+ run_check=False,
301
+ )
302
+
303
+
304
+ def factory_arange_rule(
305
+ node: fx.Node,
306
+ args: Tuple[Any, ...],
307
+ kwargs: Dict[str, Any],
308
+ default_mesh: DeviceMesh,
309
+ ) -> DTensor:
310
+ node.args = tree_map(lambda a: a.local_value if isinstance(a, DSymInt) else a, args)
311
+ op = cast(torch._ops.OpOverload, node.target)
312
+ return DTensor.from_local(
313
+ local_tensor=op(*node.args, **kwargs),
314
+ device_mesh=default_mesh,
315
+ placements=[Replicate()],
316
+ run_check=False,
317
+ )
318
+
319
+
320
+ def default_factory_op_rule(
321
+ node: fx.Node,
322
+ args: Tuple[Any, ...],
323
+ kwargs: Dict[str, Any],
324
+ default_mesh: DeviceMesh,
325
+ ) -> DTensor:
326
+ node.args, node.kwargs = args, kwargs
327
+ op = cast(torch._ops.OpOverload, node.target)
328
+ return DTensor.from_local(
329
+ local_tensor=op(*node.args, **node.kwargs),
330
+ device_mesh=default_mesh,
331
+ placements=[Replicate()],
332
+ run_check=False,
333
+ )
334
+
335
+
336
+ # Dispatch override for view and factory ops that consume SymInt arguments,
337
+ # where the output spec should follow dimension placement where the SymInt comes
338
+ # from.
339
+ VIEW_SYM_INT_CONSUMERS: Dict[torch._ops.OpOverload, Callable] = {
340
+ aten._unsafe_view.default: binop_sym_int_consumer_rule,
341
+ aten.expand.default: binop_sym_int_consumer_rule,
342
+ aten.slice_backward.default: slice_backwad_sym_int_consumer_rule,
343
+ aten.view.default: binop_sym_int_consumer_rule,
344
+ }
345
+
346
+ FACTORY_SYM_INT_CONSUMERS: Dict[torch._ops.OpOverload, Callable] = {
347
+ aten.full.default: factory_with_sizes_rule,
348
+ aten.arange.default: factory_arange_rule,
349
+ aten.arange.start: factory_arange_rule,
350
+ }
351
+
352
+
353
+ # Dispatch override for factory ops, as DTensor cannot propagate sharding spec
354
+ # without DTensor inputs.
355
+ FACTORY_OPS: Dict[torch._ops.OpOverload, Callable] = {
356
+ aten.scalar_tensor.default: default_factory_op_rule,
357
+ aten.arange.start: default_factory_op_rule,
358
+ aten.zeros.default: default_factory_op_rule,
359
+ }
360
+
361
+
362
+ def _get_dtensor_dispatch_graph(
363
+ node: fx.Node,
364
+ node_to_obj: Dict[fx.Node, Any],
365
+ *,
366
+ force_make_fx: bool = False,
367
+ default_mesh: Optional[DeviceMesh] = None,
368
+ ) -> Optional[fx.GraphModule]:
369
+ with torch.no_grad():
370
+ # Args should be a list of objects post remapping.
371
+ args = tree_map(partial(_remap_arg, node_to_obj), node.args)
372
+ kwargs = tree_map(partial(_remap_arg, node_to_obj), node.kwargs)
373
+
374
+ op_overload = cast(torch._ops.OpOverload, node.target)
375
+
376
+ if any(
377
+ a.is_shard()
378
+ for a in pytree.arg_tree_leaves(*args)
379
+ if isinstance(a, DSymInt)
380
+ ):
381
+ if op_overload in VIEW_SYM_INT_CONSUMERS:
382
+ assert len(kwargs) == 0, f"Expect empty kwargs, but got {kwargs}"
383
+ node_to_obj[node] = VIEW_SYM_INT_CONSUMERS[op_overload](node, args)
384
+ return None
385
+ elif op_overload in FACTORY_SYM_INT_CONSUMERS:
386
+ assert default_mesh is not None, "Requires default mesh for factory ops"
387
+ node_to_obj[node] = FACTORY_SYM_INT_CONSUMERS[op_overload](
388
+ node, args, kwargs, default_mesh
389
+ )
390
+ return None
391
+ else:
392
+ assert isinstance(logger, logging.Logger)
393
+ logger.warning(
394
+ "Assuming using local_value from SymInt for %s"
395
+ "is mathematically correct. Full args are %s.",
396
+ op_overload,
397
+ args,
398
+ )
399
+
400
+ if node.target == aten.view.default:
401
+ # HACK: work around the fact that some view operations on a
402
+ # "global" tensor are invalid usage, but the view operation on the
403
+ # batch input might still hit this path, so we convert the view op
404
+ # to reshape before calling DTensor
405
+ op_overload = aten.reshape.default
406
+
407
+ # DSymInt args are not sharded on any dimension, local value and global
408
+ # value should be the same
409
+ args = tree_map(lambda a: a.local_value if isinstance(a, DSymInt) else a, args)
410
+ kwargs = tree_map(
411
+ lambda a: a.local_value if isinstance(a, DSymInt) else a, kwargs
412
+ )
413
+
414
+ if op_overload in FACTORY_OPS:
415
+ # Don't pass factory ops to DTensor dispatch, as DTensor cannot
416
+ # propagate sharding spec without DTensor inputs.
417
+ node_to_obj[node] = FACTORY_OPS[op_overload](
418
+ node, args, kwargs, default_mesh
419
+ )
420
+ return None
421
+
422
+ dispatch = partial(
423
+ _dispatch_with_local_tensors,
424
+ op_overload,
425
+ kwargs=kwargs,
426
+ specs=args,
427
+ )
428
+
429
+ gm = make_fx(dispatch, _allow_non_fake_inputs=False)(args)
430
+ # FIXME(@wanchaol, @mrshenli): the above seems to accidentally capture
431
+ # DeviceMesh tensor ops when handling inplace operators? The ``_to_copy`` is
432
+ # not connected to graph output. So, using DCE to get rid of it, but this
433
+ # doesn't look correct.
434
+ #
435
+ # The following operators appear in the captured graph, where the dtype is
436
+ # torch.int64.
437
+ #
438
+ # get_attr _tensor_constant0 _tensor_constant0 ()
439
+ # call_function transpose aten.transpose.int (_tensor_constant0, -1, 0)
440
+ # call_function view aten.view.default (transpose, [-1, 2])
441
+ # call_function view_1 aten.view.default (view, [2])
442
+ # call_function _to_copy aten._to_copy.default (view_1,)
443
+ gm.graph.eliminate_dead_code()
444
+
445
+ return gm
446
+
447
+
448
+ def _build_dummy_add_graph(
449
+ dt: DTensor, node_to_obj: Dict[fx.Node, Any]
450
+ ) -> Tuple[fx.GraphModule, Any]:
451
+ """Create a graph for a dummy add function from a partial DTensor.
452
+
453
+ This dummy add is used for triggering all_reduce on a Partial DTensor
454
+ during the DTensor expansion of the traced graph.
455
+ Also returns the actual DTensor after resharding.
456
+ """
457
+
458
+ def dummy_add(grad: torch.Tensor, zero: torch.Tensor) -> torch.Tensor:
459
+ return grad + zero
460
+
461
+ grad: torch.Tensor = dt._local_tensor
462
+ zero: torch.Tensor = torch.zeros_like(dt._local_tensor)
463
+
464
+ traced_add = make_fx(dummy_add)(grad, zero)
465
+
466
+ placeholders = [n for n in traced_add.graph.nodes if n.op == OP.PLACEHOLDER]
467
+ call_functions = [n for n in traced_add.graph.nodes if n.op == OP.CALL_FUNCTION]
468
+ assert len(placeholders) == 2
469
+ assert len(call_functions) == 1
470
+ node_to_obj[placeholders[0]] = dt
471
+ node_to_obj[placeholders[1]] = DTensor.from_local(
472
+ zero, dt.device_mesh, [Replicate()], run_check=False
473
+ )
474
+
475
+ traced_dispatch = _get_dtensor_dispatch_graph(
476
+ call_functions[0], node_to_obj, force_make_fx=True
477
+ )
478
+ assert traced_dispatch is not None
479
+
480
+ # TODO(anj): This depends on the call function node -> actual DTensor output
481
+ # mapping that we want to avoid for SPMD expansion
482
+ return traced_dispatch, node_to_obj[call_functions[0]]
483
+
484
+
485
+ def _convert_output(
486
+ gm: fx.GraphModule,
487
+ node: fx.Node,
488
+ node_to_obj: Dict[fx.Node, Any],
489
+ ) -> fx.Node:
490
+ new_args = []
491
+ has_partial = False
492
+ for argument in node.args[0]: # type: ignore[union-attr]
493
+ if not isinstance(argument, fx.Node):
494
+ new_args.append(argument)
495
+ continue
496
+
497
+ obj = node_to_obj[argument]
498
+
499
+ if not _is_partial_dtensor(obj):
500
+ new_args.append(argument)
501
+ continue
502
+
503
+ has_partial = True
504
+
505
+ # we know it's a DTensor from the partial-DTensor check above
506
+ dt = cast(DTensor, obj)
507
+
508
+ traced_dispatch, result_obj = _build_dummy_add_graph(dt, node_to_obj)
509
+
510
+ wait = [
511
+ n
512
+ for n in traced_dispatch.graph.nodes
513
+ if n.name == "wait_comm" or n.name == "wait_tensor"
514
+ ]
515
+ add = [n for n in traced_dispatch.graph.nodes if n.name == "add"]
516
+ assert len(wait) == 1 and len(add) == 1
517
+
518
+ # remove add node and replace it with wait node
519
+ add[0].replace_all_uses_with(wait[0])
520
+ traced_dispatch.graph.eliminate_dead_code()
521
+ # also update the actual DTensor corresponding to the node
522
+ # TODO(anj): We require mapping of the final DTensor output to the wait
523
+ # comm node.
524
+ node_to_obj[wait[0]] = result_obj
525
+
526
+ value_remap: Dict[fx.Node, fx.Node] = {}
527
+ for dtn in traced_dispatch.graph.nodes:
528
+ if dtn.op == OP.PLACEHOLDER:
529
+ # map this dispatch-graph placeholder to the original
530
+ # argument node so later node copies resolve through value_remap
531
+ value_remap[dtn] = argument
532
+ elif dtn.op == OP.OUTPUT:
533
+ assert (
534
+ len(dtn.args) == 1 and len(dtn.args[0]) == 1
535
+ ), f"Expecting single output, but got {dtn.args} {len(dtn.args)}"
536
+ new_args.append(value_remap[dtn.args[0][0]])
537
+ # the concrete DTensor value of output was added when creating the
538
+ # inner graph (in _build_dummy_add_graph). Just add it to the final
539
+ # output node so that we can report the final output specs correctly.
540
+ # TODO(anj): We are depending on the concrete DTensor output of the dummy add.
541
+ node_to_obj[value_remap[dtn.args[0][0]]] = node_to_obj[dtn.args[0][0]]
542
+
543
+ else:
544
+ if dtn.op == OP.GET_ATTR:
545
+ setattr(
546
+ gm,
547
+ dtn.target,
548
+ getattr(traced_dispatch, dtn.target),
549
+ )
550
+ with gm.graph.inserting_before(node):
551
+ value_remap[dtn] = gm.graph.node_copy(dtn, lambda n: value_remap[n])
552
+ if has_partial:
553
+ gm.graph.erase_node(node)
554
+ return gm.graph.output(new_args)
555
+ else:
556
+ return node
557
+
558
+
559
+ def _rebuild_graph(
560
+ gm: fx.GraphModule,
561
+ node_replacements: Dict[torch.fx.Node, torch.fx.GraphModule],
562
+ ) -> None:
563
+ # replace nodes in local traced graph with DTensor's dispatch graph
564
+ for node in gm.graph.nodes:
565
+ if node not in node_replacements:
566
+ continue
567
+
568
+ traced_dispatch = node_replacements[node]
569
+ # Map DT's dispatch graph input placeholder nodes to the ones in
570
+ # local traced graph. It uses index-based accessing, which is
571
+ # brittle, just for testing purpose.
572
+ flatten_args = pytree.arg_tree_leaves(*node.args)
573
+ i, value_remap = 0, {}
574
+ for dtn in traced_dispatch.graph.nodes:
575
+ if dtn.op == OP.PLACEHOLDER:
576
+ value_remap[dtn] = flatten_args[i]
577
+ i += 1
578
+
579
+ # insert DT's dispatch graph to traced local graph.
580
+ with gm.graph.inserting_before(node):
581
+ for dtn in traced_dispatch.graph.nodes:
582
+ if dtn.op == OP.PLACEHOLDER:
583
+ # do nothing, ignore placeholders, as it has already
584
+ # been prepared in value_remap
585
+ pass
586
+ elif dtn.op == OP.OUTPUT:
587
+ assert (
588
+ len(dtn.args) == 1
589
+ ), f"Expecting single output, but got {dtn.args} {len(dtn.args[0])}"
590
+ outputs = dtn.args[0]
591
+ # we currently support two very specific types of output
592
+ # 1. single output
593
+ # 2. multiple outputs resulting from getitem of all elements of tuple
594
+ if len(outputs) == 1:
595
+ # for single output, we replace the node with the single node
596
+ output = outputs[0]
597
+ else:
598
+ # for multiple outputs, we check that these outputs correspond
599
+ # to all elements of a tuple. In that case, we replace
600
+ # uses of the output directly with the original tuple
601
+ source = None
602
+ for i, out in enumerate(outputs):
603
+ # we allow None outputs for certain items in the tuple
604
+ if out is None:
605
+ continue
606
+ assert out.op == "call_function"
607
+ assert out.target.__module__ == "_operator"
608
+ assert out.target.__name__ == "getitem"
609
+ assert source is None or source == out.args[0]
610
+ source = out.args[0]
611
+ assert out.args[1] == i
612
+ assert source is not None
613
+ output = source
614
+
615
+ new_node = value_remap[output]
616
+ node.replace_all_uses_with(new_node)
617
+ else:
618
+ value_remap[dtn] = gm.graph.node_copy(dtn, lambda n: value_remap[n])
619
+ if all(
620
+ isinstance(n.target, torch._ops.OpOverload)
621
+ and n.target._schema.name.startswith(
622
+ ("aten::_foreach", "aten::_fused_adam")
623
+ )
624
+ for n in [dtn, node]
625
+ ):
626
+ # FIXME(@mrshenli): This is a temporary solution enable
627
+ # foreach ops. The problem is that foreach ops returns
628
+ # List[Tensor], but make_fx will flatten that before
629
+ # passing those tensors to output node, which will
630
+ # introduce additional getitem nodes. These redundant
631
+ # getitem nodes breaks graph correctness as we cannot do
632
+ # getitem(getitem(foreach_out, 0), 0). This temporary
633
+ # solution skips getitem nodes in DTensor expanded
634
+ # subgraphs.
635
+ node.replace_all_uses_with(value_remap[dtn])
636
+ break
637
+ # explicitly erase node instead of relying on DCE, as DCE does not
638
+ # remove inplace copy_ correctly.
639
+ gm.graph.erase_node(node)
640
+
641
+ gm.graph.eliminate_dead_code()
642
+ gm.recompile()
643
+
644
+
645
+ def _get_last_consumer_to_nodes(
646
+ graph: fx.Graph,
647
+ ) -> Dict[fx.Node, List[fx.Node]]:
648
+ # Run through reverse nodes and record the first instance of a use
649
+ # of a given node. This represents the *last* use of the node in the
650
+ # execution order of the program, which we will use to free unused
651
+ # values
652
+ node_to_last_consumer: Dict[fx.Node, fx.Node] = {}
653
+ last_consumer_to_nodes: Dict[fx.Node, List[fx.Node]] = {}
654
+
655
+ def _register_final_consumer(arg_node: fx.Node, consumer: fx.Node) -> None:
656
+ if arg_node not in node_to_last_consumer:
657
+ node_to_last_consumer[arg_node] = consumer
658
+ last_consumer_to_nodes.setdefault(consumer, []).append(arg_node)
659
+
660
+ for node in reversed(graph.nodes):
661
+ fx.node.map_arg(
662
+ node.args, lambda arg_node: _register_final_consumer(arg_node, node)
663
+ )
664
+ fx.node.map_arg(
665
+ node.kwargs,
666
+ lambda kwarg_node: _register_final_consumer(kwarg_node, node),
667
+ )
668
+
669
+ return last_consumer_to_nodes
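To illustrate the reverse-traversal trick used by _get_last_consumer_to_nodes, the following standalone sketch (public torch.fx APIs only; the Tiny module is made up for the example) records the last consumer of every node in a symbolically traced graph, which is what lets the expansion below free intermediate objects early:

import torch
import torch.fx as fx

class Tiny(torch.nn.Module):
    def forward(self, x):
        y = x + 1
        z = y * 2
        return z + y

gm = fx.symbolic_trace(Tiny())
node_to_last_consumer = {}
for node in reversed(gm.graph.nodes):
    # Walking backwards, the first time a node shows up as an argument is its last use.
    fx.node.map_arg(node.args, lambda a: node_to_last_consumer.setdefault(a, node))
    fx.node.map_arg(node.kwargs, lambda a: node_to_last_consumer.setdefault(a, node))
print({n.name: consumer.name for n, consumer in node_to_last_consumer.items()})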
670
+
671
+
672
+ def _convert_to_distributed(
673
+ gm: fx.GraphModule,
674
+ inps: List[torch.Tensor],
675
+ schemas: List[Schema],
676
+ default_mesh: Optional[DeviceMesh] = None,
677
+ _allow_partial: bool = False,
678
+ ) -> Tuple[fx.GraphModule, Dict[str, Schema]]:
679
+ """Transform a graph module to a distributed graph module.
680
+
681
+ Returns:
682
+ - transformed graph module
683
+ - map from output name to DTensorSpec
684
+
685
+ """
686
+ global logger
687
+ logger = get_logger("spmd_exp")
688
+ operators = {getattr(operator, name) for name in operator.__all__}
689
+ node_to_obj: Dict[fx.Node, Any] = {}
690
+ # map local op node in traced_f to its corresponding subgraph of
691
+ # DTensor ops.
692
+ node_replacements: Dict[torch.fx.Node, torch.fx.GraphModule] = {}
693
+
694
+ last_consumer_to_nodes = _get_last_consumer_to_nodes(gm.graph)
695
+
696
+ output_schemas: Dict[str, Schema] = {}
697
+ for i, node in enumerate(gm.graph.nodes):
698
+ assert logger is not None
699
+ logger.info("node%s: op=%s target=%s", i, node.op, node.target)
700
+ if node.op == OP.PLACEHOLDER:
701
+ assert i < len(
702
+ inps
703
+ ), f"got more placeholder nodes ({i + 1}) than inputs ({len(inps)})"
704
+
705
+ # our example inputs are local shards. Create DTensors from them.
706
+ node_to_obj[node] = DTensor.from_local(
707
+ inps[i].clone(), # use clone to avoid modifications from inplace ops
708
+ schemas[i].mesh,
709
+ schemas[i].placements,
710
+ # prevent running this collective in backwards pass
711
+ run_check=False,
712
+ )
713
+ elif isinstance(node.target, torch._ops.OpOverloadPacket):
714
+ dtensor = cast(DTensor, node_to_obj[node.args[0]])
715
+ node_to_obj[node] = DSymInt.from_node(node, dtensor)
716
+ elif isinstance(node.target, torch._ops.OpOverload):
717
+ replacement = _get_dtensor_dispatch_graph(
718
+ node, node_to_obj, default_mesh=default_mesh
719
+ )
720
+ if replacement is not None:
721
+ node_replacements[node] = replacement
722
+ elif node.op == OP.OUTPUT:
723
+ if not _allow_partial:
724
+ # Returns an expanded dummy add node that ensures
725
+ # that the partial output tensor has been converted
726
+ # to a replicated tensor.
727
+ node = _convert_output(gm, node, node_to_obj)
728
+
729
+ # Save output sharding for the inputs to backward pass.
730
+ # TODO(anj): Pipe the output schema for the BW pass
731
+ # instead of requiring the full output DTensor to be
732
+ # materialized.
733
+ for inp_arg in node.args[0]:
734
+ if isinstance(inp_arg, fx.Node):
735
+ obj = node_to_obj[inp_arg]
736
+ if isinstance(obj, DTensor):
737
+ output_schemas[inp_arg.name] = Schema(
738
+ obj.device_mesh, obj.placements # type: ignore[arg-type]
739
+ )
740
+ elif node.op == OP.CALL_FUNCTION:
741
+ args = tree_map(partial(_remap_arg, node_to_obj), node.args)
742
+ kwargs = tree_map(partial(_remap_arg, node_to_obj), node.kwargs)
743
+
744
+ dsymints = list(
745
+ filter(lambda a: isinstance(a, DSymInt), args + tuple(kwargs.values()))
746
+ )
747
+
748
+ if node.target in operators and len(dsymints) > 0:
749
+ assert all(
750
+ dsymints[0].mesh == d.mesh for d in dsymints
751
+ ), "all DSymInts must have the same mesh. "
752
+
753
+ local_args = tree_map_only(DSymInt, lambda a: a.local_value, args)
754
+ local_kwargs = tree_map_only(DSymInt, lambda a: a.local_value, kwargs)
755
+
756
+ global_args = tree_map_only(DSymInt, lambda a: a.global_value, args)
757
+ global_kwargs = tree_map_only(DSymInt, lambda a: a.global_value, kwargs)
758
+
759
+ node.args = local_args
760
+ node.kwargs = local_kwargs
761
+
762
+ node_to_obj[node] = DSymInt(
763
+ local_value=node.target(*local_args, **local_kwargs),
764
+ global_value=node.target(*global_args, **global_kwargs),
765
+ mesh=dsymints[0].mesh,
766
+ )
767
+ else:
768
+ assert len(dsymints) == 0, (
769
+ "SPMD expansion does not support SymInt in non-operator "
770
+ f"nodes, got {node.target}."
771
+ )
772
+ node_to_obj[node] = node.target(*args, **kwargs)
773
+ else:
774
+ raise ValueError(f"Unrecognized node.op type {node.op}")
775
+
776
+ if node in last_consumer_to_nodes:
777
+ # Save memory by deleting objs that won't be used anymore.
778
+ for arg_node in last_consumer_to_nodes[node]:
779
+ del node_to_obj[arg_node]
780
+
781
+ _rebuild_graph(gm, node_replacements)
782
+
783
+ return gm, output_schemas
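To make the placeholder branch above concrete, here is a hedged, single-process sketch of the input wrapping it performs: a local example shard plus the schema's mesh and placements become a DTensor via from_local with run_check=False. The single-rank gloo setup, port, and tensor shape are assumptions for illustration only, not part of the SPMD pipeline.

import os
import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate

# Single-process bootstrap purely for illustration.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

mesh = DeviceMesh("cpu", [0])
local_shard = torch.randn(4, 8)
# Mirrors the placeholder handling: wrap the example input with its schema's
# mesh/placements and skip the run_check collective.
dt = DTensor.from_local(local_shard, mesh, [Replicate()], run_check=False)
print(dt.device_mesh, dt.placements)
dist.destroy_process_group()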
venv/lib/python3.10/site-packages/torch/distributed/_spmd/experimental_ops.py ADDED
@@ -0,0 +1,455 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import cast, List, Optional, Sequence, Tuple
3
+
4
+ import torch
5
+ from torch.distributed._tensor.op_schema import OpSchema, OutputSharding
6
+ from torch.distributed._tensor.ops.common_rules import pointwise_rule
7
+ from torch.distributed._tensor.ops.utils import register_prop_rule
8
+
9
+ from torch.distributed._tensor.placement_types import (
10
+ _Partial,
11
+ DTensorSpec,
12
+ Placement,
13
+ Replicate,
14
+ Shard,
15
+ TensorMeta,
16
+ )
17
+
18
+ aten = torch.ops.aten # pyre-ignore
19
+
20
+
21
+ @register_prop_rule( # pyre-ignore
22
+ [
23
+ aten._foreach_neg.default,
24
+ aten._foreach_reciprocal.default,
25
+ aten._foreach_sqrt.default,
26
+ ]
27
+ )
28
+ def _prop__foreach_unaop(op_schema: OpSchema) -> OutputSharding:
29
+ self = op_schema.args_schema[0]
30
+ assert isinstance(self, list) and all(isinstance(s, DTensorSpec) for s in self)
31
+ # FIXME(@mrshenli): for sqrt, this is only mathematically correct for
32
+ # Replicate and Shard tensor.
33
+ return OutputSharding(output_spec=self)
34
+
35
+
36
+ @register_prop_rule( # pyre-ignore
37
+ [
38
+ aten._foreach_add.List,
39
+ aten._foreach_div.List,
40
+ aten._foreach_mul.List,
41
+ ]
42
+ )
43
+ def _prop__foreach_binop_list(op_schema: OpSchema) -> OutputSharding:
44
+ self, other = op_schema.args_schema[:2]
45
+ scalar = None if len(op_schema.args_schema) < 3 else op_schema.args_schema[2]
46
+ assert isinstance(self, list) and all(
47
+ isinstance(s, DTensorSpec) for s in self
48
+ ), f"Expect a List[DTensorSpec] but got {self}"
49
+ assert isinstance(other, list) and all(
50
+ isinstance(o, DTensorSpec) for o in other
51
+ ), f"Expect a List[DTensorSpec] but got {other}"
52
+ assert len(self) == len(other), (
53
+ "Two tensor lists must match in length, "
54
+ f"but got {len(self)} and {len(other)}"
55
+ )
56
+
57
+ if any(s != o for s, o in zip(self, other)):
58
+ # If the DTensorSpecs of the two operands do not match, suggest using
59
+ # self's DTensorSpec. This will trigger allreduce if other is partial
60
+ # and self is replicated.
61
+ return OutputSharding(
62
+ output_spec=None,
63
+ schema_suggestions=[
64
+ OpSchema(
65
+ op=op_schema.op,
66
+ args_schema=(self, self, scalar) if scalar else (self, self),
67
+ kwargs_schema=op_schema.kwargs_schema,
68
+ )
69
+ ],
70
+ )
71
+ else:
72
+ return OutputSharding(output_spec=self)
73
+
74
+
75
+ @register_prop_rule( # pyre-ignore
76
+ [
77
+ aten._foreach_add.Scalar,
78
+ aten._foreach_div.Scalar,
79
+ aten._foreach_mul.Scalar,
80
+ aten._foreach_sub.Scalar,
81
+ ]
82
+ )
83
+ def _prop__foreach_binop_scalar(op_schema: OpSchema) -> OutputSharding:
84
+ self, scalar = op_schema.args_schema
85
+ assert isinstance(self, list) and all(isinstance(s, DTensorSpec) for s in self)
86
+ assert not isinstance(scalar, list)
87
+ return OutputSharding(output_spec=self)
88
+
89
+
90
+ @register_prop_rule( # pyre-ignore
91
+ [
92
+ aten._foreach_addcdiv.Scalar,
93
+ aten._foreach_addcmul.Scalar,
94
+ ]
95
+ )
96
+ def _prop__foreach_addcop_scalar(op_schema: OpSchema):
97
+ self, tensor1, tensor2 = op_schema.args_schema[:3]
98
+ scalar = None if len(op_schema.args_schema) < 4 else op_schema.args_schema[3]
99
+ assert isinstance(self, list) and all(isinstance(s, DTensorSpec) for s in self)
100
+ assert isinstance(tensor1, list) and all(isinstance(s, DTensorSpec) for s in self)
101
+ assert isinstance(tensor2, list) and all(isinstance(s, DTensorSpec) for s in self)
102
+ if any(s != t1 or s != t2 for s, t1, t2 in zip(self, tensor1, tensor2)):
103
+ # If the DTensorSpecs of the operands do not match, suggest using
104
+ # self's DTensorSpec. This will trigger allreduce if other is partial
105
+ # and self is replicated.
106
+ return OutputSharding(
107
+ output_spec=None,
108
+ schema_suggestions=[
109
+ OpSchema(
110
+ op=op_schema.op,
111
+ args_schema=(self, self, self, scalar)
112
+ if scalar
113
+ else (self, self, self),
114
+ kwargs_schema=op_schema.kwargs_schema,
115
+ )
116
+ ],
117
+ )
118
+ else:
119
+ return OutputSharding(output_spec=self)
120
+
121
+
122
+ @register_prop_rule([aten._foreach_pow.ScalarAndTensor]) # pyre-ignore
123
+ def _prop__foreach_pow_scalar_and_tensor(op_schema: OpSchema):
124
+ scala, exponent = op_schema.args_schema
125
+ assert isinstance(exponent, list) and all(
126
+ isinstance(s, DTensorSpec) for s in exponent
127
+ )
128
+ return OutputSharding(output_spec=exponent)
129
+
130
+
131
+ @register_prop_rule([aten._fused_adam.default]) # pyre-ignore
132
+ def _prop__fused_adam(op_schema: OpSchema):
133
+ NT = 5
134
+ tesnor_list_args: Tuple[List[DTensorSpec]] = op_schema.args_schema[:NT] # type: ignore[assignment]
135
+
136
+ assert all(isinstance(schema, list) for schema in tesnor_list_args)
137
+ assert all(
138
+ isinstance(s, DTensorSpec) for schema in tesnor_list_args for s in schema
139
+ )
140
+
141
+ tensor_schemas: Tuple[List[DTensorSpec]] = [ # type: ignore[assignment]
142
+ schema for schema in tesnor_list_args if len(schema)
143
+ ]
144
+
145
+ assert all(len(s) == len(tensor_schemas[0]) for s in tensor_schemas), (
146
+ "expect the same number of gradients and states, but got "
147
+ f"{[len(s) for s in tensor_schemas]}."
148
+ )
149
+
150
+ if any(any(t != ts[0] for t in ts) for ts in zip(*tensor_schemas)):
151
+ new_schemas: Tuple[List[DTensorSpec]] = tuple( # type: ignore[assignment]
152
+ op_schema.args_schema[0] if len(s) else s for s in tesnor_list_args
153
+ )
154
+ return OutputSharding(
155
+ output_spec=None,
156
+ schema_suggestions=[
157
+ OpSchema(
158
+ op=op_schema.op,
159
+ args_schema=new_schemas + op_schema.args_schema[NT:],
160
+ kwargs_schema=op_schema.kwargs_schema,
161
+ )
162
+ ],
163
+ )
164
+ else:
165
+ return OutputSharding(output_spec=(op_schema.args_schema[0],) * NT) # type: ignore[arg-type]
166
+
167
+
168
+ @register_prop_rule(aten.nll_loss_forward.default) # pyre-ignore
169
+ def _prop_nll_loss_forward(op_schema: OpSchema) -> OutputSharding:
170
+ self, target = op_schema.args_schema[:2]
171
+ assert isinstance(self, DTensorSpec)
172
+ assert isinstance(target, DTensorSpec)
173
+ if self.placements != target.placements:
174
+ # Self and target must match in placements, which should be shard along
175
+ # batch dimension in data parallel use cases. Force redistribute.
176
+
177
+ # need to create a new self instead of returning (target, target), as
178
+ # target and self might not match in shape.
179
+ new_self = DTensorSpec(
180
+ mesh=self.mesh,
181
+ placements=target.placements,
182
+ tensor_meta=self.tensor_meta,
183
+ )
184
+ return OutputSharding(
185
+ output_spec=None,
186
+ schema_suggestions=[
187
+ OpSchema(
188
+ op=op_schema.op,
189
+ args_schema=(new_self, target) + op_schema.args_schema[2:],
190
+ kwargs_schema=op_schema.kwargs_schema,
191
+ )
192
+ ],
193
+ )
194
+ else:
195
+ return OutputSharding(
196
+ output_spec=(
197
+ # by default, nll_loss_forward conducts a reduction and returns
198
+ # a scalar tensor, and hence the _Partial placements.
199
+ DTensorSpec(mesh=self.mesh, placements=(_Partial(),)),
200
+ # the 2nd output total_weight is always a scalar tensor
201
+ DTensorSpec(mesh=self.mesh, placements=(Replicate(),)),
202
+ )
203
+ )
204
+
205
+
206
+ @register_prop_rule(aten.nll_loss_backward.default) # pyre-ignore
207
+ def _prop_nll_loss_backward(op_schema: OpSchema) -> OutputSharding:
208
+ grad_output, self = op_schema.args_schema[:2]
209
+ assert isinstance(grad_output, DTensorSpec)
210
+ assert isinstance(self, DTensorSpec)
211
+ return OutputSharding(output_spec=self)
212
+
213
+
214
+ @register_prop_rule(aten.stack.default)
215
+ def _prop_stack(op_schema: OpSchema) -> OutputSharding:
216
+ tensors = op_schema.args_schema[0]
217
+ dim = 0 if len(op_schema.args_schema) == 1 else cast(int, op_schema.args_schema[1])
218
+ assert (
219
+ isinstance(tensors, list) and len(tensors) > 0
220
+ ), "expect at least one tensor to stack"
221
+ assert all(
222
+ isinstance(t, DTensorSpec) for t in tensors
223
+ ), f"expect a list of DTensorSpecs, but got {tensors}"
224
+ assert all(
225
+ t.shape == tensors[0].shape for t in tensors
226
+ ), f"expect all tensors to have the same shape, but got {tensors}."
227
+ # TODO: provide schema_suggestions when placements do not match
228
+ assert all(
229
+ t.placements == tensors[0].placements for t in tensors
230
+ ), f"expect all tensors to have the same placements, but got {tensors}."
231
+ assert all(
232
+ not p.is_shard(dim) for p in tensors[0].placements
233
+ ), "DTensor does not support stack on sharded dimension."
234
+
235
+ return OutputSharding(
236
+ output_spec=DTensorSpec(mesh=tensors[0].mesh, placements=tensors[0].placements)
237
+ )
238
+
239
+
240
+ @register_prop_rule(aten.select.int)
241
+ def _prop_select(op_schema: OpSchema) -> OutputSharding:
242
+ tensor, dim = op_schema.args_schema[:2]
243
+ assert isinstance(tensor, DTensorSpec)
244
+ assert isinstance(dim, int)
245
+ placements: Sequence[Placement] = tensor.placements
246
+ assert all(
247
+ not p.is_shard(dim) for p in placements
248
+ ), "DTensor does not support select on sharded dimension."
249
+
250
+ # select will remove one dimension, decrement dim of Shard placements by 1
251
+ # if they are larger than dim.
252
+ new_placements: List[Placement] = []
253
+ for p in placements:
254
+ # Using isinstance instead of is_shard so that mypy won't complain
255
+ # about accessing dim attribute.
256
+ if isinstance(p, Shard) and p.dim > dim:
257
+ new_placements.append(Shard(p.dim - 1))
258
+ else:
259
+ new_placements.append(p)
260
+
261
+ return OutputSharding(
262
+ output_spec=DTensorSpec(mesh=tensor.mesh, placements=tuple(new_placements))
263
+ )
264
+
265
+
266
+ @register_prop_rule(aten.native_layer_norm.default) # pyre-ignore
267
+ def _prop_native_layer_norm(op_schema: OpSchema) -> OutputSharding:
268
+ input, normalized_shape, weight, bias, eps = op_schema.args_schema
269
+ assert isinstance(input, DTensorSpec)
270
+ assert isinstance(normalized_shape, (tuple, list))
271
+ if weight is not None:
272
+ assert isinstance(weight, DTensorSpec)
273
+ assert all(isinstance(p, Replicate) for p in weight.placements)
274
+ if bias is not None:
275
+ assert isinstance(bias, DTensorSpec)
276
+ assert all(isinstance(p, Replicate) for p in bias.placements)
277
+ # only the left-most (non-normalized) dimensions of the input can be sharded
278
+ batch_ndim = len(input.shape) - len(normalized_shape)
279
+ assert all(
280
+ isinstance(p, Replicate) or (isinstance(p, Shard) and p.dim < batch_ndim)
281
+ for p in input.placements
282
+ )
283
+ stats_spec = DTensorSpec(
284
+ mesh=input.mesh,
285
+ placements=input.placements,
286
+ )
287
+ return OutputSharding(output_spec=(input, stats_spec, stats_spec))
288
+
289
+
290
+ @register_prop_rule(aten.native_layer_norm_backward.default) # pyre-ignore
291
+ def _prop_native_layer_norm_backward(op_schema: OpSchema) -> OutputSharding:
292
+ (
293
+ grad,
294
+ input,
295
+ normalized_shape,
296
+ result1,
297
+ result2,
298
+ weight,
299
+ bias,
300
+ grad_input_mask,
301
+ ) = op_schema.args_schema
302
+ assert isinstance(grad, DTensorSpec)
303
+ assert isinstance(grad_input_mask, (list, tuple))
304
+ if weight is not None:
305
+ assert isinstance(weight, DTensorSpec)
306
+ assert all(isinstance(s, Replicate) for s in weight.placements)
307
+ if bias is not None:
308
+ assert isinstance(bias, DTensorSpec)
309
+ assert all(isinstance(s, Replicate) for s in bias.placements)
310
+ # ensure sharding on dim 0, which will trigger the "Partial" output on
311
+ # weight and bias grads
312
+ assert any(
313
+ isinstance(s, Shard) and s.dim == 0 for s in grad.placements
314
+ ), f"Got {grad.placements}"
315
+ weight_grad = (
316
+ DTensorSpec(
317
+ mesh=weight.mesh,
318
+ placements=tuple([_Partial()] * weight.mesh.ndim),
319
+ )
320
+ if weight
321
+ else None
322
+ )
323
+ bias_grad = (
324
+ DTensorSpec(
325
+ mesh=bias.mesh,
326
+ placements=tuple([_Partial()] * bias.mesh.ndim),
327
+ )
328
+ if bias
329
+ else None
330
+ )
331
+ return OutputSharding(
332
+ # NOTE: type errors below are legit. This is because DTensor currently
333
+ # doesn't support Optional return values. Need to be fixed in DTensor repo.
334
+ output_spec=(
335
+ grad if grad_input_mask[0] else None,
336
+ weight_grad if grad_input_mask[1] else None,
337
+ bias_grad if grad_input_mask[2] else None,
338
+ ),
339
+ )
340
+
341
+
342
+ def _refine_sharding(
343
+ op_schema: OpSchema, active_dim: Optional[int]
344
+ ) -> Sequence[Placement]:
345
+ """Considers 2 first inputs of op_schema as having same shape, and returns suggested placement for a pointwise operation."""
346
+ # consider the operating dimension as a singleton to prevent sharding on it
347
+ # however, if active_dim is None, this means the input and output shapes are equal and
348
+ # we'll apply exactly the pointwise rule.
349
+
350
+ args_schema = []
351
+ for s in op_schema.args_schema[:2]:
352
+ assert isinstance(s, DTensorSpec) and s.tensor_meta is not None
353
+ args_schema.append(
354
+ DTensorSpec(
355
+ mesh=s.mesh, # type: ignore[attr-defined]
356
+ placements=s.placements, # type: ignore[attr-defined]
357
+ tensor_meta=TensorMeta(
358
+ shape=torch.Size(
359
+ s.shape[0:active_dim] + (1,) + s.shape[active_dim + 1 :]
360
+ )
361
+ if active_dim is not None
362
+ else s.shape,
363
+ stride=s.tensor_meta.stride,
364
+ dtype=s.tensor_meta.dtype,
365
+ ),
366
+ )
367
+ )
368
+
369
+ op_schema = OpSchema(
370
+ op=op_schema.op,
371
+ args_schema=args_schema, # type: ignore[arg-type]
372
+ kwargs_schema={},
373
+ )
374
+ output_sharding = pointwise_rule(op_schema, linearity=False)
375
+ if output_sharding.output_spec:
376
+ assert isinstance(output_sharding.output_spec, DTensorSpec)
377
+ return output_sharding.output_spec.placements
378
+ else:
379
+ assert output_sharding.schema_suggestions is not None
380
+ out_schema = output_sharding.schema_suggestions[0].args_schema[0]
381
+ assert isinstance(out_schema, DTensorSpec)
382
+ return tuple(out_schema.placements)
383
+
384
+
385
+ @register_prop_rule(aten.slice_scatter.default) # pyre-ignore
386
+ def prop_slice_scatter(op_schema: OpSchema) -> OutputSharding:
387
+ # 1. number of dimensions in input and src need to match.
388
+ # 2. number of elements on all dimensions other than dim need to match between input and src.
389
+ # 3. number of elements in src along dim need to match the slice size.
390
+ # Given the above:
391
+ # - We suggest for src to follow the sharding of input, except on the scatter dimension,
392
+ # where our best bet for now is to make them replicated as a fall-back.
393
+ # TODO: Ideally we'd like to make sure the output is re-sharded afterwards to keep input sharding.
394
+
395
+ defaults = (None, None, 0, None, None, 1)
396
+ input, src, dim, start, end, step = (
397
+ op_schema.args_schema + defaults[len(op_schema.args_schema) :]
398
+ )
399
+ assert isinstance(input, DTensorSpec)
400
+ assert isinstance(src, DTensorSpec)
401
+ assert isinstance(dim, int)
402
+
403
+ if dim < 0:
404
+ dim += input.ndim
405
+
406
+ # if the input shape and the output shape are the same on the operating dimension,
407
+ # this is effectively a no-op, so we just propagate sharding as we would do for
408
+ # pointwise, no exceptions.
409
+ if input.shape[dim] == src.shape[dim]:
410
+ assert start == 0
411
+ assert end >= src.shape[dim] # type: ignore[operator]
412
+ dim = None
413
+
414
+ # apply sharding refinement as implemented in pointwise_rule
415
+ input_suggestion = list(_refine_sharding(op_schema, dim))
416
+ # apply the exception -- disallow sharding on the operating dimension.
417
+ for i, p in enumerate(input_suggestion):
418
+ if isinstance(p, Shard) and p.dim == dim:
419
+ input_suggestion[i] = Replicate()
420
+ input_suggestion = tuple(input_suggestion) # type: ignore[assignment]
421
+
422
+ if input_suggestion == tuple(input.placements) and src.placements == tuple(
423
+ input.placements
424
+ ):
425
+ # if our sharding is correct, the output sharding will be the same as the input.
426
+ return OutputSharding(
427
+ output_spec=DTensorSpec(
428
+ mesh=input.mesh,
429
+ placements=input.placements,
430
+ )
431
+ )
432
+ else:
433
+ # otherwise, return the suggestion.
434
+ return OutputSharding(
435
+ output_spec=None,
436
+ schema_suggestions=[
437
+ OpSchema(
438
+ op=op_schema.op,
439
+ args_schema=(
440
+ DTensorSpec(
441
+ mesh=input.mesh,
442
+ placements=input_suggestion,
443
+ tensor_meta=input.tensor_meta,
444
+ ),
445
+ DTensorSpec(
446
+ mesh=src.mesh,
447
+ placements=input_suggestion,
448
+ tensor_meta=src.tensor_meta,
449
+ ),
450
+ )
451
+ + op_schema.args_schema[2:],
452
+ kwargs_schema=op_schema.kwargs_schema,
453
+ )
454
+ ],
455
+ )
venv/lib/python3.10/site-packages/torch/distributed/_spmd/gm_transformation.py ADDED
@@ -0,0 +1,51 @@
1
+ from typing import Callable
2
+
3
+ from torch import fx
4
+ from torch.distributed._spmd.graph_optimization import (
5
+ comm_fusion_with_concat,
6
+ enable_graph_optimization_dump,
7
+ remove_copy_from_optimizer,
8
+ schedule_comm_wait,
9
+ )
10
+ from torch.distributed._spmd.graph_utils import dump_graphs_to_files
11
+ from torch.distributed._spmd.iter_graph_module import IterGraphModule
12
+
13
+
14
+ class GraphModuleTransformation:
15
+ def __init__(
16
+ self,
17
+ *,
18
+ enable_graph_optimization: bool = False,
19
+ enable_inductor: bool = False,
20
+ dump_graphs: bool = False,
21
+ ) -> None:
22
+ self.enable_graph_optimization = enable_graph_optimization
23
+ self.enable_inductor = enable_inductor
24
+ self.dump_graphs = dump_graphs
25
+
26
+ def __call__(self, gm: fx.GraphModule) -> Callable:
27
+ if self.dump_graphs:
28
+ graph_folder = dump_graphs_to_files(
29
+ {"before_transformation_gm": gm.print_readable(False)}
30
+ )
31
+ enable_graph_optimization_dump(graph_folder)
32
+
33
+ iter_gm = IterGraphModule(gm, enable_inductor=self.enable_inductor)
34
+ if self.enable_graph_optimization:
35
+ comm_fusion_with_concat(iter_gm, 100)
36
+ schedule_comm_wait(iter_gm)
37
+ remove_copy_from_optimizer(iter_gm)
38
+ # Must be called once we are done moving nodes across the graphs
39
+ iter_gm.finalize_setup()
40
+
41
+ if self.dump_graphs:
42
+ dump_graphs_to_files(
43
+ {
44
+ "iter_graph_setup_gm": iter_gm.setup_gm.print_readable(False),
45
+ "iter_graph_main_gm": iter_gm.main_gm.print_readable(False),
46
+ "iter_graph_cleanup_gm": iter_gm.cleanup_gm.print_readable(False),
47
+ },
48
+ graph_folder, # type: ignore[possibly-undefined]
49
+ )
50
+
51
+ return iter_gm
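A hedged usage sketch: GraphModuleTransformation is a callable that turns a traced fx.GraphModule into an (optionally optimized) IterGraphModule, so it is meant to be handed to whatever drives the SPMD tracing. The wiring below assumes the companion torch.distributed._spmd.api.compile decorator accepts a gm_transformation callable and a train_step of this shape; treat both as illustrative rather than authoritative.

from torch.distributed._spmd.api import compile  # assumed companion entry point
from torch.distributed._spmd.gm_transformation import GraphModuleTransformation

# Turn on the fusion/scheduling passes and dump graphs for inspection.
transform = GraphModuleTransformation(enable_graph_optimization=True, dump_graphs=True)

@compile(gm_transformation=transform)  # hypothetical wiring
def train_step(model, optimizer, batch):
    optimizer.zero_grad()
    loss = model(batch).sum()
    loss.backward()
    optimizer.step()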
venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_optimization.py ADDED
@@ -0,0 +1,986 @@
1
+ # Owner(s): ["oncall: distributed"]
2
+ import collections
3
+ import itertools
4
+ import logging
5
+ import operator
6
+ import tempfile
7
+ import time
8
+ from dataclasses import dataclass, field
9
+ from functools import wraps
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ cast,
14
+ DefaultDict,
15
+ Dict,
16
+ Iterable,
17
+ List,
18
+ Optional,
19
+ Set,
20
+ Tuple,
21
+ Union,
22
+ )
23
+
24
+ import torch
25
+ import torch.fx as fx
26
+ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
27
+ from torch.distributed._spmd.graph_utils import (
28
+ CommType,
29
+ dump_graphs_to_files,
30
+ find_node,
31
+ get_output,
32
+ OP,
33
+ )
34
+ from torch.distributed._spmd.iter_graph_module import IterGraphModule
35
+ from torch.fx.passes.shape_prop import TensorMetadata
36
+ from torch.utils import _pytree as pytree
37
+ from torch.utils._pytree import tree_flatten, tree_unflatten
38
+
39
+ logger: logging.Logger = logging.getLogger("graph_optimization")
40
+ aten = torch.ops.aten
41
+ fake_tensor_mode = FakeTensorMode()
42
+
43
+ _optimized_func: Set[str] = set()
44
+ # The key is the target pass and the value is the prerequisites of the pass.
45
+ _prerequisite_sets: DefaultDict[str, Set[str]] = collections.defaultdict(set)
46
+ # The key is the target pass and the value is the passes that must be applied before
47
+ # the key.
48
+ _apply_before_sets: DefaultDict[str, Set[str]] = collections.defaultdict(set)
49
+ _dump_graph_folder: str = ""
50
+
51
+
52
+ def enable_graph_optimization_dump(folder: str = ""):
53
+ global _dump_graph_folder
54
+ if not folder:
55
+ folder = tempfile.mkdtemp()
56
+ _dump_graph_folder = folder
57
+
58
+
59
+ # TODO(@fegin): Support multiple runs of graph optimization
60
+ # TODO(@fegin): With this design, circular imports will happen when a pass
61
+ # developer accidentally creates a pass dependency cycle. As a result, we need to
62
+ # break this file into a finer granularity to avoid incorrect circular import.
63
+ def graph_optimization_pass(
64
+ prerequisites: Iterable[Callable],
65
+ apply_after: Iterable[Callable],
66
+ ) -> Callable:
67
+ """Define the contract of a graph optimization pass.
68
+
69
+ All the passes should be wrapped with this decorator.
70
+ `prerequisites` is used to annotate the prerequisite passes of this pass.
71
+ `apply_after` means that this wrapped pass must be applied after the passes
72
+ in `apply_after`. The difference between `prerequisites` and `apply_after`
73
+ is that all the passes in `prerequisites` must be applied to the graph and
74
+ must be applied before the wrapped pass, while the passes in `apply_after` are
75
+ optional. But if a pass in `apply_after` is applied to the graph, it has to
76
+ be done before the wrapped pass.
77
+ Optimizer pass developers are required to add these fields accordingly and
78
+ users need to follow the restrictions to avoid the assert.
79
+
80
+ Current design has one limitation: users can only apply the optimizations
81
+ once. In some cases, we may need to run the same optimization multiple
82
+ times, e.g., optimization passes -> profiling the result -> apply
83
+ optimization passes with the profiling result again. This limitation will
84
+ be addressed in the future.
85
+
86
+ Args:
87
+ prerequisites (Iterable[Callable]): the list of string to the names of
88
+ passes which are the prerequisites of this pass.
89
+ apply_after (Iterable[Callable]): the list of string to the names of
90
+ passes that can not be applied after the wrapped pass.
91
+ """
92
+
93
+ def inner(func: Callable) -> Callable:
94
+ def make_key(func: Callable) -> str:
95
+ return f"{func.__module__}.{func.__name__}"
96
+
97
+ func_key = make_key(func)
98
+ _prerequisite_sets[func_key] = {make_key(f) for f in prerequisites}
99
+ for apply_after_pass in apply_after:
100
+ _apply_before_sets[make_key(apply_after_pass)].add(func_key)
101
+
102
+ @wraps(func)
103
+ def pass_wrapper(
104
+ gm: Union[fx.GraphModule, IterGraphModule], *args: Any, **kwargs: Any
105
+ ) -> None:
106
+ begin = time.time()
107
+ assert isinstance(gm, (fx.GraphModule, IterGraphModule)), (
108
+ "The first argument of the pass must be either "
109
+ "fx.GraphModule or IterGraphModule."
110
+ )
111
+ assert func_key not in _optimized_func, f"Cannot apply {func_key} twice."
112
+ invalid_passes = _apply_before_sets[func_key].intersection(_optimized_func)
113
+ assert (
114
+ not invalid_passes
115
+ ), f"{invalid_passes} must be applied after {func_key}."
116
+ assert _prerequisite_sets[func_key].issubset(_optimized_func), (
117
+ f"{_prerequisite_sets[func_key] - _optimized_func} are the "
118
+ f"prerequisites of {func_key} but are not applified. "
119
+ f"Applied passes are {_optimized_func}."
120
+ )
121
+
122
+ func(gm, *args, **kwargs)
123
+ gm.graph.lint()
124
+ gm.graph.eliminate_dead_code()
125
+ gm.recompile()
126
+ _optimized_func.add(func_key)
127
+
128
+ prefix = f"after_{func.__name__}"
129
+ if _dump_graph_folder:
130
+ if isinstance(gm, IterGraphModule):
131
+ dump_graphs_to_files(
132
+ {
133
+ f"{prefix}_setup_gm": gm.setup_gm,
134
+ f"{prefix}_main_gm": gm.main_gm,
135
+ f"{prefix}_cleanup_gm": gm.cleanup_gm,
136
+ },
137
+ _dump_graph_folder,
138
+ )
139
+ else:
140
+ dump_graphs_to_files({prefix: gm}, _dump_graph_folder)
141
+
142
+ logger.info("Spent %f seconds applying %s", time.time() - begin, func_key)
143
+
144
+ return pass_wrapper
145
+
146
+ return inner
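As a minimal sketch of the contract spelled out in the docstring above, the pass below declares no prerequisites and simply tags aten add nodes; the decorator's wrapper then lints, eliminates dead code, and recompiles the module. The pass name and body are illustrative only.

import torch
import torch.fx as fx
from torch.distributed._spmd.graph_optimization import graph_optimization_pass

@graph_optimization_pass(prerequisites=[], apply_after=[])
def annotate_adds(gm: fx.GraphModule) -> None:
    # Illustrative pass body: mark every aten add node in the graph.
    for node in gm.graph.nodes:
        if node.op == "call_function" and node.target == torch.ops.aten.add.Tensor:
            node.meta["annotated"] = True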
147
+
148
+
149
+ @dataclass(unsafe_hash=True)
150
+ class CommBlock:
151
+ shape: Optional[torch.Size]
152
+ node_list: List[fx.Node]
153
+ inputs: List[fx.Node]
154
+ wait_nodes: List[fx.Node]
155
+ comm_node: fx.Node
156
+ outputs: Set[fx.Node]
157
+
158
+
159
+ def get_comm_block(comm_node: fx.Node) -> CommBlock:
160
+ """Find out all the nodes belong to this communcation given a collective node (e.g., allreduce).
161
+
162
+ Args:
163
+ comm_node(fx.Node): The target communication/collective node.
164
+
165
+ Returns:
166
+ The CommBlock that encapsulates the related nodes (e.g., wait_node) of
167
+ the given comm_node.
168
+ """
169
+ # We choose 5 to prevent accidental infinite loops. But with
170
+ # functional collectives, the distance is 1.
171
+ MAX_WAIT_DISTANCE = 5
172
+ node_list = []
173
+ wait_nodes = []
174
+ inputs = pytree.arg_tree_leaves(*comm_node.args, **comm_node.kwargs)
175
+ input_nodes = [inp for inp in inputs if isinstance(inp, fx.Node)]
176
+ distance = 0
177
+ wait_prefixes = ("wait_comm", "wait_tensor")
178
+ non_end_users_nodes = ("split", "reshape", "getitem", "detach", "alias")
179
+
180
+ nodes = collections.deque([comm_node, None])
181
+ while nodes and distance < MAX_WAIT_DISTANCE:
182
+ node = nodes.popleft()
183
+ if node is None:
184
+ distance += 1
185
+ if nodes:
186
+ nodes.append(None)
187
+ continue
188
+ node_list.append(node)
189
+ if node.name.startswith(wait_prefixes):
190
+ wait_nodes.append(node)
191
+ else:
192
+ for child in node.users:
193
+ if isinstance(child, fx.Node):
194
+ nodes.append(child)
195
+
196
+ if not wait_nodes:
197
+ raise RuntimeError(
198
+ "The wait nodes are too far away from the comm node {comm_node}."
199
+ )
200
+
201
+ # Identify all the outputs of this collective block.
202
+ outputs: Set[fx.Node] = set()
203
+ nodes = collections.deque(wait_nodes)
204
+ while nodes:
205
+ node = nodes.popleft()
206
+ assert node is not None
207
+ for user in node.users:
208
+ if isinstance(user, fx.Node) and user.name.startswith(non_end_users_nodes):
209
+ nodes.append(user)
210
+ node_list.append(user)
211
+ else:
212
+ outputs.add(node)
213
+ break
214
+
215
+ # TODO: populate all the tensor metadata and remove the default.
216
+ tensor_meta = input_nodes[0].meta.get("tensor_meta", None)
217
+ return CommBlock(
218
+ # TODO: support symbolic shapes
219
+ shape=torch.Size(int(s) for s in tensor_meta.shape) if tensor_meta else None,
220
+ node_list=node_list,
221
+ wait_nodes=wait_nodes,
222
+ comm_node=comm_node,
223
+ inputs=input_nodes,
224
+ outputs=outputs,
225
+ )
226
+
227
+
228
+ def get_all_comm_blocks(
229
+ gm: IterGraphModule, comm_ops: Union[Tuple[str, ...], str]
230
+ ) -> List[CommBlock]:
231
+ return [
232
+ get_comm_block(node)
233
+ for node in gm.graph.nodes
234
+ if node.name.startswith(comm_ops)
235
+ ]
236
+
237
+
238
+ def _create_meta_val(
239
+ fake_tensor_mode: FakeTensorMode,
240
+ val: FakeTensor,
241
+ ) -> FakeTensor:
242
+ # TODO: fix the memory_format
243
+ return FakeTensor(
244
+ fake_tensor_mode,
245
+ torch.empty(
246
+ val.shape,
247
+ dtype=val.dtype,
248
+ device="meta",
249
+ requires_grad=val.requires_grad,
250
+ ),
251
+ val.device,
252
+ )
253
+
254
+
255
+ def _create_meta_tensor_meta(
256
+ fake_tensor_mode: FakeTensorMode,
257
+ val: FakeTensor,
258
+ ) -> TensorMetadata:
259
+ return TensorMetadata(
260
+ shape=val.shape,
261
+ dtype=val.dtype,
262
+ requires_grad=val.requires_grad,
263
+ stride=val.stride, # type: ignore[arg-type]
264
+ # TODO: fix these value
265
+ memory_format=None,
266
+ is_quantized=False,
267
+ qparams={},
268
+ )
269
+
270
+
271
+ def _call_function(
272
+ gm: IterGraphModule,
273
+ fake_tensor_mode: FakeTensorMode,
274
+ meta_val: Optional[FakeTensor],
275
+ function: Any,
276
+ *args: Any,
277
+ **kwargs: Any,
278
+ ) -> fx.Node:
279
+ node = gm.graph.call_function(function, args, kwargs)
280
+
281
+ if meta_val is None:
282
+ flat_args, spec = tree_flatten((args, kwargs))
283
+ new_flat_args = []
284
+ memory_format = None
285
+ for arg in flat_args:
286
+ if not isinstance(arg, fx.Node):
287
+ new_flat_args.append(arg)
288
+ continue
289
+ val = arg.meta["val"]
290
+ new_flat_args.append(_create_meta_val(fake_tensor_mode, val))
291
+
292
+ fake_args, fake_kwargs = tree_unflatten(new_flat_args, spec)
293
+ new_meta_val = function(*fake_args, **fake_kwargs)
294
+ else:
295
+ new_meta_val = meta_val
296
+ node.meta["val"] = new_meta_val
297
+ node.meta["tensor_meta"] = _create_meta_tensor_meta(fake_tensor_mode, new_meta_val)
298
+ return node
299
+
300
+
301
+ def _scatter_wait_result(
302
+ gm: IterGraphModule,
303
+ fused_comm_block: CommBlock,
304
+ comm_blocks: List[CommBlock],
305
+ node_indices: Dict[fx.Node, int],
306
+ ) -> None:
307
+ """Scatter the result of the fused communication node to the original users -- splitting the output and reshape each subitem."""
308
+ last_wait_node_idx = 0
309
+ for node in gm.graph.nodes:
310
+ if node == fused_comm_block.comm_node:
311
+ break
312
+ last_wait_node_idx = max(
313
+ node_indices.get(node, last_wait_node_idx), last_wait_node_idx
314
+ )
315
+
316
+ fused_comm_node = fused_comm_block.comm_node
317
+ fused_wait_node = fused_comm_block.wait_nodes[0]
318
+
319
+ with gm.graph.inserting_after(fused_wait_node):
320
+ split_node = gm.graph.call_function(
321
+ aten.split,
322
+ (
323
+ fused_wait_node,
324
+ # TODO(@fegin): support symbolic shapes
325
+ [int(cast(torch.Size, cb.shape).numel()) for cb in comm_blocks],
326
+ ),
327
+ )
328
+
329
+ # Scatter the split result.
330
+ need_sort_nodes = []
331
+ last_split_reshape_node = split_node
332
+ with gm.graph.inserting_after(split_node):
333
+ for idx, comm_block in enumerate(comm_blocks):
334
+ # Some users of the original allreduce and wait are scheduled
335
+ # before the fused allreduce. We must move these users to a
336
+ # correct topological sort order -- right after the last fused
337
+ # allreduce result, the `last_split_reshape_node` variable.
338
+ orig_wait = comm_block.wait_nodes[0]
339
+ nodes = collections.deque(list(orig_wait.users))
340
+ while nodes:
341
+ user_node = nodes.popleft()
342
+ if not isinstance(user_node, fx.Node):
343
+ continue
344
+ if node_indices[user_node] < last_wait_node_idx:
345
+ need_sort_nodes.append(user_node)
346
+ nodes.extend(list(user_node.users))
347
+
348
+ split_idx_node = gm.graph.call_function(operator.getitem, (split_node, idx))
349
+ with gm.graph.inserting_after(split_idx_node):
350
+ wait_output_node = gm.graph.call_function(
351
+ aten.reshape, (split_idx_node, comm_block.shape)
352
+ )
353
+ gm.graph.node_replace_all_uses_with(orig_wait, wait_output_node)
354
+
355
+ if last_split_reshape_node == split_node:
356
+ last_split_reshape_node = wait_output_node # type: ignore[possibly-undefined]
357
+
358
+ need_sort_nodes = sorted(need_sort_nodes, key=lambda node: node_indices[node])
359
+ gm.graph.move_after(need_sort_nodes, last_split_reshape_node)
360
+
361
+ gm.graph.eliminate_dead_code()
362
+
363
+
364
+ def _fuse_with_cat(
365
+ gm: IterGraphModule,
366
+ comm_blocks: List[CommBlock],
367
+ node_indices: Dict[fx.Node, int],
368
+ ) -> CommBlock:
369
+ """Fuse the CommBlocks using concat given a list of CommBlock (only allreduce)."""
370
+ # Find the last input node.
371
+ last_input_node = comm_blocks[0].inputs[0]
372
+ last_input_index = -1
373
+ all_input_nodes = []
374
+ for comm_block in comm_blocks:
375
+ input_node = comm_block.inputs[0]
376
+ # If the input node is a clone, this is CommTensor based implementation.
377
+ if input_node.name.startswith("clone"):
378
+ input_node = cast(fx.Node, input_node.args[0])
379
+ all_input_nodes.append(input_node)
380
+ index = node_indices[input_node]
381
+ if index >= last_input_index:
382
+ assert index != last_input_index
383
+ last_input_node = input_node
384
+ last_input_index = index
385
+
386
+ # Flatten all the inputs right after the last input is ready.
387
+ with gm.graph.inserting_after(last_input_node):
388
+ cat_inputs = []
389
+ for input_node in all_input_nodes:
390
+ cat_inputs.append(
391
+ _call_function(
392
+ gm, fake_tensor_mode, None, aten.flatten.using_ints, input_node
393
+ )
394
+ )
395
+
396
+ with gm.graph.inserting_after(cat_inputs[0]):
397
+ cat_node = _call_function(gm, fake_tensor_mode, None, aten.cat, cat_inputs)
398
+
399
+ # Create a new Comm node.
400
+ last_comm = comm_blocks[-1]
401
+ last_comm_node = last_comm.comm_node
402
+ last_wait_node = last_comm.wait_nodes[0]
403
+ with gm.graph.inserting_after(cat_node):
404
+ flatten_args, spec = tree_flatten((last_comm_node.args, last_comm_node.kwargs))
405
+ flatten_args[0] = cat_node
406
+ args, kwargs = tree_unflatten(flatten_args, spec)
407
+ fused_comm_node = _call_function(
408
+ gm,
409
+ fake_tensor_mode,
410
+ cat_node.meta["val"],
411
+ last_comm_node.target,
412
+ *args,
413
+ **kwargs,
414
+ )
415
+
416
+ # Create a new Wait node.
417
+ with gm.graph.inserting_after(fused_comm_node):
418
+ flatten_args, spec = tree_flatten((last_wait_node.args, last_wait_node.kwargs))
419
+ flatten_args[0] = fused_comm_node
420
+ args, kwargs = tree_unflatten(flatten_args, spec)
421
+ fused_wait_node = _call_function(
422
+ gm,
423
+ fake_tensor_mode,
424
+ cat_node.meta["val"],
425
+ last_wait_node.target,
426
+ *args,
427
+ **kwargs,
428
+ )
429
+
430
+ # Move the fused_comm_node and its args to right after the source node
431
+ nodes_to_move = cat_inputs + [cat_node, fused_comm_node, fused_wait_node]
432
+ gm.graph.move_after(nodes_to_move, last_input_node)
433
+
434
+ tensor_meta = cat_node.meta.get("tensor_meta")
435
+ fused_comm_block = CommBlock(
436
+ shape=tensor_meta.shape, # type: ignore[union-attr]
437
+ node_list=[fused_comm_node, fused_wait_node],
438
+ wait_nodes=[fused_wait_node],
439
+ comm_node=fused_comm_node,
440
+ inputs=[cat_node],
441
+ outputs={fused_wait_node},
442
+ )
443
+
444
+ _scatter_wait_result(gm, fused_comm_block, comm_blocks, node_indices)
445
+
446
+ return fused_comm_block
447
+
448
+
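A minimal eager-mode sketch (not part of this file) of the pattern that `_fuse_with_cat` and `_scatter_wait_result` construct at the fx-graph level: flatten each gradient, concatenate, run a single allreduce, then split and reshape the result back. `grads` is a hypothetical list of gradient tensors and `torch.distributed` is assumed to be initialized.

import torch
import torch.distributed as dist

def fused_allreduce_eager(grads):
    # Flatten every gradient and pack them into one flat buffer.
    flat = torch.cat([g.flatten() for g in grads])
    # One collective call instead of len(grads) calls.
    dist.all_reduce(flat)
    # Split the reduced buffer back and restore the original shapes.
    chunks = torch.split(flat, [g.numel() for g in grads])
    return [c.reshape(g.shape) for c, g in zip(chunks, grads)]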
449
+ def _expedite_comm_ops(gm: IterGraphModule, comm_blocks: List[CommBlock]) -> None:
450
+ node_indices = {node: i for i, node in enumerate(gm.graph.nodes)}
451
+ for comm_block in comm_blocks:
452
+ last_input = comm_block.comm_node
453
+ last_input_idx = -1
454
+ for input in comm_block.inputs:
455
+ input_idx = node_indices[input]
456
+ if input_idx > last_input_idx:
457
+ last_input = input
458
+ last_input_idx = input_idx
459
+ gm.graph.node_append(last_input, comm_block.comm_node)
460
+
461
+
462
+ @graph_optimization_pass(
463
+ prerequisites=[],
464
+ apply_after=[],
465
+ )
466
+ def comm_fusion_with_concat(
467
+ gm: IterGraphModule,
468
+ bucket_size_mb: int,
469
+ ) -> None:
470
+ """Run fuse communication with concat.
471
+
472
+ This implementation uses concat to concatenate the bucketed gradients.
473
+ """
474
+ comm_blocks = get_all_comm_blocks(gm, (CommType.ALLREDUCE, "all_reduce"))
475
+ # First ensure the allreduces are scheduled immediately after the gradients.
476
+ _expedite_comm_ops(gm, comm_blocks)
477
+ # Get the comm_blocks based on the new order.
478
+ comm_blocks = get_all_comm_blocks(gm, (CommType.ALLREDUCE, "all_reduce"))
479
+ node_indices = {node: i for i, node in enumerate(gm.graph.nodes)}
480
+
481
+ bucket_size = 1 * 1024**2
482
+ bucket_cap_size = bucket_size_mb * 1024**2
483
+ begin = end = curr_size = 0
484
+ while end < len(comm_blocks):
485
+ # TODO: determine the dtype
486
+ curr_size += cast(torch.Size, comm_blocks[end].shape).numel() * 4
487
+ end += 1
488
+ if curr_size < bucket_size:
489
+ continue
490
+ _fuse_with_cat(gm, comm_blocks[begin:end], node_indices)
491
+ bucket_size = bucket_cap_size
492
+ begin = end
493
+ curr_size = 0
494
+ else:
495
+ if begin < len(comm_blocks):
496
+ _fuse_with_cat(gm, comm_blocks[begin:end], node_indices)
497
+
498
+
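As a standalone illustration of the greedy bucketing policy above, the hypothetical helper below groups gradient sizes into buckets: the first bucket is capped at 1 MB so the first fused collective can start early, later buckets use the user-provided cap, and the 4 bytes per element mirrors the fp32 assumption noted in the TODO.

def bucket_indices(numels, bucket_size_mb):
    first_cap, later_cap = 1 * 1024**2, bucket_size_mb * 1024**2
    cap, begin, curr_size = first_cap, 0, 0
    buckets = []
    for end, numel in enumerate(numels, start=1):
        curr_size += numel * 4  # assume fp32 gradients
        if curr_size >= cap:
            buckets.append((begin, end))
            cap, begin, curr_size = later_cap, end, 0
    if begin < len(numels):
        buckets.append((begin, len(numels)))  # trailing partial bucket
    return buckets

# Example: gradients of 0.3 MB, 0.9 MB, and 2 MB worth of fp32 elements.
print(bucket_indices([75_000, 225_000, 500_000], bucket_size_mb=25))  # [(0, 2), (2, 3)]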
499
+ @graph_optimization_pass(
500
+ prerequisites=[comm_fusion_with_concat],
501
+ apply_after=[],
502
+ )
503
+ def schedule_comm_wait(gm: IterGraphModule) -> None:
504
+ """Delay the execution of wait tensors of allreduce until its first user."""
505
+ comm_blocks = get_all_comm_blocks(gm, (CommType.ALLREDUCE, "all_reduce"))
506
+
507
+ # Find all the end users.
508
+ allreduce_users: Set[fx.Node] = set()
509
+ for allreduce in comm_blocks:
510
+ for output in allreduce.outputs:
511
+ allreduce_users.update(output.users)
512
+
513
+ node_indices = {node: i for i, node in enumerate(gm.graph.nodes)}
514
+ for allreduce in comm_blocks:
515
+ # Find the earliest users.
516
+ assert (
517
+ len(allreduce.outputs) >= 1
518
+ ), f"Found a allreduce that has zero outputs/users -- {allreduce}."
519
+ # Initialize the target_node to be the first user of the first output.
520
+ target_node = next(iter(next(iter(allreduce.outputs)).users))
521
+ target_node_index = 2**31
522
+ for user in (user for output in allreduce.outputs for user in output.users):
523
+ index = node_indices[user]
524
+ if index < target_node_index:
525
+ target_node = user
526
+ target_node_index = index
527
+
528
+ # Move wait nodes and all the subsequent output nodes before the
529
+ # earliest user.
530
+ wait_idx = -1
531
+ for wait_idx, node in enumerate(allreduce.node_list):
532
+ if node == allreduce.wait_nodes[0]:
533
+ break
534
+ assert wait_idx >= 0
535
+ gm.graph.move_before(allreduce.node_list[wait_idx:], target_node)
536
+
537
+
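The core of the pass above is a topological-index lookup; the hypothetical helper below shows the same idea on a plain fx.Graph, where `wait_node` and `outputs` would come from a CommBlock.

import torch.fx as fx

def delay_wait(graph: fx.Graph, wait_node: fx.Node, outputs):
    # Topological index of every node in the graph.
    node_indices = {node: i for i, node in enumerate(graph.nodes)}
    # Earliest user among all users of the wait's outputs.
    users = [user for output in outputs for user in output.users]
    target = min(users, key=lambda user: node_indices[user])
    # fx.Node.prepend inserts wait_node right before target.
    target.prepend(wait_node)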
538
+ @graph_optimization_pass(
539
+ prerequisites=[],
540
+ apply_after=[],
541
+ )
542
+ def remove_copy_from_optimizer(gm: IterGraphModule) -> None:
543
+ """Erase the orphant copy_ that generated when tracing optimizer.
544
+
545
+ There are two reasons why we cannot simply use the DCE of fx.Graph:
546
+ 1. fx.Graph treats copy_ as a side-effect node and does not erase it.
547
+ 2. Users may want to preserve some orphan `copy_` that is not from the
548
+ optimizer.
549
+ If the second reason does not hold, this pass can be rewritten to use
550
+ DCE from fx.Graph (with an override of the side-effect node list).
551
+ """
552
+ MAX_COPY_DISTANCE = 5
553
+ remove_candidates: Set[fx.Node] = set()
554
+ for node in reversed(gm.graph.nodes):
555
+ if node.users:
556
+ continue
557
+ if node.op != OP.CALL_FUNCTION or node.target != aten.copy_.default:
558
+ continue
559
+
560
+ copy_ancestors: Set[fx.Node] = set()
561
+ nodes = collections.deque([node, None])
562
+ distance = 0
563
+ should_remove = False
564
+ while nodes and distance < MAX_COPY_DISTANCE:
565
+ visiting = nodes.popleft()
566
+ if visiting is None:
567
+ distance += 1
568
+ if nodes:
569
+ nodes.append(None)
570
+ continue
571
+ copy_ancestors.add(visiting)
572
+ if visiting.op == OP.CALL_FUNCTION and str(visiting.target).startswith(
573
+ ("aten._foreach_", "aten._fused_")
574
+ ):
575
+ should_remove = True
576
+ parents = pytree.arg_tree_leaves(*visiting.args, **visiting.kwargs)
577
+ for parent in parents:
578
+ if isinstance(parent, fx.Node):
579
+ nodes.append(parent)
580
+ if should_remove:
581
+ # We add all ancestors to the list and it is okay as not all of
582
+ # them will be erased -- only those nodes with zero users will be
583
+ # erased.
584
+ remove_candidates.update(copy_ancestors)
585
+
586
+ for node in reversed(gm.graph.nodes):
587
+ if node.users:
588
+ continue
589
+ if node not in remove_candidates:
590
+ continue
591
+ gm.graph.erase_node(node)
592
+
593
+
594
+ # The args list of the fused_adam function. We don't care about kwargs.
595
+ AdamArgs = collections.namedtuple(
596
+ "AdamArgs",
597
+ ["params", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps"],
598
+ )
599
+
600
+
601
+ # TODO(fegin): Have a template class for all Block class.
602
+ @dataclass(unsafe_hash=True)
603
+ class FusedAdamBlock:
604
+ optim_node: fx.Node
605
+ generate_output: bool
606
+ # The output list of the copy nodes. The order follows the argument order.
607
+ param_outputs: List[fx.Node] = field(default_factory=list)
608
+ grad_outputs: List[fx.Node] = field(default_factory=list)
609
+ exp_avgs_outputs: List[fx.Node] = field(default_factory=list)
610
+ exp_avg_sqs_outputs: List[fx.Node] = field(default_factory=list)
611
+ # TODO(fegin): populate/generate the max_exp_avg_sqs if exists
612
+ max_exp_avg_sqs: List[fx.Node] = field(default_factory=list)
613
+
614
+ def generate_outputs(self):
615
+ # Iterate all the args and generate the corresponding output lists.
616
+ # Assuming the corresponding output nodes are not created yet.
617
+ def _generate_outputs(arg_idx, output_list):
618
+ graph = self.optim_node.graph
619
+ with graph.inserting_after(self.optim_node):
620
+ optim_getitem = graph.call_function(
621
+ operator.getitem, (self.optim_node, arg_idx)
622
+ )
623
+ for i, arg in enumerate(self.optim_node.args[arg_idx]):
624
+ with graph.inserting_after(optim_getitem):
625
+ updated_arg = graph.call_function(
626
+ operator.getitem, (optim_getitem, i)
627
+ )
628
+ with graph.inserting_after(updated_arg):
629
+ output_copy = graph.call_function(aten.copy_, (arg, updated_arg))
630
+ output_list.append(output_copy)
631
+
632
+ _generate_outputs(0, self.param_outputs)
633
+ # Do not generate the gradient output list as it is not used.
634
+ _generate_outputs(2, self.exp_avgs_outputs)
635
+ _generate_outputs(3, self.exp_avg_sqs_outputs)
636
+
637
+ def populate_outputs(self):
638
+ # Populate the existing output lists from the graph.
639
+ def _populate_outputs(args_idx, output_list):
640
+ optim_getitem = self.optim_node
641
+ for user in self.optim_node.users:
642
+ assert (
643
+ user.target == operator.getitem
644
+ ), f"The user of {self.optim_node} is not getitem."
645
+ if user.args[1] == args_idx:
646
+ optim_getitem = user
647
+ break
648
+ assert (
649
+ optim_getitem != self.optim_node
650
+ ), f"Cannot find the getitem node for {self.optim_node}"
651
+ output_list.extend(
652
+ [self.optim_node] * len(cast(List[fx.Node], self.optim_node.args[0]))
653
+ )
654
+ for updated_arg in optim_getitem.users:
655
+ assert (
656
+ updated_arg.target == operator.getitem
657
+ ), f"Unexpected node target {updated_arg.target}."
658
+ idx = updated_arg.args[1]
659
+ output_copy = next(iter(updated_arg.users))
660
+ assert str(output_copy.target).startswith(
661
+ "aten.copy_"
662
+ ), f"Unexpected node target {output_copy.target}."
663
+ output_list[idx] = output_copy
664
+ for i, output in enumerate(output_list):
665
+ assert output != self.optim_node, f"{i}th output is not replaced."
666
+
667
+ assert output_list, f"The output for {self.optim_node} is empty."
668
+
669
+ _populate_outputs(0, self.param_outputs)
670
+ _populate_outputs(2, self.exp_avgs_outputs)
671
+ _populate_outputs(3, self.exp_avg_sqs_outputs)
672
+
673
+ def __post_init__(self):
674
+ if self.param_outputs:
675
+ return
676
+ if self.generate_output:
677
+ self.generate_outputs()
678
+ else:
679
+ self.populate_outputs()
680
+
681
+
682
+ @dataclass(unsafe_hash=True)
683
+ class ForeachAddBlock:
684
+ add_node: fx.Node
685
+ generate_output: bool
686
+ # The output list of the copy nodes. The order follows the argument order.
687
+ outputs: List[fx.Node] = field(default_factory=list)
688
+
689
+ def generate_outputs(self):
690
+ # Iterate all the args and generate the corresponding output lists
691
+ # Assuming the corresponding output nodes are not created yet.
692
+ graph = self.add_node.graph
693
+ for i, arg in enumerate(cast(Tuple[Any, ...], self.add_node.args[0])):
694
+ with graph.inserting_after(self.add_node):
695
+ updated_arg = graph.call_function(operator.getitem, (self.add_node, i))
696
+ with graph.inserting_after(updated_arg):
697
+ output_copy = graph.call_function(aten.copy_, (arg, updated_arg))
698
+ self.outputs.append(output_copy)
699
+ assert self.outputs, f"The output for {self.add_node} is empty."
700
+
701
+ def populate_outputs(self):
702
+ # Populate the existing output lists from the graph.
703
+ self.outputs = [
704
+ self.add_node for _ in cast(Tuple[Any, ...], self.add_node.args[0])
705
+ ]
706
+ for updated_arg in self.add_node.users:
707
+ assert (
708
+ updated_arg.target == operator.getitem
709
+ ), f"Unexpected node target {updated_arg.target}"
710
+ idx = cast(int, updated_arg.args[1])
711
+ output_copy = next(iter(updated_arg.users))
712
+ assert str(output_copy.target).startswith(
713
+ "aten.copy_"
714
+ ), f"The execpted output node is different, {str(output_copy.target)}"
715
+ self.outputs[idx] = output_copy
716
+ for i, output in enumerate(self.outputs):
717
+ assert output != self.add_node, f"{i}th output is not replaced."
718
+
719
+ def __post_init__(self):
720
+ if self.outputs:
721
+ return
722
+
723
+ if self.generate_output:
724
+ self.generate_outputs()
725
+ else:
726
+ self.populate_outputs()
727
+
728
+
729
+ @dataclass(unsafe_hash=True)
730
+ class FusedOptimizerBlock:
731
+ step: ForeachAddBlock
732
+ optim: FusedAdamBlock
733
+
734
+
735
+ def get_fused_optimizer_block(optim_node: fx.Node) -> FusedOptimizerBlock:
736
+ """Given a fused optimizer node and return the FusedOptimizerBlock."""
737
+ MAX_STEP_DISTANCE = 5
738
+ # Find the step (foreach_add)
739
+ nodes = collections.deque([optim_node, None])
740
+ step_node = optim_node
741
+ distance = 0
742
+ while nodes and distance < MAX_STEP_DISTANCE:
743
+ node = nodes.popleft()
744
+ if node is None:
745
+ distance += 1
746
+ if nodes:
747
+ nodes.append(None)
748
+ continue
749
+ elif node.op == OP.CALL_FUNCTION and str(node.target).startswith(
750
+ "aten._foreach_add"
751
+ ):
752
+ step_node = node
753
+ break
754
+ else:
755
+ nodes.extend(
756
+ a
757
+ for a in pytree.arg_tree_leaves(*node.args, **node.kwargs)
758
+ if isinstance(a, fx.Node)
759
+ )
760
+ if step_node == optim_node:
761
+ raise RuntimeError(
762
+ "Cannot find step node (foreach_add) for the optimizer node "
763
+ f"{optim_node} with {MAX_STEP_DISTANCE} BFS distance. "
764
+ "The API design does not match the tracing graph."
765
+ )
766
+
767
+ step = ForeachAddBlock(step_node, generate_output=False)
768
+ optim = FusedAdamBlock(optim_node, generate_output=False)
769
+ return FusedOptimizerBlock(step, optim)
770
+
771
+
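Both `get_fused_optimizer_block` above and `remove_copy_from_optimizer` earlier rely on a breadth-first search with a `None` level marker in the deque. The generic sketch below (hypothetical names) shows how that marker bounds the search by hop distance without storing per-node depths.

import collections

def bounded_bfs(start, neighbors, predicate, max_distance):
    queue = collections.deque([start, None])
    distance = 0
    while queue and distance < max_distance:
        node = queue.popleft()
        if node is None:            # one BFS level fully processed
            distance += 1
            if queue:
                queue.append(None)  # re-arm the marker for the next level
            continue
        if predicate(node):
            return node
        queue.extend(neighbors(node))
    return None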
772
+ def get_all_fused_optimizer_blocks(
773
+ gm: IterGraphModule, optim_ops: Union[Tuple[str, ...], str]
774
+ ) -> List[FusedOptimizerBlock]:
775
+ """Find all the FusedOptimizerBlock that the optimizer operators are in `optim_ops`."""
776
+ return [
777
+ get_fused_optimizer_block(node)
778
+ for node in gm.graph.nodes
779
+ if node.name.startswith(optim_ops)
780
+ ]
781
+
782
+
783
+ def _split_fused_adam(
784
+ gm: IterGraphModule,
785
+ orig_optim_block: FusedOptimizerBlock,
786
+ split_gradients: Set[fx.Node],
787
+ ) -> Tuple[FusedOptimizerBlock, FusedOptimizerBlock]:
788
+ """Split the `orig_optim_block` into two FusedOptimizerBlock.
789
+
790
+ The first one will be the optimizer that optimizes `split_gradients`. The second one is
791
+ used to optimize the remaining gradients.
792
+ An error will be raised if one of the split optimizers would optimize zero gradients.
793
+ """
794
+ orig_optim_args = AdamArgs(*orig_optim_block.optim.optim_node.args)
795
+ optim_args = (AdamArgs([], [], [], [], [], []), AdamArgs([], [], [], [], [], []))
796
+ # The only hint we can use to split the optimizer is the order/indices.
797
+ orig_optim_indices: Tuple[List[int], List[int]] = ([], [])
798
+ orig_step_indices: Tuple[List[int], List[int]] = ([], [])
799
+
800
+ for idx, gradient in enumerate(orig_optim_args.grads):
801
+ group_idx = 0 if gradient in split_gradients else 1
802
+ orig_optim_indices[group_idx].append(idx)
803
+ # Get the argument for idx-th gradient from orig_optim_args
804
+ for orig_arg, optim_arg in zip(orig_optim_args, optim_args[group_idx]):
805
+ # Only add the argument to the list if the original argument list
806
+ # is not empty. If the original argument list is empty, the new
807
+ # one must be an empty list as well.
808
+ if orig_arg:
809
+ optim_arg.append(orig_arg[idx])
810
+
811
+ # If the argument order of step is the same as the optimizer's, nothing has to be
812
+ # done. However, it is risky to rely on this assumption so we populate
813
+ # the orig_step_indices.
814
+ orig_step_output = optim_args[group_idx].state_steps[-1]
815
+ assert str(orig_step_output.target).startswith(
816
+ "aten.copy_"
817
+ ), f"The copy output is {orig_step_output.target}, expect aten.copy_"
818
+ orig_step_getitem = orig_step_output.args[1]
819
+ assert "getitem" in str(
820
+ orig_step_getitem.target
821
+ ), f"The copy getitem is {orig_step_getitem.target}, expect operator.getitem"
822
+ orig_step_idx = orig_step_getitem.args[1]
823
+ orig_step_indices[group_idx].append(orig_step_idx)
824
+
825
+ if not all(l for l in (orig_step_indices + orig_optim_indices)):
826
+ raise ValueError("At least one split optimizer does not have input.")
827
+
828
+ output = get_output(gm.graph)
829
+ results: List[FusedOptimizerBlock] = []
830
+ flatten_output_args, spec = tree_flatten((output.args, output.kwargs))
831
+ flatten_output_args_indices: DefaultDict[
832
+ fx.Node, Set[int]
833
+ ] = collections.defaultdict(set)
834
+ for idx, output_arg in enumerate(flatten_output_args):
835
+ if isinstance(output_arg, fx.Node):
836
+ flatten_output_args_indices[output_arg].add(idx)
837
+
838
+ def replace_flatten_output_args(orig_node: fx.Node, new_node: fx.Node):
839
+ for idx in flatten_output_args_indices[orig_node]:
840
+ flatten_output_args[idx] = new_node
841
+
842
+ # Create the new step and optim nodes and blocks.
843
+ for group_idx in range(2):
844
+ step_args: List[fx.Node] = []
845
+ orig_step_outputs: List[fx.Node] = []
846
+ # We have to create the new step node and block first because it is used
847
+ # for the new optim node as the input.
848
+ with gm.graph.inserting_after(orig_optim_block.optim.optim_node):
849
+ for idx in orig_step_indices[group_idx]:
850
+ step_args.append(
851
+ cast(Tuple[fx.Node, ...], orig_optim_block.step.add_node.args[0])[
852
+ idx
853
+ ]
854
+ )
855
+ orig_step_outputs.append(orig_optim_block.step.outputs[idx])
856
+ step = gm.graph.call_function(
857
+ aten._foreach_add.Scalar,
858
+ (step_args, 1),
859
+ )
860
+ step_block = ForeachAddBlock(step, generate_output=True)
861
+ for i, step_output in enumerate(step_block.outputs):
862
+ # Replace the original step output in the graph output node with
863
+ # the new one.
864
+ orig_step_output = orig_step_outputs[i]
865
+ replace_flatten_output_args(orig_step_output, step_output)
866
+ # Also need to replace the step output used for the new optimizer.
867
+ assert optim_args[group_idx].state_steps[i] == orig_step_output, (
868
+ f"The expected step output node mismatched, {orig_step_output} "
869
+ f"{optim_args[group_idx].state_steps[i]}"
870
+ )
871
+ optim_args[group_idx].state_steps[i] = step_output
872
+
873
+ # Insert the optimizer node after the first step output because its
874
+ # topo sort order is the last.
875
+ with gm.graph.inserting_after(step_block.outputs[0]):
876
+ optim = gm.graph.call_function(
877
+ aten._fused_adam.default,
878
+ optim_args[group_idx],
879
+ orig_optim_block.optim.optim_node.kwargs,
880
+ )
881
+ optim_block = FusedAdamBlock(optim, generate_output=True)
882
+ for curr_idx, orig_idx in enumerate(orig_optim_indices[group_idx]):
883
+ list_names = ("param_outputs", "exp_avgs_outputs", "exp_avg_sqs_outputs")
884
+ for name in list_names:
885
+ orig_list = getattr(orig_optim_block.optim, name)
886
+ curr_list = getattr(optim_block, name)
887
+ replace_flatten_output_args(orig_list[orig_idx], curr_list[curr_idx])
888
+
889
+ results.append(FusedOptimizerBlock(step_block, optim_block))
890
+
891
+ # Optimizer is used as the output of the train_step. Therefore, we have to
892
+ # update the output node of the graph.
893
+ output_args, output_kwargs = tree_unflatten(flatten_output_args, spec)
894
+ gm.graph.node_set_args(output, output_args)
895
+ gm.graph.node_set_kwargs(output, output_kwargs)
896
+ # Remove the original copy_ nodes as they won't be removed by DCE.
897
+ for copy_output in itertools.chain(
898
+ orig_optim_block.optim.param_outputs,
899
+ orig_optim_block.optim.exp_avgs_outputs,
900
+ orig_optim_block.optim.exp_avg_sqs_outputs,
901
+ ):
902
+ gm.graph.erase_node(copy_output)
903
+ # Call DCE once to get rid of the old optimizer. By doing so, we will be
904
+ # able to erase the copy_ nodes of step later.
905
+ gm.graph.eliminate_dead_code()
906
+ for copy_output in orig_optim_block.step.outputs:
907
+ gm.graph.erase_node(copy_output)
908
+ # This is not strictly required, but we call it for consistency.
909
+ gm.graph.eliminate_dead_code()
910
+
911
+ return results[0], results[1]
912
+
913
+
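The list-partitioning step at the top of `_split_fused_adam` can be read in isolation as the pure-Python sketch below: each per-parameter list in `AdamArgs` is split into two groups based on whether its gradient belongs to `split_gradients` (the helper name is illustrative).

def partition_adam_args(args: AdamArgs, split_gradients):
    groups = (AdamArgs([], [], [], [], [], []), AdamArgs([], [], [], [], [], []))
    for idx, grad in enumerate(args.grads):
        group = 0 if grad in split_gradients else 1
        # Copy the idx-th entry of every non-empty per-parameter list.
        for src_list, dst_list in zip(args, groups[group]):
            if src_list:
                dst_list.append(src_list[idx])
    return groups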
914
+ def split_fused_optimizer(
915
+ gm: IterGraphModule,
916
+ optim_block: FusedOptimizerBlock,
917
+ split_gradients: Set[fx.Node],
918
+ ) -> Tuple[FusedOptimizerBlock, FusedOptimizerBlock]:
919
+ if not split_gradients:
920
+ raise ValueError("The given split_gradients is empty.")
921
+ if str(optim_block.optim.optim_node.target).startswith("aten._fused_adam"):
922
+ return _split_fused_adam(gm, optim_block, split_gradients)
923
+ else:
924
+ raise NotImplementedError("Only fused_adam is supported now")
925
+
926
+
927
+ # TODO(fegin): The API only support fused adam now. Should extend it to support
928
+ # foreach as well.
929
+ @graph_optimization_pass(
930
+ prerequisites=[remove_copy_from_optimizer],
931
+ apply_after=[schedule_comm_wait],
932
+ )
933
+ def iter_move_grads_and_optimizers(
934
+ gm: IterGraphModule,
935
+ target_comm_node: str,
936
+ target_dest_node: str,
937
+ ) -> None:
938
+ """Extract a comm block and split out a new optimizer and step for it.
939
+
940
+ This subgraph is then moved to the forward graph.
941
+ """
942
+ for comm_block in get_all_comm_blocks(gm, "all_reduce"):
943
+ if comm_block.comm_node.name == target_comm_node:
944
+ break
945
+ else:
946
+ raise ValueError(f"Cannot find {target_comm_node}")
947
+
948
+ optim_blocks = get_all_fused_optimizer_blocks(gm, "_fused_adam")
949
+ for optim_block in optim_blocks:
950
+ optim_args = AdamArgs(*optim_block.optim.optim_node.args)
951
+ one_output = next(iter(comm_block.outputs))
952
+ if one_output in optim_args.grads:
953
+ break
954
+ else:
955
+ raise ValueError(f"{target_comm_node} is not used by any fused optimizer.")
956
+
957
+ move_optim, _ = split_fused_optimizer(gm, optim_block, comm_block.outputs)
958
+
959
+ move_nodes = find_all_descendants(
960
+ gm, [comm_block.comm_node, move_optim.step.add_node]
961
+ )
962
+
963
+ stop_node = find_node(gm.graph, lambda n: n.name == target_dest_node)[0]
964
+
965
+ gm.graph.move_to_next_iter_before(move_nodes, stop_node)
966
+
967
+
968
+ def find_all_descendants(
969
+ gm: IterGraphModule,
970
+ parent_nodes: List[fx.Node],
971
+ ) -> List[fx.Node]:
972
+ """Identify the list of nodes to move during FX graph transformation."""
973
+ assert len(parent_nodes) > 0, "No parent nodes are given."
974
+
975
+ output = get_output(gm.graph)
976
+ dq_parent_nodes = collections.deque(parent_nodes)
977
+ move_node_set = set()
978
+ while dq_parent_nodes:
979
+ node = dq_parent_nodes.popleft()
980
+ move_node_set.add(node)
981
+ dq_parent_nodes += [
982
+ u for u in node.users if isinstance(u, fx.Node) and u != output
983
+ ]
984
+ move_nodes = [node for node in gm.graph.nodes if node in move_node_set]
985
+
986
+ return move_nodes
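To see what `find_all_descendants` collects, here is a self-contained toy: a re-implementation of the same traversal on a plain `fx.GraphModule`, not an import of the helper above. The traced module and the expected node names are illustrative.

import collections
import operator
import torch
import torch.fx as fx

class Tiny(torch.nn.Module):
    def forward(self, x):
        a = x + 1
        b = a * 2
        return b - x

gm = fx.symbolic_trace(Tiny())
output = next(n for n in reversed(gm.graph.nodes) if n.op == "output")
start = next(n for n in gm.graph.nodes if n.target is operator.add)

queue, seen = collections.deque([start]), set()
while queue:
    node = queue.popleft()
    seen.add(node)
    queue.extend(u for u in node.users if u is not output and u not in seen)

# Descendants of `a = x + 1`, in graph order, excluding the output node.
print([n.name for n in gm.graph.nodes if n in seen])  # e.g. ['add', 'mul', 'sub']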
venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_utils.py ADDED
@@ -0,0 +1,145 @@
1
+ import logging
2
+ import os
3
+ import tempfile
4
+ from enum import Enum
5
+ from typing import Callable, cast, Dict, Iterable, List, Set
6
+
7
+ import torch.fx as fx
8
+ from torch.fx.passes.shape_prop import TensorMetadata
9
+ from torch.utils import _pytree as pytree
10
+ from torch.utils._pytree import tree_flatten, tree_unflatten
11
+
12
+
13
+ logger: logging.Logger = logging.getLogger("graph_utils")
14
+
15
+
16
+ class OP(str, Enum):
17
+ CALL_FUNCTION = "call_function"
18
+ CALL_MODULE = "call_module"
19
+ CALL_METHOD = "call_method"
20
+ GET_ATTR = "get_attr"
21
+ OUTPUT = "output"
22
+ PLACEHOLDER = "placeholder"
23
+
24
+
25
+ class CommType(str, Enum):
26
+ ALLREDUCE = "allreduce_"
27
+ ALLGATHER = "allgather_"
28
+ BROADCAST = "broadcast_"
29
+ REDUCESCATTER = "reduce_scatter_"
30
+ SCATTER = "scatter_"
31
+
32
+
33
+ def get_node_tensor_metadata(node: fx.Node, is_required: bool = True) -> TensorMetadata:
34
+ metadata = node.meta.get("tensor_meta", None)
35
+ if is_required and metadata is None:
36
+ raise RuntimeError(
37
+ f"Callsite expects that ``tensor_meta`` exists in ``{node.name}``, "
38
+ f"but got None instead. Node: {node.op} {node.name} {node.target}"
39
+ )
40
+ return metadata
41
+
42
+
43
+ def get_output(graph: fx.Graph) -> fx.Node:
44
+ """Take a graphmodule and return the graph output node.
45
+
46
+ We traverse in reverse to expedite the search, since the last node should be the output.
47
+ """
48
+ for node in reversed(graph.nodes):
49
+ if node.op == OP.OUTPUT:
50
+ return node
51
+ raise RuntimeError(f"Cannot find the output node in {graph}")
52
+
53
+
54
+ def find_node(
55
+ graph: fx.Graph, predicate: Callable, reverse_order: bool = False
56
+ ) -> List[fx.Node]:
57
+ """Take a predicate and return all the nodes in the `graph` where the predicate holds."""
58
+ nodes = cast(Iterable[fx.Node], graph.nodes)
59
+ if reverse_order:
60
+ nodes = cast(Iterable[fx.Node], iter(reversed(nodes))) # type: ignore[call-overload]
61
+ return [node for node in nodes if predicate(node)]
62
+
63
+
64
+ def is_leaf_subgraph(graph: fx.Graph, subgraph: List[fx.Node]) -> bool:
65
+ """Ensure nodes in ``subgraph`` satisfy one of the following rules.
66
+
67
+ 1. The user of the node is in ``subgraph``.
68
+ 2. The user of the node is output.
69
+ 3. There are no users -- the node is a side-effect node.
70
+ """
71
+ all_nodes: Set[fx.Node] = set(subgraph)
72
+ output = get_output(graph)
73
+ for node in subgraph:
74
+ for user in node.users:
75
+ if not isinstance(user, fx.Node):
76
+ continue
77
+ if user not in all_nodes and user != output:
78
+ return False
79
+ return True
80
+
81
+
82
+ def clone_subgraph(
83
+ graph: fx.Graph, subgraph: List[fx.Node], target: fx.Node
84
+ ) -> List[fx.Node]:
85
+ """Clone the given subgraph and insert it before ``target``.
86
+
87
+ This API currently does not support inserting after ``target``.
88
+ """
89
+ all_nodes = set(subgraph)
90
+ mapping: Dict[fx.Node, fx.Node] = dict()
91
+ cloned_subgraph = []
92
+ with graph.inserting_before(target):
93
+ for node in subgraph:
94
+ cloned_node = graph.call_function(
95
+ node.target, node.args, node.kwargs, node.type
96
+ )
97
+ # TODO: there are many flatten/unflatten in IterGraph that
98
+ # can be simplified with tree_map. Will simplify this in
99
+ # a follow-up PR.
100
+ original_input = pytree.arg_tree_leaves(*node.args, **node.kwargs)
101
+ cloned_input, spec = tree_flatten((cloned_node.args, cloned_node.kwargs))
102
+ mapped_cloned_input = []
103
+ for original_input_node, cloned_input_node in zip(
104
+ original_input, cloned_input
105
+ ):
106
+ if (
107
+ isinstance(original_input_node, fx.Node)
108
+ and original_input_node in all_nodes
109
+ ):
110
+ assert original_input_node in mapping
111
+ mapped_cloned_input.append(mapping[original_input_node])
112
+ else:
113
+ mapped_cloned_input.append(cloned_input_node)
114
+ cloned_node.args, cloned_node.kwargs = tree_unflatten(
115
+ mapped_cloned_input, spec
116
+ )
117
+ mapping[node] = cloned_node
118
+ cloned_subgraph.append(cloned_node)
119
+
120
+ return cloned_subgraph
121
+
122
+
123
+ def rebuild_graph(gm: fx.GraphModule, remove_dead_code: bool = True) -> None:
124
+ """Run the required steps to ensure production-ready graph.
125
+
126
+ Note - per the fx docs, elimination of dead code is not very precise.
127
+ Hence, the flag to make this step optional.
128
+ """
129
+ gm.graph.lint()
130
+ if remove_dead_code:
131
+ gm.graph.eliminate_dead_code()
132
+ gm.recompile()
133
+
134
+
135
+ def dump_graphs_to_files(graphs: Dict[str, fx.GraphModule], folder: str = "") -> str:
136
+ if not folder:
137
+ folder = tempfile.mkdtemp()
138
+
139
+ for prefix, gm in graphs.items():
140
+ with open(os.path.join(folder, f"{prefix}.graph"), "w") as fp:
141
+ fp.write(str(gm))
142
+
143
+ logger.warning("Dump graphs to %s", folder)
144
+
145
+ return folder
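A hypothetical usage sketch of the two helpers above, assuming `torch.distributed._spmd.graph_utils` is importable in your PyTorch build; the traced module is arbitrary.

import torch
import torch.fx as fx
from torch.distributed._spmd.graph_utils import dump_graphs_to_files, rebuild_graph

gm = fx.symbolic_trace(torch.nn.Linear(4, 4))
rebuild_graph(gm)                              # lint, optional DCE, then recompile
folder = dump_graphs_to_files({"linear": gm})
print(folder)                                  # temporary folder containing linear.graph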
venv/lib/python3.10/site-packages/torch/distributed/_spmd/iter_graph_module.py ADDED
@@ -0,0 +1,762 @@
1
+ import copy
2
+ import inspect
3
+ import logging
4
+ from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type
5
+
6
+ import torch.nn as nn
7
+ from torch import fx
8
+ from torch.distributed._spmd.graph_utils import (
9
+ clone_subgraph,
10
+ get_output,
11
+ is_leaf_subgraph,
12
+ )
13
+ from torch.distributed._spmd.partial_lower import partial_lower
14
+ from torch.fx.graph import _PyTreeCodeGen, PythonCode
15
+ from torch.fx.node import Argument
16
+ from torch.profiler import record_function
17
+ from torch.utils import _pytree as pytree
18
+ from torch.utils._pytree import tree_flatten, tree_map, tree_map_only, tree_unflatten
19
+
20
+
21
+ logger: logging.Logger = logging.getLogger("IterGraphModule")
22
+
23
+
24
+ class IterGraph(fx.Graph):
25
+ """``IterGraph`` is used to perform cross-iteration optimization.
26
+
27
+ ``IterGraph`` keeps track of the 3 graphs, self (the original graph), setup graph, and
28
+ cleanup graph. The 3 graphs should be identical copies of a ``fx.Graph``.
29
+
30
+ IterGraph subclass fx.Graph to override the necessary APIs that will be used
31
+ when constructing a optimization, e.g., communication fusion. IterGraph also
32
+ provides APIs that originally belong to fx.Node and all these APIs will have
33
+ ``node_`` prefix. For example, ``IterGraph.node_prepend`` is the equivalence
34
+ of ``fx.Node.prepend``. Note that all the optimizations must be constructed
35
+ using these APIs.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ orig_graph: fx.Graph,
41
+ setup_graph: fx.Graph,
42
+ cleanup_graph: fx.Graph,
43
+ owning_module: Optional[fx.GraphModule] = None,
44
+ tracer_cls: Optional[Type["fx.Tracer"]] = None,
45
+ tracer_extras: Optional[Dict[str, Any]] = None,
46
+ ):
47
+ super().__init__(owning_module, tracer_cls, tracer_extras)
48
+
49
+ output_vals = self.graph_copy(orig_graph, {}, return_output_node=True)
50
+ # TODO: if we do ``deepcopy(_codegen)`` and the input argument contains
51
+ # a dictionary with the form of Dict[torch.Tensor, Any], the
52
+ # torch.fx._pytree.treen_flatten_spec will not be able to flatten the
53
+ # dict -- the torch.Tensor will be duplicated because the _input_spec
54
+ # will save the ``keys`` of a dictionary (the values are not saved).
55
+ self._codegen = copy.deepcopy(orig_graph._codegen)
56
+ assert isinstance(output_vals, tuple)
57
+ output_val, old_output_val = output_vals
58
+ super().output(output_val, type_expr=getattr(old_output_val, "type", None))
59
+
60
+ self.setup_graph = setup_graph
61
+ self.cleanup_graph = cleanup_graph
62
+ self._all_graphs: Tuple[fx.Graph, ...] = (
63
+ self.setup_graph,
64
+ self.cleanup_graph,
65
+ cast(fx.Graph, super()),
66
+ )
67
+
68
+ self._setup_mapping: Dict[fx.Node, fx.Node] = {}
69
+ self._cleanup_mapping: Dict[fx.Node, fx.Node] = {}
70
+ self._freeze_cross_iter_movement = False
71
+ self._cross_iter_block_count = 0
72
+
73
+ for node, setup_node, cleanup_node in zip(
74
+ self.nodes, self.setup_graph.nodes, self.cleanup_graph.nodes
75
+ ):
76
+ self._setup_mapping[node] = setup_node
77
+ self._cleanup_mapping[node] = cleanup_node
78
+
79
+ self.num_extra_output = 0
80
+
81
+ def _lookup_node(self, node: fx.Node, graph: fx.Graph) -> Optional[fx.Node]:
82
+ if graph == self.setup_graph:
83
+ return self._setup_mapping.get(node, None)
84
+ elif graph == self.cleanup_graph:
85
+ return self._cleanup_mapping.get(node, None)
86
+ return node
87
+
88
+ def _fx_graph_call(
89
+ self, graph: fx.Graph, func: str, *args: Any, **kwargs: Any
90
+ ) -> Any:
91
+ fx_graph: fx.Graph = graph if graph != self else cast(fx.Graph, super())
92
+ return getattr(fx_graph, func)(*args, **kwargs)
93
+
94
+ def _insert_context(self, func: str, node: fx.Node):
95
+ class _InsertPoint:
96
+ def __init__(self, insert_points: List[Any]):
97
+ self.insert_points = insert_points
98
+
99
+ def __enter__(self):
100
+ pass
101
+
102
+ def __exit__(self, type, value, tb):
103
+ for insert_point in self.insert_points:
104
+ insert_point.__exit__(type, value, tb)
105
+
106
+ insert_points = []
107
+ for graph in self._all_graphs:
108
+ if node:
109
+ actual_node = self._lookup_node(node, graph)
110
+ assert actual_node is not None, "Cannot handle None case now."
111
+ else:
112
+ actual_node = node
113
+ insert_points.append(getattr(graph, func)(actual_node))
114
+
115
+ return _InsertPoint(insert_points)
116
+
117
+ def inserting_after(self, node):
118
+ if self._freeze_cross_iter_movement:
119
+ return super().inserting_after(node)
120
+ return self._insert_context("inserting_after", node)
121
+
122
+ def inserting_before(self, node):
123
+ if self._freeze_cross_iter_movement:
124
+ return super().inserting_before(node)
125
+ return self._insert_context("inserting_before", node)
126
+
127
+ def _forward_subgraph_inputs(
128
+ self, subgraph: List[fx.Node], graph: fx.Graph, erase_node: bool
129
+ ) -> int:
130
+ """Turn the inputs of a subgraph into the extra output of the entire graph.
131
+
132
+ If ``erase_node`` is True, the subgraph will be erased from the graph -- essentially forward the inputs
133
+ of the subgraph to the output of the graph.
134
+ """
135
+ output = get_output(graph)
136
+ inputs = []
137
+ all_nodes: Set[fx.Node] = set(subgraph)
138
+
139
+ for node in subgraph:
140
+ node_inputs = pytree.arg_tree_leaves(*node.args, **node.kwargs)
141
+ for _input in node_inputs:
142
+ if not isinstance(_input, fx.Node):
143
+ continue
144
+ if _input in all_nodes:
145
+ continue
146
+ inputs.append(_input)
147
+
148
+ if erase_node:
149
+ # We have to remove the node in the reversed order to ensure the
150
+ # node has zero users.
151
+ erased = set()
152
+ for node in reversed(subgraph):
153
+ if len(node.users) == 1:
154
+ key = next(iter(node.users.keys()))
155
+ if key == output:
156
+ flatten_args, spec = tree_flatten((output.args, output.kwargs))
157
+ if node not in flatten_args:
158
+ # This optimizer node from the legacy _SPMD tracing.
159
+ node.users.clear()
160
+ elif str(node.target).startswith("aten.copy_"):
161
+ # This is the case where the optimizer is
162
+ # functionalized with copy_.
163
+ for i in range(len(flatten_args)):
164
+ if flatten_args[i] == node:
165
+ flatten_args[i] = node.args[0]
166
+ else:
167
+ # We have not figured out semantics of forwarding
168
+ # all diff ops.
169
+ raise RuntimeError(
170
+ f"IterGraph does not how to forward the output of {node}"
171
+ )
172
+ output.args, output.kwargs = tree_unflatten(flatten_args, spec)
173
+
174
+ # This is the step case where there is a virtual data dependency
175
+ # (in-place update) between step and optimizer. And
176
+ # functionalize_optim add this dependency
177
+ for user in list(node.users.keys()):
178
+ if user in erased:
179
+ node.users.pop(user)
180
+ if node.users:
181
+ raise RuntimeError(
182
+ "IterGraph has not supported moving the nodes that "
183
+ "produce users output result. "
184
+ f"Error node: {node}."
185
+ )
186
+ self._fx_graph_call(graph, "erase_node", node)
187
+ erased.add(node)
188
+
189
+ # Add all the extra output nodes into a list and append the list to
190
+ # the original output.args[0].
191
+ if self.num_extra_output:
192
+ # If the extra-output list already exist, just use it.
193
+ cast(List[fx.Node], output.args[0][-1]).extend(inputs) # type: ignore[index]
194
+ new_output = output.args[0]
195
+ else:
196
+ # When adding the extra-output list, out_spec of _PyTreeCodeGen
197
+ # must be updated accordingly.
198
+ if isinstance(graph._codegen, _PyTreeCodeGen):
199
+ codegen = graph._codegen
200
+ new_output = list(output.args[0]) # type: ignore[arg-type]
201
+ new_output.append(inputs)
202
+ assert codegen.pytree_info.out_spec is not None
203
+ original_tree_out = tree_unflatten(
204
+ cast(List[Any], output.args[0]), codegen.pytree_info.out_spec
205
+ )
206
+ # Use None as a placeholder. If we use the extra-output list
207
+ # the list will be flatten as well and put into out_spec.
208
+ _, out_spec = tree_flatten((original_tree_out, None))
209
+ codegen.pytree_info = codegen.pytree_info._replace(out_spec=out_spec)
210
+ else:
211
+ new_output = (output.args[0], inputs)
212
+ self._fx_graph_call(graph, "erase_node", output)
213
+ self._fx_graph_call(graph, "output", new_output)
214
+
215
+ logger.info("Extended outputs from the subgraph inputs: %s", str(inputs))
216
+ return len(inputs)
217
+
218
+ def _forward_inputs_to_subgraph(
219
+ self, subgraph: List[fx.Node], graph: fx.Graph, extra_input: int
220
+ ) -> None:
221
+ """Create extra input nodes and forward the input nodes to the ``subgraph``.
222
+
223
+ The external input nodes of ``subgraph`` (nodes that are not in ``subgraph``) will replaced by the newly
224
+ created input nodes.
225
+ """
226
+ placeholders = [node for node in graph.nodes if str(node.op) == "placeholder"]
227
+ assert placeholders, "No placeholders are found"
228
+ # Append the extra input nodes to the current input nodes.
229
+ with self._fx_graph_call(graph, "inserting_after", placeholders[-1]):
230
+ new_input_nodes = list(
231
+ reversed(
232
+ [
233
+ self._fx_graph_call(
234
+ graph,
235
+ "placeholder",
236
+ f"cross_iter_input_{self._cross_iter_block_count}_{i}",
237
+ )
238
+ for i in reversed(range(extra_input))
239
+ ]
240
+ )
241
+ )
242
+
243
+ # Update the inputs of subgraph to use the newly created input nodes.
244
+ all_nodes = set(subgraph)
245
+ new_input_index = 0
246
+ for node in subgraph:
247
+ node_inputs, spec = tree_flatten((node.args, node.kwargs))
248
+ new_node_inputs = []
249
+ for input_node in node_inputs:
250
+ if not isinstance(input_node, fx.Node) or input_node in all_nodes:
251
+ new_node_inputs.append(input_node)
252
+ else:
253
+ new_node_inputs.append(new_input_nodes[new_input_index])
254
+ new_input_index += 1
255
+ node.args, node.kwargs = tree_unflatten(new_node_inputs, spec)
256
+ assert new_input_index == len(
257
+ new_input_nodes
258
+ ), f"More inputs than needed {len(new_input_nodes)} > {new_input_index}"
259
+
260
+ # Update the in_spec of _PyTreeCodeGen if in_spec is not None (the new
261
+ # SPMD makes in_spec as None).
262
+ if (
263
+ isinstance(graph._codegen, _PyTreeCodeGen)
264
+ and graph._codegen.pytree_info.in_spec is not None
265
+ ):
266
+ codegen = graph._codegen
267
+ original_tree_in = tree_unflatten(placeholders, codegen.pytree_info.in_spec)
268
+ _, in_spec = tree_flatten(tuple(list(original_tree_in) + new_input_nodes))
269
+ codegen.pytree_info = codegen.pytree_info._replace(in_spec=in_spec)
270
+ for new_input in new_input_nodes:
271
+ codegen.pytree_info.orig_args.append(new_input.name)
272
+ codegen.pytree_info = codegen.pytree_info._replace(in_spec=in_spec)
273
+
274
+ def move_to_next_iter_before(
275
+ self, subgraph: List[fx.Node], target_node: fx.Node
276
+ ) -> None:
277
+ """Move the ``subgraph`` to the next iteration before ``target_node``.
278
+
279
+ The ``subgraph`` is a list of fx.Node and must satisfy the following
280
+ restrictions:
281
+ 1. The order of the nodes in ``subgraph`` must obey the topological
282
+ sort order.
283
+ 2. The users of the node in ``subgraph`` must be one of the following:
284
+ a.) the user is also a node in ``subgraph``.
285
+ b.) the user is the output of the full graph.
286
+ c.) the node has users (side effect node).
287
+ """
288
+ if self._freeze_cross_iter_movement:
289
+ raise RuntimeError(
290
+ "The cross-iteration movement has been frozen for the given "
291
+ "IterGraph."
292
+ )
293
+
294
+ if not is_leaf_subgraph(self, subgraph):
295
+ raise ValueError(
296
+ "The target nodes for ``move_to_next_iter_before`` must "
297
+ "satisfy one of the following conditions: 1) the user of the "
298
+ "node is in the target nodes, 2) the user is the output of the "
299
+ "graph, 3) there are no users -- the node is a side-effect node. "
300
+ )
301
+
302
+ self._cross_iter_block_count += 1
303
+ # The main graph must be the last one to be modified. Otherwise, the
304
+ # mapping may change and hence introduce incorrect mapping for setup
305
+ # and cleanup graphs.
306
+
307
+ # For the setup graph, no additional input is needed but additional
308
+ # outputs will be created. The additional output represents the input of
309
+ # the action to be moved to the next iteration -- main graph.
310
+ setup_subgraph: List[fx.Node] = []
311
+ for node in subgraph:
312
+ mapped_node = self._lookup_node(node, self.setup_graph)
313
+ assert mapped_node is not None
314
+ setup_subgraph.append(mapped_node)
315
+ setup_extra_input = self._forward_subgraph_inputs(
316
+ subgraph=setup_subgraph,
317
+ graph=self.setup_graph,
318
+ erase_node=True,
319
+ )
320
+
321
+ # For the cleanup graph, additional input is required to get the output
322
+ # from the last iteration -- main graph. Additional nodes are also
323
+ # needed to perform the action moved from the last iteration.
324
+ target_cleanup_node = self._lookup_node(target_node, self.cleanup_graph)
325
+ assert target_cleanup_node is not None, "The target_cleanup_node is None."
326
+ cleanup_subgraph: List[fx.Node] = []
327
+ for node in subgraph:
328
+ mapped_node = self._lookup_node(node, self.cleanup_graph)
329
+ assert mapped_node is not None
330
+ cleanup_subgraph.append(mapped_node)
331
+ cloned_subgraph = clone_subgraph(
332
+ self.cleanup_graph,
333
+ cleanup_subgraph,
334
+ target=target_cleanup_node,
335
+ )
336
+ self._forward_inputs_to_subgraph(
337
+ cloned_subgraph, self.cleanup_graph, setup_extra_input
338
+ )
339
+
340
+ # For the main graph, additional input will be created to represent
341
+ # the output from the last iteration -- main graph or setup graph.
342
+ # Additional output will also be generated to represent the input for
343
+ # the next iteration -- the main graph or the cleanup graph.
344
+ main_extra_input = self._forward_subgraph_inputs(
345
+ subgraph=subgraph, graph=self, erase_node=False
346
+ )
347
+ assert main_extra_input == setup_extra_input
348
+ for node in subgraph:
349
+ target_node.prepend(node)
350
+ self._forward_inputs_to_subgraph(subgraph, self, main_extra_input)
351
+
352
+ # TODO: This is a temporary solution. We are going to remove DCE usage
353
+ # or have something to replace fx DCE.
354
+ for node in self.cleanup_graph.nodes:
355
+ if len(node.users) == 0:
356
+ node.users["__hold__"] = None # type: ignore[index]
357
+ for node in self.nodes:
358
+ if len(node.users) == 0:
359
+ node.users["__hold__"] = None # type: ignore[index]
360
+ self.num_extra_output += main_extra_input
361
+
362
+ def move_before(self, nodes: List[fx.Node], target_node: fx.Node) -> None:
363
+ for graph in self._all_graphs:
364
+ actual_nodes = [self._lookup_node(node, graph) for node in nodes]
365
+ actual_target_node = self._lookup_node(target_node, graph)
366
+ assert actual_target_node is not None
367
+ for actual_node in actual_nodes:
368
+ actual_target_node.prepend(actual_node)
369
+
370
+ def move_after(self, nodes: List[fx.Node], target_node: fx.Node) -> None:
371
+ for graph in self._all_graphs:
372
+ actual_nodes = [self._lookup_node(node, graph) for node in nodes]
373
+ actual_target_node = self._lookup_node(target_node, graph)
374
+ for actual_node in actual_nodes:
375
+ assert actual_target_node is not None
376
+ actual_target_node.append(actual_node)
377
+ actual_target_node = actual_node
378
+
379
+ def call_function(
380
+ self,
381
+ the_function: Callable[..., Any],
382
+ args: Optional[Tuple[Argument, ...]] = None,
383
+ kwargs: Optional[Dict[str, Argument]] = None,
384
+ type_expr: Optional[Any] = None,
385
+ ) -> fx.Node:
386
+ if self._freeze_cross_iter_movement:
387
+ return super().call_function(the_function, args, kwargs, type_expr)
388
+
389
+ setup_args = tree_map(
390
+ lambda arg: self._lookup_node(arg, self.setup_graph)
391
+ if isinstance(arg, fx.Node)
392
+ else arg,
393
+ args,
394
+ )
395
+ setup_kwargs = tree_map(
396
+ lambda arg: self._lookup_node(arg, self.setup_graph)
397
+ if isinstance(arg, fx.Node)
398
+ else arg,
399
+ kwargs,
400
+ )
401
+ cleanup_args = tree_map(
402
+ lambda arg: self._lookup_node(arg, self.cleanup_graph)
403
+ if isinstance(arg, fx.Node)
404
+ else arg,
405
+ args,
406
+ )
407
+ cleanup_kwargs = tree_map(
408
+ lambda arg: self._lookup_node(arg, self.cleanup_graph)
409
+ if isinstance(arg, fx.Node)
410
+ else arg,
411
+ kwargs,
412
+ )
413
+
414
+ setup_node = self.setup_graph.call_function(
415
+ the_function, setup_args, setup_kwargs, type_expr
416
+ )
417
+ main_node = super().call_function(the_function, args, kwargs, type_expr)
418
+ cleanup_node = self.cleanup_graph.call_function(
419
+ the_function, cleanup_args, cleanup_kwargs, type_expr
420
+ )
421
+ self._setup_mapping[main_node] = setup_node
422
+ self._cleanup_mapping[main_node] = cleanup_node
423
+ return main_node
424
+
425
+ def erase_node(self, to_erase: fx.Node) -> None:
426
+ if self._freeze_cross_iter_movement:
427
+ return super().erase_node(to_erase)
428
+
429
+ setup_node = self._lookup_node(to_erase, self.setup_graph)
430
+ assert setup_node is not None, "setup_node is None"
431
+ self.setup_graph.erase_node(setup_node)
432
+ super().erase_node(to_erase)
433
+ cleanup_node = self._lookup_node(to_erase, self.cleanup_graph)
434
+ self.cleanup_graph.erase_node(cleanup_node)
435
+
436
+ def placeholder(
437
+ self,
438
+ name: str,
439
+ type_expr: Optional[Any] = None,
440
+ default_value: Any = inspect.Signature.empty,
441
+ ) -> fx.Node:
442
+ if self._freeze_cross_iter_movement:
443
+ return super().placeholder(name, type_expr, default_value)
444
+
445
+ main_placeholder = super().placeholder(name, type_expr, default_value)
446
+ setup_placeholder = self.setup_graph.placeholder(name, type_expr, default_value)
447
+ cleanup_placeholder = self.cleanup_graph.placeholder(
448
+ name, type_expr, default_value
449
+ )
450
+ self._setup_mapping[main_placeholder] = setup_placeholder
451
+ self._cleanup_mapping[main_placeholder] = cleanup_placeholder
452
+ return main_placeholder
453
+
454
+ def output(self, result: Argument, type_expr: Optional[Any] = None) -> fx.Node:
455
+ if self._freeze_cross_iter_movement:
456
+ return super().output(result, type_expr)
457
+
458
+ main_output = super().output(result, type_expr)
459
+ setup_result = tree_map(
460
+ lambda _result: self._lookup_node(_result, self.setup_graph)
461
+ if isinstance(_result, fx.Node)
462
+ else _result,
463
+ result,
464
+ )
465
+ cleanup_result = tree_map(
466
+ lambda _result: self._lookup_node(_result, self.cleanup_graph)
467
+ if isinstance(_result, fx.Node)
468
+ else _result,
469
+ result,
470
+ )
471
+ self.setup_graph.output(setup_result, type_expr)
472
+ self.cleanup_graph.output(cleanup_result, type_expr)
473
+
474
+ return main_output
475
+
476
+ def lint(self) -> None:
477
+ self.setup_graph.lint()
478
+ super().lint()
479
+ self.cleanup_graph.lint()
480
+
481
+ def node_prepend(self, target_node: fx.Node, node: fx.Node) -> None:
482
+ """Prepend node to target_node."""
483
+ if self._freeze_cross_iter_movement:
484
+ target_node.prepend(node)
485
+ return
486
+
487
+ for graph in self._all_graphs:
488
+ actual_node = self._lookup_node(node, graph)
489
+ assert actual_node is not None, "The node is None"
490
+ actual_target_node = self._lookup_node(target_node, graph)
491
+ assert actual_target_node is not None, "The target node is None"
492
+ actual_target_node.prepend(actual_node)
493
+
494
+ def node_append(self, target_node: fx.Node, node: fx.Node) -> None:
495
+ """Append node to target_node."""
496
+ if self._freeze_cross_iter_movement:
497
+ target_node.append(node)
498
+ return
499
+
500
+ for graph in self._all_graphs:
501
+ actual_node = self._lookup_node(node, graph)
502
+ assert actual_node is not None, f"The actual node is None, {node}."
503
+ actual_target_node = self._lookup_node(target_node, graph)
504
+ assert (
505
+ actual_target_node is not None
506
+ ), f"The actual target node is None, {target_node}."
507
+ actual_target_node.append(actual_node)
508
+
509
+ def node_set_args(self, node: fx.Node, args: Tuple[Argument, ...]) -> None:
510
+ if self._freeze_cross_iter_movement:
511
+ node.args = args
512
+ return
513
+
514
+ setup_args = tree_map_only(
515
+ fx.Node, lambda _arg: self._lookup_node(_arg, self.setup_graph), args
516
+ )
517
+ setup_node = self._lookup_node(node, self.setup_graph)
518
+ assert setup_node is not None
519
+ setup_node.args = setup_args
520
+ cleanup_args = tree_map_only(
521
+ fx.Node, lambda _arg: self._lookup_node(_arg, self.cleanup_graph), args
522
+ )
523
+ cleanup_node = self._lookup_node(node, self.cleanup_graph)
524
+ assert cleanup_node is not None
525
+ cleanup_node.args = cleanup_args
526
+ node.args = args
527
+
528
+ def node_set_kwargs(self, node: fx.Node, kwargs: Dict[str, Argument]) -> None:
529
+ if self._freeze_cross_iter_movement:
530
+ node.kwargs = kwargs
531
+ return
532
+
533
+ setup_kwargs = tree_map_only(
534
+ fx.Node, lambda _arg: self._lookup_node(_arg, self.setup_graph), kwargs
535
+ )
536
+ setup_node = self._lookup_node(node, self.setup_graph)
537
+ assert setup_node is not None
538
+ setup_node.kwargs = setup_kwargs
539
+ cleanup_kwargs = tree_map_only(
540
+ fx.Node, lambda _arg: self._lookup_node(_arg, self.cleanup_graph), kwargs
541
+ )
542
+ cleanup_node = self._lookup_node(node, self.cleanup_graph)
543
+ assert cleanup_node is not None
544
+ cleanup_node.kwargs = cleanup_kwargs
545
+ node.kwargs = kwargs
546
+
547
+ def node_replace_all_uses_with(
548
+ self,
549
+ node: fx.Node,
550
+ replace_with: fx.Node,
551
+ delete_user_cb: Callable[[fx.Node], bool] = lambda user: True,
552
+ *,
553
+ propagate_meta=False,
554
+ ) -> List[fx.Node]:
555
+ for graph in self._all_graphs:
556
+ actual_node = self._lookup_node(node, graph)
557
+ actual_replace_with = self._lookup_node(replace_with, graph)
558
+ assert actual_node is not None
559
+ ret = actual_node.replace_all_uses_with(
560
+ actual_replace_with,
561
+ delete_user_cb,
562
+ propagate_meta=propagate_meta,
563
+ )
564
+ return ret # type: ignore[possibly-undefined]
565
+
566
+ def node_add_user(self, node: fx.Node, user: Any) -> None:
567
+ for graph in self._all_graphs:
568
+ actual_node = self._lookup_node(node, graph)
569
+ if isinstance(user, fx.Node):
570
+ actual_user_node = self._lookup_node(user, graph)
571
+ else:
572
+ actual_user_node = user
573
+ assert actual_node is not None
574
+ actual_node.users[actual_user_node] = None # type: ignore[index]
575
+
576
+ def node_remove_user(self, node: fx.Node, user: Any) -> None:
577
+ for graph in self._all_graphs:
578
+ actual_node = self._lookup_node(node, graph)
579
+ if isinstance(user, fx.Node):
580
+ actual_user_node = self._lookup_node(user, graph)
581
+ else:
582
+ actual_user_node = user
583
+ assert actual_node is not None
584
+ del actual_node.users[actual_user_node] # type: ignore[arg-type]
585
+
586
+ def keep_unused_nodes(self) -> None:
587
+ for node in self.nodes:
588
+ if len(node.users) == 0 and str(node.op) != "output":
589
+ self.node_add_user(node, "__hold__")
590
+
591
+ def functionalize_optim(self) -> None:
592
+ # IterGraph can only support full graph (fwd+bwd+optim). As optimizer
593
+ # is not a functional call (it is inplace op), this method adds the of
594
+ # the optimizer call. This method has strong assumption of the optimizer
595
+ # and may not always be working. This method is intended be a temporary
596
+ # solution only.
597
+
598
+ # TODO: remove this API after DCE is removed
599
+ for node in reversed(self.nodes):
600
+ if node.name.startswith("output"):
601
+ output_node = node
602
+ elif node.name.startswith(
603
+ "_fused_adam_",
604
+ ):
605
+ optim_node = node
606
+ elif node.name.startswith(
607
+ "_foreach_add_",
608
+ ):
609
+ step_node = node
610
+ self.node_add_user(optim_node, output_node) # type: ignore[possibly-undefined]
611
+ self.node_add_user(step_node, optim_node) # type: ignore[possibly-undefined]
612
+
613
+ def defunctionalize_optim(self) -> None:
614
+ # TODO: remove this API after DCE is not used with IterGraph
615
+ for graph in self._all_graphs:
616
+ for node in reversed(graph.nodes):
617
+ if node.name.startswith("output"):
618
+ output_node = node
619
+ elif node.name.startswith(
620
+ "_fused_adam_",
621
+ ):
622
+ optim_node = node
623
+ elif node.name.startswith(
624
+ "_foreach_add_",
625
+ ):
626
+ step_node = node
627
+ optim_node.users.pop(output_node, None) # type: ignore[possibly-undefined]
628
+ step_node.users.pop(optim_node, None) # type: ignore[possibly-undefined]
629
+
630
+ def freeze_cross_iter_movement(self) -> None:
631
+ self._freeze_cross_iter_movement = True
632
+
633
+
634
+ class IterGraphModule(nn.Module):
635
+ """``IterGraphModule`` provides the ability to do cross-iteration optimization.
636
+
637
+ Given a ``fx.GraphModule``, main_gm, ``IterGraphModule`` internally
638
+ duplicate it to 3 copies and redirect the ``forward`` request to a different
639
+ ``fx.GraphModule`` based on the iteration count. This allows users to do
640
+ graph optimizations that across iterations (e.g., moving collective wait in
641
+ the backward to the forward of the next iteration).
642
+
643
+ Note that users must call the APIs provided by ``IterGraphModule`` or
644
+ ``IterGraph`` to rewrite the graph so that ``IterGraphModule`` can keep the
645
+ data dependency for all 3 graphs.
646
+ """
647
+
648
+ def __init__(
649
+ self,
650
+ main_gm: fx.GraphModule,
651
+ max_iters: int = -1,
652
+ enable_inductor: bool = False,
653
+ ) -> None:
654
+ super().__init__()
655
+
656
+ def _copy_gm(src: fx.GraphModule, graph: fx.Graph) -> fx.GraphModule:
657
+ gm = fx.GraphModule(src, graph)
658
+ gm.meta = getattr(graph, "meta", {})
659
+ return gm
660
+
661
+ self.setup_gm = _copy_gm(main_gm, copy.deepcopy(main_gm.graph))
662
+ self.cleanup_gm = _copy_gm(main_gm, copy.deepcopy(main_gm.graph))
663
+ self.main_gm = _copy_gm(
664
+ main_gm,
665
+ IterGraph(main_gm.graph, self.setup_gm.graph, self.cleanup_gm.graph),
666
+ )
667
+
668
+ self._iter = 0
669
+ self._max_iters = max_iters
670
+ self._previous_output: Tuple[Any, ...] = tuple()
671
+ self._num_extra_output = 0
672
+ self._is_frozen = False
673
+ self._enable_inductor = enable_inductor
674
+
675
+ def finalize_setup(self) -> None:
676
+ """Set up the internal states and also get the signal from users that what is the maximum iteration count.
677
+
678
+ This method must be called before the forward() is called.
679
+ """
680
+ if not self._is_frozen:
681
+ self.graph.freeze_cross_iter_movement()
682
+ self._num_extra_output = self.graph.num_extra_output
683
+ if self._enable_inductor:
684
+ self.main_gm = partial_lower(self.main_gm)
685
+ self._is_frozen = True
686
+
687
+ self._iter = 0
688
+
689
+ def _run(self, gm: fx.GraphModule, last_iter: bool, *args, **kwargs) -> Any:
690
+ if self._num_extra_output > 0:
691
+ new_args = args + (self._previous_output)
692
+ output = gm(*new_args, **kwargs)
693
+ if not last_iter:
694
+ assert len(output) == 2
695
+ self._previous_output = tuple(output[-1])
696
+ assert (
697
+ len(self._previous_output) > 0
698
+ ), "There should be at least one extra output."
699
+ output = output[0]
700
+ else:
701
+ # No cross-iteration optimization is done. Simply call the
702
+ # GraphModule.
703
+ output = gm(*args, **kwargs)
704
+ return output
705
+
706
+ def forward(self, *args: Any, last_iter: bool = False, **kwargs: Any) -> Any:
707
+ self._iter += 1
708
+ last_iter = last_iter or self._iter == self._max_iters
709
+ if last_iter:
710
+ logger.info("Using the cleanup graph")
711
+ gm = self.cleanup_gm
712
+ profiler_string = "## IterGraphModule: Cleanup Graph ##"
713
+ self._iter = 0
714
+ elif self._iter == 1:
715
+ logger.info("Using the setup graph")
716
+ gm = self.setup_gm
717
+ profiler_string = "## IterGraphModule: Setup Graph ##"
718
+ else:
719
+ gm = self.main_gm
720
+ if self._iter == 2:
721
+ logger.info("Using the main graph")
722
+ profiler_string = "## IterGraphModule -- Maybe Compiling ##"
723
+ else:
724
+ profiler_string = "## IterGraphModule ##"
725
+
726
+ with record_function(profiler_string):
727
+ return self._run(gm, last_iter, *args, **kwargs)
728
+
729
+ @property
730
+ def graph(self) -> IterGraph:
731
+ return cast(IterGraph, self.main_gm.graph)
732
+
733
+ def recompile(self) -> PythonCode:
734
+ self.setup_gm.recompile()
735
+ self.cleanup_gm.recompile()
736
+ return self.main_gm.recompile()
737
+
738
+ def freeze_cross_iter_movement(self) -> None:
739
+ # TODO: remove this API once it is not used.
740
+ self.graph.freeze_cross_iter_movement()
741
+ self._num_extra_output = self.graph.num_extra_output
742
+
743
+ def print_readable(self, print_output: bool = True) -> str:
744
+ return self.main_gm.print_readable(print_output)
745
+
746
+ def print_all_graphs(self) -> None:
747
+ logger.info("Printing the three fx.Graph:")
748
+ logger.info("1. Setup fx.Graph:")
749
+ logger.info("%s", self.setup_gm.graph)
750
+ logger.info("2. Main fx.Graph:")
751
+ logger.info("%s", self.main_gm.graph)
752
+ logger.info("3. Cleanup fx.Graph:")
753
+ logger.info("%s", self.cleanup_gm.graph)
754
+
755
+ def print_all_graph_modules(self) -> None:
756
+ logger.info("Printing the three fx gm:")
757
+ logger.info("1. Setup fx.GraphModule:")
758
+ logger.info("%s", self.setup_gm.print_readable(False))
759
+ logger.info("2. Main fx.GraphModule:")
760
+ logger.info("%s", self.main_gm.print_readable(False))
761
+ logger.info("3. Cleanup fx.GraphModule:")
762
+ logger.info("%s", self.cleanup_gm.print_readable(False))
venv/lib/python3.10/site-packages/torch/distributed/_spmd/log_utils.py ADDED
@@ -0,0 +1,78 @@
1
+ import logging
2
+ import logging.config
3
+ import os
4
+ from typing import Optional
5
+
6
+ import torch.distributed as dist
7
+
8
+
9
+ LOGGING_CONFIG = {
10
+ "version": 1,
11
+ "formatters": {
12
+ "spmd_format": {"format": "%(name)s: [%(levelname)s] %(message)s"},
13
+ "graph_opt_format": {"format": "%(name)s: [%(levelname)s] %(message)s"},
14
+ },
15
+ "handlers": {
16
+ "spmd_console": {
17
+ "class": "logging.StreamHandler",
18
+ "level": "DEBUG",
19
+ "formatter": "spmd_format",
20
+ "stream": "ext://sys.stdout",
21
+ },
22
+ "graph_opt_console": {
23
+ "class": "logging.StreamHandler",
24
+ "level": "DEBUG",
25
+ "formatter": "graph_opt_format",
26
+ "stream": "ext://sys.stdout",
27
+ },
28
+ "null_console": {
29
+ "class": "logging.NullHandler",
30
+ },
31
+ },
32
+ "loggers": {
33
+ "spmd_exp": {
34
+ "level": "DEBUG",
35
+ "handlers": ["spmd_console"],
36
+ "propagate": False,
37
+ },
38
+ "graph_opt": {
39
+ "level": "DEBUG",
40
+ "handlers": ["graph_opt_console"],
41
+ "propagate": False,
42
+ },
43
+ "null_logger": {
44
+ "handlers": ["null_console"],
45
+ "propagate": False,
46
+ },
47
+ # TODO(anj): Add loggers for MPMD
48
+ },
49
+ "disable_existing_loggers": False,
50
+ }
51
+
52
+
53
+ def get_logger(log_type: str) -> Optional[logging.Logger]:
54
+ from torch.distributed._spmd import config
55
+
56
+ if "PYTEST_CURRENT_TEST" not in os.environ:
57
+ logging.config.dictConfig(LOGGING_CONFIG)
58
+ avail_loggers = list(LOGGING_CONFIG["loggers"].keys()) # type: ignore[attr-defined]
59
+ assert (
60
+ log_type in avail_loggers
61
+ ), f"Unable to find {log_type} in the available list of loggers {avail_loggers}"
62
+
63
+ if not dist.is_initialized():
64
+ return logging.getLogger(log_type)
65
+
66
+ if dist.get_rank() == 0:
67
+ logger = logging.getLogger(log_type)
68
+ logger.setLevel(config.log_level)
69
+ if config.log_file_name is not None:
70
+ log_file = logging.FileHandler(config.log_file_name)
71
+ log_file.setLevel(config.log_level)
72
+ logger.addHandler(log_file)
73
+ else:
74
+ logger = logging.getLogger("null_logger")
75
+
76
+ return logger
77
+
78
+ return logging.getLogger("null_logger")
venv/lib/python3.10/site-packages/torch/distributed/_spmd/parallel_mode.py ADDED
@@ -0,0 +1,216 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any, Callable, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ import torch.utils._pytree as pytree
7
+ from torch._subclasses import FakeTensorMode
8
+ from torch.distributed._spmd.data_parallel import (
9
+ DataParallelStyle,
10
+ partition_data_parallel,
11
+ )
12
+ from torch.distributed._spmd.distribute import _convert_to_distributed, Schema
13
+ from torch.distributed._tensor import DeviceMesh, Placement, Replicate, Shard
14
+
15
+ from torch.fx import GraphModule
16
+
17
+
18
+ class ParallelMode(ABC):
19
+ """
20
+ Basic Parallel Mode interface. Each parallelism pattern should implement
21
+ this interface to describe how to partition and compile the graph in the
22
+ spmd compiler.
23
+ """
24
+
25
+ @abstractmethod
26
+ def partition(
27
+ self,
28
+ gm: GraphModule,
29
+ model: torch.nn.Module,
30
+ optimizer: Optional[torch.optim.Optimizer],
31
+ params_and_buffers: Dict[str, Any],
32
+ named_states: Dict[str, Any],
33
+ args: Tuple[Any, ...],
34
+ kwargs: Dict[str, Any],
35
+ ) -> GraphModule:
36
+ """
37
+ Partition a single device graph to a distributed graph.
38
+
39
+ TODO(@wanchaol): some of these arguments are not necessary for
40
+ partitioning, remove the unnecessary ones later.
41
+ """
42
+ raise NotImplementedError()
43
+
44
+ @abstractmethod
45
+ def transform_and_compile(self, gm: GraphModule) -> GraphModule:
46
+ """
47
+ Transform and compile a distributed graph with a set of graph
48
+ transformation and optimization passes for each parallel mode.
49
+
50
+ The returned result should be a compiled executable graph in
51
+ the distributed environment.
52
+ """
53
+ # TODO: add more necessary arguments to this interface.
54
+ raise NotImplementedError()
55
+
56
+
57
+ class DataParallel(ParallelMode):
58
+ """Data Parallelism mode."""
59
+
60
+ def __init__(
61
+ self,
62
+ parallel_style: str = "replicate",
63
+ *,
64
+ input_batch_dim: int = 0,
65
+ custom_passes: Optional[Callable[[GraphModule], GraphModule]] = None,
66
+ ):
67
+ """
68
+ DataParallel Mode that partitions the model and graph to data parallel style
69
+ parallelism (i.e. DDP/FSDP/ZERO-3). It currently supports three different
70
+ parallel styles: "replicate", "fully_shard", and "default". See
71
+ :class:`DataParallelStyle` for more details.
72
+
73
+ Args:
74
+ parallel_style (str): parallel style to use. Currently supports
75
+ "replicate", "fully_shard", and "default".
76
+
77
+ Keyword args:
78
+ input_batch_dim (int): the batch dimension of the input tensor.
79
+ default: 0
80
+ custom_passes (Callable[[GraphModule], GraphModule], optional):
81
+ A custom callable that overrides the default graph transformation
82
+ and optimization passes.
83
+ """
84
+ if parallel_style == "replicate":
85
+ self.parallel_style = DataParallelStyle.REPLICATE
86
+ elif parallel_style == "fully_shard":
87
+ self.parallel_style = DataParallelStyle.FULLY_SHARD
88
+ elif parallel_style == "default":
89
+ self.parallel_style = DataParallelStyle.DEFAULT
90
+ else:
91
+ raise RuntimeError(f"Unknown parallel style: {parallel_style}")
92
+
93
+ # TODO: what if the user passes in an incorrect `input_batch_dim`, how should we
94
+ # detect that and do proper error handling?
95
+ self.input_batch_dim = input_batch_dim
96
+
97
+ if custom_passes is not None:
98
+ self._gm_passes: Callable[[GraphModule], GraphModule] = custom_passes
99
+ else:
100
+ # TODO: add a few default passes here.
101
+ self._gm_passes = lambda gm: gm
102
+
103
+ def partition(
104
+ self,
105
+ gm: GraphModule,
106
+ model: torch.nn.Module,
107
+ optimizer: Optional[torch.optim.Optimizer],
108
+ params_and_buffers: Dict[str, Any],
109
+ named_states: Dict[str, Any],
110
+ args: Tuple[Any, ...],
111
+ kwargs: Dict[str, Any],
112
+ ) -> GraphModule:
113
+ # TODO: figure out a way to avoid explicit "cuda" mesh.
114
+ mesh = DeviceMesh("cuda", torch.arange(dist.get_world_size()))
115
+
116
+ gm = partition_data_parallel(
117
+ gm,
118
+ model,
119
+ optimizer,
120
+ params_and_buffers,
121
+ named_states,
122
+ args,
123
+ kwargs,
124
+ mesh,
125
+ self.parallel_style,
126
+ self.input_batch_dim,
127
+ )
128
+ return gm
129
+
130
+ def transform_and_compile(self, gm: GraphModule) -> GraphModule:
131
+ """optimize a distributed graph with a set of optimization passes"""
132
+ # TODO: add more necessary arguments to this interface.
133
+ return self._gm_passes(gm)
134
+
135
+
136
+ class DTensorExpandMode(ParallelMode):
137
+ """
138
+ The DTensor Expand mode. It replicates the parameters and
139
+ shards the inputs to represent DDP-like behavior. It is currently
140
+ a transient mode before we move to the new data parallel expansion.
141
+ """
142
+
143
+ def __init__(
144
+ self, custom_passes: Optional[Callable[[GraphModule], GraphModule]] = None
145
+ ):
146
+ self._placements_override: Dict[int, List[Placement]] = {}
147
+ if custom_passes is not None:
148
+ self._gm_passes: Callable[[GraphModule], GraphModule] = custom_passes
149
+ else:
150
+ # TODO: add a few default passes here.
151
+ self._gm_passes = lambda gm: gm
152
+
153
+ def partition(
154
+ self,
155
+ gm: GraphModule,
156
+ model: torch.nn.Module,
157
+ optimizer: Optional[torch.optim.Optimizer],
158
+ params_and_buffers: Dict[str, Any],
159
+ named_states: Dict[str, Any],
160
+ args: Tuple[Any, ...],
161
+ kwargs: Dict[str, Any],
162
+ ) -> GraphModule:
163
+ flat_args = pytree.arg_tree_leaves(*args, **kwargs)
164
+
165
+ mesh = DeviceMesh("cuda", torch.arange(dist.get_world_size()).cuda())
166
+ shard_schema: Schema = Schema(mesh=mesh, placements=[Shard(0)])
167
+ # FIXME: allow other sharding schemas
168
+ replicate_schema: Schema = Schema(mesh=mesh, placements=[Replicate()])
169
+
170
+ inps, schemas = [], []
171
+
172
+ for p in pytree.tree_leaves(params_and_buffers):
173
+ assert isinstance(p, torch.Tensor), f"expecting Tensor but got {type(p)}"
174
+ inps.append(p)
175
+ schemas.append(replicate_schema)
176
+
177
+ for o in pytree.tree_leaves(named_states):
178
+ if isinstance(o, torch.Tensor):
179
+ inps.append(o)
180
+ schemas.append(replicate_schema)
181
+ else:
182
+ inps.append(torch.empty(0))
183
+ schemas.append(replicate_schema)
184
+
185
+ for a in flat_args:
186
+ if isinstance(a, torch.Tensor):
187
+ inps.append(a)
188
+ if id(a) in self._placements_override:
189
+ schemas.append(
190
+ Schema(mesh=mesh, placements=self._placements_override[id(a)])
191
+ )
192
+ else:
193
+ schemas.append(shard_schema)
194
+ else:
195
+ # Create dummy tensor and schema for non-tensor inputs for
196
+ # the purpose of dtensor expansion. Non-tensor inputs are
197
+ # guaranteed unused in dispatcher graphs produced by make_fx.
198
+ # However, we still need to respect them so that tensor inputs
199
+ # match with their placeholders.
200
+ inps.append(torch.empty(0))
201
+ schemas.append(shard_schema)
202
+
203
+ with FakeTensorMode(allow_non_fake_inputs=True):
204
+ fake_inps = [torch.empty_like(inp) for inp in inps]
205
+
206
+ return _convert_to_distributed(
207
+ gm, fake_inps, schemas, default_mesh=mesh, _allow_partial=False
208
+ )[0]
209
+
210
+ def transform_and_compile(self, gm: GraphModule) -> GraphModule:
211
+ """
212
+ Transform and compile a distributed graph with a set of graph transformation
213
+ and optimization passes for the dtensor fallback parallel mode.
214
+ """
215
+ # TODO: move the transformation passes to this function
216
+ return self._gm_passes(gm)
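
A construction sketch for the parallel modes above (editorial note, not part of the diff): ``my_pass`` is a hypothetical custom pass, and the compile entry point that consumes a ``ParallelMode`` lives elsewhere in ``_spmd`` and is not shown here.

    from torch.fx import GraphModule

    def my_pass(gm: GraphModule) -> GraphModule:
        # any GraphModule -> GraphModule transformation can serve as a custom pass
        gm.graph.eliminate_dead_code()
        gm.recompile()
        return gm

    dp_mode = DataParallel("fully_shard", input_batch_dim=0, custom_passes=my_pass)
    fallback_mode = DTensorExpandMode(custom_passes=my_pass)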
venv/lib/python3.10/site-packages/torch/distributed/_spmd/partial_lower.py ADDED
@@ -0,0 +1,268 @@
1
+ # This file is copied from Meta internal repo and is not synced with the
2
+ # internal version. Once the internal version is fully mature, we should
3
+ # upstream again and retire the internal version. @yifuwang
4
+
5
+ import logging
6
+ import operator
7
+ from typing import Callable, List, Optional, Set, Tuple
8
+
9
+ from functorch import make_fx
10
+
11
+ import torch
12
+
13
+ from torch._inductor.compile_fx import compile_fx_inner
14
+ from torch._inductor.decomposition import select_decomp_table
15
+
16
+ MIN_ATEN_OPS_TO_LOWER = 10
17
+
18
+ logger: logging.Logger = logging.getLogger(__name__)
19
+
20
+
21
+ def _create_subgraph_module(
22
+ inputs: List[torch.fx.Node], body: List[torch.fx.Node], outputs: List[torch.fx.Node]
23
+ ) -> torch.fx.GraphModule:
24
+ subgraph: torch.fx.Graph = torch.fx.Graph()
25
+ node_to_subgraph_node = {}
26
+ for idx, inp in enumerate(inputs):
27
+ subgraph_inp = subgraph.placeholder(name=f"arg_{idx}")
28
+ subgraph_inp.meta = inp.meta
29
+ node_to_subgraph_node[inp] = subgraph_inp
30
+
31
+ for node in body:
32
+ subgraph_node = subgraph.node_copy(
33
+ node, arg_transform=lambda x: node_to_subgraph_node[x]
34
+ )
35
+ node_to_subgraph_node[node] = subgraph_node
36
+
37
+ subgraph.output(result=tuple(node_to_subgraph_node[x] for x in outputs))
38
+ subgraph.eliminate_dead_code()
39
+ subgraph.lint()
40
+ return torch.fx.GraphModule(root={}, graph=subgraph)
41
+
42
+
43
+ def _is_container_node(node: torch.fx.Node) -> bool:
44
+ if any(user.target == operator.getitem for user in node.users):
45
+ assert all(user.target == operator.getitem for user in node.users), (
46
+ "Malformed graph: a container node is used as input for non-getitem nodes."
47
+ "\nNode: {fmt_node}\nUsers: {fmt_users}".format(
48
+ fmt_node=node.format_node(),
49
+ fmt_users="\n".join(u.format_node() for u in node.users),
50
+ )
51
+ )
52
+ return True
53
+ return False
54
+
55
+
56
+ def _lower_subgraph_nodes(
57
+ gm: torch.fx.GraphModule,
58
+ subgraph_name: str,
59
+ subgraph_nodes: List[torch.fx.Node],
60
+ dumper: Callable[[str], str],
61
+ ) -> None:
62
+ prologue: List[torch.fx.Node] = []
63
+ inputs: List[torch.fx.Node] = []
64
+ body: List[torch.fx.Node] = []
65
+ visible: Set[torch.fx.Node] = set()
66
+
67
+ # Inductor requires all graph input to be tensors. When adding a container
68
+ # node as subgraph input, add its descendant getitem nodes to the subgraph
69
+ # prologue and add its leaf getitem nodes to the subgraph input.
70
+ def add_input(arg: torch.fx.Node) -> None:
71
+ stack = [arg]
72
+ while len(stack) != 0:
73
+ node = stack.pop()
74
+ if _is_container_node(node):
75
+ # We should only prepone nodes within subgraph_nodes
76
+ prologue.extend(user for user in node.users if user in subgraph_nodes)
77
+ stack.extend(node.users)
78
+ else:
79
+ if node not in visible:
80
+ inputs.append(node)
81
+ visible.add(node)
82
+
83
+ for node in subgraph_nodes:
84
+ if node.op == "get_attr":
85
+ # Prepone get_attr to avoid having to copy
86
+ # the attribute to the subgraph module.
87
+ inputs.append(node)
88
+ visible.add(node)
89
+ continue
90
+
91
+ for arg in node.all_input_nodes:
92
+ if arg not in visible:
93
+ add_input(arg)
94
+
95
+ if node not in prologue:
96
+ body.append(node)
97
+ visible.add(node)
98
+
99
+ outputs: List[torch.fx.Node] = []
100
+
101
+ # Inductor requires all graph output to be tensors. When adding a container
102
+ # node as subgraph output, add its descendant getitem nodes to the subgraph
103
+ # body and add its leaf getitem nodes to the subgraph output.
104
+ def add_output(output: torch.fx.Node) -> None:
105
+ stack = [output]
106
+ while len(stack) != 0:
107
+ node = stack.pop()
108
+ if _is_container_node(node):
109
+ body.extend(node.users)
110
+ stack.extend(node.users)
111
+ elif not all(user in visible for user in node.users):
112
+ if node not in outputs:
113
+ outputs.append(node)
114
+
115
+ for node in body:
116
+ if not all(user in visible for user in node.users):
117
+ add_output(node)
118
+
119
+ assert len(inputs) == len(set(inputs))
120
+ assert len(outputs) == len(set(outputs))
121
+
122
+ subgraph_module = _create_subgraph_module(inputs, body, outputs)
123
+ readable_tag = dumper(str(subgraph_module.graph))
124
+ setattr(gm, subgraph_name, _InductorModule(subgraph_module))
125
+
126
+ insertion_point = subgraph_nodes[-1].next
127
+ for node in prologue:
128
+ insertion_point.prepend(node)
129
+
130
+ with gm.graph.inserting_before(insertion_point):
131
+ # Insert subgraph call
132
+ subgraph_call = gm.graph.create_node(
133
+ op="call_module",
134
+ target=subgraph_name,
135
+ args=tuple(inputs),
136
+ kwargs={"tag": readable_tag},
137
+ )
138
+ # Replace parent graph nodes with their corresponding subgraph outputs
139
+ for idx, output in enumerate(outputs):
140
+ new_output = gm.graph.create_node(
141
+ op="call_function",
142
+ target=operator.getitem,
143
+ args=(subgraph_call, idx),
144
+ )
145
+ new_output.meta = output.meta
146
+ output.replace_all_uses_with(new_output)
147
+
148
+ # Erase lowered nodes from the parent graph
149
+ for node in reversed(body + outputs):
150
+ if len(node.users) == 0:
151
+ gm.graph.erase_node(node)
152
+
153
+
154
+ class _InductorModule(torch.nn.Module):
155
+ def __init__(self, gm: torch.fx.GraphModule) -> None:
156
+ super().__init__()
157
+ self.gm = gm
158
+ self.compiled: Optional[
159
+ Callable[[List[torch.Tensor]], List[torch.Tensor]]
160
+ ] = None
161
+
162
+ def forward(self, *args: torch.Tensor, tag: str) -> List[torch.Tensor]:
163
+ if self.compiled is None:
164
+ inductor_decompositions = select_decomp_table()
165
+ # TODO: figure out why turning on cudagraphs causes exceptions.
166
+ decomp_gm = make_fx(self.gm, decomposition_table=inductor_decompositions)(
167
+ *args
168
+ )
169
+ logger.info("Lowering subgraph (%s) to Inductor...", tag)
170
+ self.compiled = compile_fx_inner(
171
+ decomp_gm,
172
+ list(args),
173
+ cudagraphs=False,
174
+ )
175
+ logger.info("Completed lowering subgraph (%s) to Inductor", tag)
176
+ with torch.profiler.record_function(tag):
177
+ assert self.compiled is not None
178
+ return self.compiled(list(args))
179
+
180
+
181
+ def _is_inductor_compatible(node: torch.fx.Node) -> Tuple[bool, str]:
182
+ # `has_tag` is not supported yet
183
+ # if has_tag(node, "non_lowerable"):
184
+
185
+ if node.target in (
186
+ torch.ops.aten._fused_adam_.default,
187
+ torch.ops.aten._fused_adam.default,
188
+ torch.ops.aten._foreach_add_.Scalar,
189
+ torch.ops.aten._foreach_add.Scalar,
190
+ ):
191
+ return False, "fused adam is not supported yet"
192
+
193
+ # TODO(yifu): apparently having a meta kernel is not a necessary
194
+ # condition for Inductor compatiblity. We should refine the check.
195
+ # Sneaking this one in for now to support comm_fusion_with_cat.
196
+ if node.target == torch.ops.aten.flatten.using_ints:
197
+ return True, ""
198
+
199
+ if isinstance(node.target, torch._ops.OpOverload):
200
+ if not node.target.has_kernel_for_dispatch_key(torch._C.DispatchKey.Meta):
201
+ return False, f"{node.target} doesn't have a meta kernel registered"
202
+ return True, ""
203
+
204
+
205
+ def _subgraph_predicate(nodes: List[torch.fx.Node]) -> bool:
206
+ num_aten_ops = len([n for n in nodes if str(n.target).startswith("aten.")])
207
+ return num_aten_ops >= MIN_ATEN_OPS_TO_LOWER
208
+
209
+
210
+ def partial_lower(
211
+ gm: torch.fx.GraphModule,
212
+ node_predicate: Callable[[torch.fx.Node], bool] = lambda x: True,
213
+ subgraph_predicate: Callable[[List[torch.fx.Node]], bool] = lambda x: True,
214
+ dumper: Callable[[str], str] = lambda x: "subgraph",
215
+ ) -> torch.fx.GraphModule:
216
+ """
217
+ Lower Inductor compatible portions of the graph module to Inductor.
218
+
219
+ Args:
220
+ node_predicate: user predicate for determining whether to consider a node for
221
+ lowering.
222
+ subgraph_predicate: user predicate for determining whether to consider a list of
223
+ candidate nodes for lowering.
224
+ dumper: a callback for dumping subgraphs for human digestion. For exmaple, it
225
+ can be a function that writes to disk/blob storage and returns the
226
+ path/handle. The returned path/handle for each subgraph will be made
227
+ available in the subgraph call node in the parent graph, as well as the
228
+ label of the profiler block for the subgraph.
229
+ """
230
+ nodes_per_subgraph: List[List[torch.fx.Node]] = [[]]
231
+ ptr = next(iter(gm.graph.nodes))
232
+
233
+ def _node_predicate(node: torch.fx.Node) -> Tuple[bool, str]:
234
+ should_lower, reason = _is_inductor_compatible(node)
235
+ if not should_lower:
236
+ return should_lower, reason
237
+ if not node_predicate(node):
238
+ return False, "user predicate"
239
+ return True, ""
240
+
241
+ while ptr.op != "output":
242
+ if ptr.op == "placeholder":
243
+ ptr = ptr.next
244
+ continue
245
+ should_lower, reason = _node_predicate(ptr)
246
+ if should_lower:
247
+ nodes_per_subgraph[-1].append(ptr)
248
+ else:
249
+ if len(nodes_per_subgraph[-1]) > 0:
250
+ logger.warning(
251
+ "partial_lower: graph break at %s. Reason: %s", str(ptr), reason
252
+ )
253
+ nodes_per_subgraph.append([])
254
+ ptr = ptr.next
255
+
256
+ nodes_per_subgraph = [
257
+ nodes
258
+ for nodes in nodes_per_subgraph
259
+ if subgraph_predicate(nodes) and _subgraph_predicate(nodes)
260
+ ]
261
+
262
+ for idx, subgraph_nodes in enumerate(nodes_per_subgraph):
263
+ subgraph_name = f"subgraph_{idx}"
264
+ _lower_subgraph_nodes(gm, subgraph_name, subgraph_nodes, dumper)
265
+
266
+ gm.graph.lint()
267
+ gm.recompile()
268
+ return gm
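
A usage sketch for ``partial_lower`` (editorial note, not part of the diff): ``traced_gm`` and the excluded node name are hypothetical; runs of fewer than ``MIN_ATEN_OPS_TO_LOWER`` aten ops are always skipped by the built-in ``_subgraph_predicate``.

    keep_in_eager = {"all_reduce_1"}                    # hypothetical node names to exclude
    lowered = partial_lower(
        traced_gm,                                      # an existing torch.fx.GraphModule
        node_predicate=lambda n: n.name not in keep_in_eager,
        dumper=lambda graph_str: "fused_subgraph",      # or persist graph_str and return its path
    )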