applied-ai-018 committed on
Commit 9fe56d9 · verified · 1 Parent(s): 709d16e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/4.input_layernorm.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py +342 -0
  6. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/api.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/tp_conv.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py +313 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/_tensor/_utils.py +204 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/_tensor/api.py +760 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__init__.py +14 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/comm_mode.py +91 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/op_coverage.py +105 -0
  24. venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/visualize_sharding.py +176 -0
  25. venv/lib/python3.10/site-packages/torch/distributed/_tensor/device_mesh.py +6 -0
  26. venv/lib/python3.10/site-packages/torch/distributed/_tensor/dispatch.py +393 -0
  27. venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py +12 -0
  28. venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py +547 -0
  31. venv/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py +427 -0
  32. venv/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py +620 -0
  33. venv/lib/python3.10/site-packages/torch/distributed/_tensor/random.py +372 -0
  34. venv/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py +337 -0
  35. venv/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py +410 -0
  36. venv/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py +277 -0
  37. venv/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py +12 -0
  39. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d741ec339177bb2b66495f4c6fdbde2b4024c35c1473dc5f957082788e4be21
+ size 33555627
ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a575846fbdbc06e7dd8134227ac7cecea4ee564c09b352156d82fe22db7909cf
+ size 33555533
ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75dc3bb752cb92d344aeed81ae1a24a0bcf4aca44cb8dab7dd8e951ed0a672c1
+ size 33555612
ckpts/universal/global_step120/zero/4.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e497ea1e1441ff74a24f84984159fdbe127060190f52ed1efbe1773881c48d35
+ size 9293
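
Each of the four checkpoint files above is added as a Git LFS pointer rather than as raw binary data: the diff contains only the three-line pointer (spec version, SHA-256 object id, and byte size), while the actual tensor payload lives in LFS storage. The snippet below is a minimal, self-contained sketch of how such a pointer file can be parsed; the helper name `parse_lfs_pointer` and the example path are illustrative, not part of git-lfs or this repository.

    # Minimal sketch: parse a Git LFS pointer file (version / oid / size lines) into a dict.
    # `parse_lfs_pointer` is an illustrative helper, not part of git-lfs itself.
    def parse_lfs_pointer(path: str) -> dict:
        fields = {}
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                if key:
                    fields[key] = value
        return fields

    # e.g. parse_lfs_pointer("ckpts/universal/global_step120/zero/4.input_layernorm.weight/fp32.pt")
    # -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:e497...", "size": "9293"}
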
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py ADDED
@@ -0,0 +1,342 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import Optional, Sequence
3
+
4
+ # Import all builtin dist tensor ops
5
+ import torch
6
+ import torch.distributed._tensor.ops
7
+ import torch.distributed._tensor.random as random
8
+ from torch.distributed._tensor._utils import compute_local_shape
9
+ from torch.distributed._tensor.api import distribute_module, distribute_tensor, DTensor
10
+ from torch.distributed._tensor.ops.utils import normalize_to_torch_size
11
+ from torch.distributed._tensor.placement_types import Placement, Replicate, Shard
12
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh
13
+
14
+ # All public APIs from dtensor package
15
+ __all__ = [
16
+ "DTensor",
17
+ "DeviceMesh",
18
+ "distribute_tensor",
19
+ "distribute_module",
20
+ "init_device_mesh",
21
+ "Shard",
22
+ "Replicate",
23
+ ]
24
+
25
+
26
+ def _dtensor_init_helper(
27
+ init_op,
28
+ size: torch.Size,
29
+ device_mesh=None,
30
+ placements=None,
31
+ **kwargs,
32
+ ) -> DTensor:
33
+ # if device_mesh is None, use the one from mesh resources
34
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
35
+ kwargs["device"] = device_mesh.device_type
36
+
37
+ # set default placements to replicated if not specified
38
+ placements = placements or tuple(Replicate() for _ in range(device_mesh.ndim))
39
+
40
+ # check device_mesh against placements
41
+ assert device_mesh.ndim == len(
42
+ placements
43
+ ), "mesh dimension does not match the length of placements"
44
+
45
+ assert kwargs["layout"] == torch.strided, "layout value not supported!"
46
+ torch_stride = torch._prims_common.make_contiguous_strides_for(size)
47
+
48
+ # get local tensor shape
49
+ local_shape = compute_local_shape(size, device_mesh, placements)
50
+ # initialize the local tensor
51
+ if init_op == torch.full:
52
+ fill_value = kwargs.pop("fill_value", 0)
53
+ local_tensor = init_op(local_shape, fill_value, **kwargs)
54
+ elif init_op == torch.rand or init_op == torch.randn:
55
+ # this tensor meta is not used except `shape`
56
+ dtype = kwargs.get("dtype", torch.get_default_dtype())
57
+
58
+ from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta
59
+
60
+ tensor_meta = TensorMeta(size, (0,), dtype)
61
+ spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta)
62
+
63
+ if random.is_rng_supported_mesh(device_mesh) and not random._rng_tracker:
64
+ random._rng_tracker = random.OffsetBasedRNGTracker()
65
+
66
+ assert random._rng_tracker is not None
67
+ with random._rng_tracker._distribute_region(spec):
68
+ local_tensor = init_op(local_shape, **kwargs)
69
+ else:
70
+ local_tensor = init_op(local_shape, **kwargs)
71
+
72
+ return DTensor(
73
+ local_tensor=local_tensor,
74
+ device_mesh=device_mesh,
75
+ placements=tuple(placements),
76
+ shape=size,
77
+ dtype=local_tensor.dtype,
78
+ stride=torch_stride,
79
+ requires_grad=kwargs["requires_grad"],
80
+ )
81
+
82
+
83
+ def ones(
84
+ *size,
85
+ dtype: Optional[torch.dtype] = None,
86
+ layout: torch.layout = torch.strided,
87
+ requires_grad: bool = False,
88
+ device_mesh: Optional[DeviceMesh] = None,
89
+ placements: Optional[Sequence[Placement]] = None,
90
+ ) -> DTensor:
91
+ """
92
+ Returns a :class:`DTensor` filled with the scalar value 1, with the shape defined
93
+ by the variable argument ``size``.
94
+
95
+ Args:
96
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
97
+ Can be a variable number of arguments or a collection like a list or tuple.
98
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
99
+
100
+ Keyword args:
101
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
102
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
103
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
104
+ Default: ``torch.strided``.
105
+ requires_grad (bool, optional): If autograd should record operations on the
106
+ returned :class:`DTensor`. Default: ``False``.
107
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks
108
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
109
+
110
+ Returns:
111
+ A :class:`DTensor` object on each rank
112
+ """
113
+ torch_size = normalize_to_torch_size(size)
114
+
115
+ return _dtensor_init_helper(
116
+ torch.ones,
117
+ torch_size,
118
+ dtype=dtype,
119
+ layout=layout,
120
+ requires_grad=requires_grad,
121
+ device_mesh=device_mesh,
122
+ placements=placements,
123
+ )
124
+
125
+
126
+ def empty(
127
+ *size,
128
+ dtype: Optional[torch.dtype] = None,
129
+ layout: torch.layout = torch.strided,
130
+ requires_grad: bool = False,
131
+ device_mesh: Optional[DeviceMesh] = None,
132
+ placements: Optional[Sequence[Placement]] = None,
133
+ ) -> DTensor:
134
+ """
135
+ Returns a :class:`DTensor` filled with uninitialized data. The shape of the :class:`DTensor`
136
+ is defined by the variable argument ``size``.
137
+
138
+ Args:
139
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
140
+ Can be a variable number of arguments or a collection like a list or tuple.
141
+ E.g.: empty(1,2,3..) or empty([1,2,3..]) or empty((1,2,3..))
142
+
143
+ Keyword args:
144
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
145
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
146
+ layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`.
147
+ Default: ``torch.strided``.
148
+ requires_grad (bool, optional): If autograd should record operations on the
149
+ returned :class:`DTensor`. Default: ``False``.
150
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks
151
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
152
+
153
+ Returns:
154
+ A :class:`DTensor` object on each rank
155
+ """
156
+ torch_size = normalize_to_torch_size(size)
157
+
158
+ return _dtensor_init_helper(
159
+ torch.empty,
160
+ torch_size,
161
+ dtype=dtype,
162
+ layout=layout,
163
+ requires_grad=requires_grad,
164
+ device_mesh=device_mesh,
165
+ placements=placements,
166
+ )
167
+
168
+
169
+ def full(
170
+ size,
171
+ fill_value,
172
+ *,
173
+ dtype: Optional[torch.dtype] = None,
174
+ layout: torch.layout = torch.strided,
175
+ requires_grad: bool = False,
176
+ device_mesh: Optional[DeviceMesh] = None,
177
+ placements: Optional[Sequence[Placement]] = None,
178
+ ) -> DTensor:
179
+ """
180
+ Returns a :class:`DTensor` filled with ``fill_value``. The scalar value type should match
181
+ ``device_mesh.device_type``.
182
+
183
+ Args:
184
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
185
+ Can be a variable number of arguments or a collection like a list or tuple.
186
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
187
+ fill_value(Scalar): the value to fill the output tensor with.
188
+
189
+ Keyword args:
190
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
191
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
192
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
193
+ Default: ``torch.strided``.
194
+ requires_grad (bool, optional): If autograd should record operations on the
195
+ returned :class:`DTensor`. Default: ``False``.
196
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks.
197
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
198
+
199
+ Returns:
200
+ A :class:`DTensor` object on each rank
201
+ """
202
+ torch_size = normalize_to_torch_size(size)
203
+
204
+ return _dtensor_init_helper(
205
+ torch.full,
206
+ torch_size,
207
+ fill_value=fill_value,
208
+ dtype=dtype,
209
+ layout=layout,
210
+ requires_grad=requires_grad,
211
+ device_mesh=device_mesh,
212
+ placements=placements,
213
+ )
214
+
215
+
216
+ def rand(
217
+ *size,
218
+ requires_grad: bool = False,
219
+ dtype: Optional[torch.dtype] = None,
220
+ layout: torch.layout = torch.strided,
221
+ device_mesh: Optional[DeviceMesh] = None,
222
+ placements: Optional[Sequence[Placement]] = None,
223
+ ) -> DTensor:
224
+ """
225
+ Returns a :class:`DTensor` filled with random numbers from a uniform distribution
226
+ on the interval ``[0, 1)``. The shape of the tensor is defined by the variable
227
+ argument ``size``.
228
+
229
+ Args:
230
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
231
+ Can be a variable number of arguments or a collection like a list or tuple.
232
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
233
+
234
+ Keyword args:
235
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
236
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
237
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
238
+ Default: ``torch.strided``.
239
+ requires_grad (bool, optional): If autograd should record operations on the
240
+ returned :class:`DTensor`. Default: ``False``.
241
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks.
242
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
243
+
244
+ Returns:
245
+ A :class:`DTensor` object on each rank
246
+ """
247
+ torch_size = normalize_to_torch_size(size)
248
+
249
+ return _dtensor_init_helper(
250
+ torch.rand,
251
+ torch_size,
252
+ dtype=dtype,
253
+ layout=layout,
254
+ requires_grad=requires_grad,
255
+ device_mesh=device_mesh,
256
+ placements=placements,
257
+ )
258
+
259
+
260
+ def randn(
261
+ *size,
262
+ requires_grad: bool = False,
263
+ dtype: Optional[torch.dtype] = None,
264
+ layout: torch.layout = torch.strided,
265
+ device_mesh: Optional[DeviceMesh] = None,
266
+ placements: Optional[Sequence[Placement]] = None,
267
+ ) -> DTensor:
268
+ """
269
+ Returns a :class:`DTensor` filled with random numbers from a normal distribution
270
+ with mean 0 and variance 1. The shape of the tensor is defined by the variable
271
+ argument ``size``.
272
+
273
+ Args:
274
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
275
+ Can be a variable number of arguments or a collection like a list or tuple.
276
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
277
+
278
+ Keyword args:
279
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
280
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
281
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
282
+ Default: ``torch.strided``.
283
+ requires_grad (bool, optional): If autograd should record operations on the
284
+ returned :class:`DTensor`. Default: ``False``.
285
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks.
286
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
287
+
288
+ Returns:
289
+ A :class:`DTensor` object on each rank
290
+ """
291
+ torch_size = normalize_to_torch_size(size)
292
+
293
+ return _dtensor_init_helper(
294
+ torch.randn,
295
+ torch_size,
296
+ dtype=dtype,
297
+ layout=layout,
298
+ requires_grad=requires_grad,
299
+ device_mesh=device_mesh,
300
+ placements=placements,
301
+ )
302
+
303
+
304
+ def zeros(
305
+ *size,
306
+ requires_grad: bool = False,
307
+ dtype: Optional[torch.dtype] = None,
308
+ layout: torch.layout = torch.strided,
309
+ device_mesh: Optional[DeviceMesh] = None,
310
+ placements: Optional[Sequence[Placement]] = None,
311
+ ) -> DTensor:
312
+ """
313
+ Returns a :class:`DTensor` filled with the scalar value 0.
314
+
315
+ Args:
316
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
317
+ Can be a variable number of arguments or a collection like a list or tuple.
318
+ E.g.: zeros(1,2,3..) or zeros([1,2,3..]) or zeros((1,2,3..))
319
+ Keyword args:
320
+ requires_grad (bool, optional): If autograd should record operations on the
321
+ returned :class:`DTensor`. Default: ``False``.
322
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
323
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
324
+ layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`.
325
+ Default: ``torch.strided``.
326
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks
327
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
328
+
329
+ Returns:
330
+ A :class:`DTensor` object on each rank
331
+ """
332
+ torch_size = normalize_to_torch_size(size)
333
+
334
+ return _dtensor_init_helper(
335
+ torch.zeros,
336
+ torch_size,
337
+ dtype=dtype,
338
+ layout=layout,
339
+ requires_grad=requires_grad,
340
+ device_mesh=device_mesh,
341
+ placements=placements,
342
+ )
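
All of the factory functions above (`ones`, `empty`, `full`, `rand`, `randn`, `zeros`) funnel into `_dtensor_init_helper`, which computes the local shard shape from the mesh and placements and materializes only that shard on each rank. Below is a minimal usage sketch, assuming a 4-GPU job launched with torchrun; the script name is hypothetical.

    # Minimal sketch, assuming 4 GPUs and a launch like:
    #   torchrun --nproc-per-node=4 demo.py
    import torch.distributed._tensor as dt
    from torch.distributed._tensor import Replicate, Shard, init_device_mesh

    mesh = init_device_mesh("cuda", (4,))  # 1-D device mesh over 4 ranks

    # Global shape (8, 4) sharded on dim 0: each rank materializes only a (2, 4) shard.
    sharded = dt.ones(8, 4, device_mesh=mesh, placements=[Shard(0)])

    # Replicated placement: every rank holds the full (8, 4) tensor of zeros.
    replicated = dt.zeros(8, 4, device_mesh=mesh, placements=[Replicate()])

    print(sharded.to_local().shape)  # torch.Size([2, 4]) on every rank
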
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (10.9 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc ADDED
Binary file (6.86 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (6.39 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/api.cpython-310.pyc ADDED
Binary file (22.2 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc ADDED
Binary file (342 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc ADDED
Binary file (8.94 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc ADDED
Binary file (15.3 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc ADDED
Binary file (18 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc ADDED
Binary file (13.9 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc ADDED
Binary file (7 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc ADDED
Binary file (8.96 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/tp_conv.cpython-310.pyc ADDED
Binary file (6.11 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py ADDED
@@ -0,0 +1,313 @@
1
+ import logging
2
+ import math
3
+ from dataclasses import dataclass
4
+ from functools import lru_cache
5
+
6
+ from typing import List, Optional
7
+
8
+ import torch
9
+ import torch.distributed._tensor.placement_types as placement_types
10
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
11
+ from torch.distributed.distributed_c10d import (
12
+ all_to_all,
13
+ broadcast,
14
+ get_global_rank,
15
+ get_rank,
16
+ get_world_size,
17
+ GroupMember,
18
+ ProcessGroup,
19
+ scatter,
20
+ Work,
21
+ )
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ # TODO: we need to migrate these APIs to be functional collectives
27
+
28
+
29
+ def mesh_scatter(
30
+ output: torch.Tensor,
31
+ scatter_list: List[torch.Tensor],
32
+ mesh: DeviceMesh,
33
+ mesh_dim: int = 0,
34
+ async_op: bool = False,
35
+ ) -> Optional[Work]:
36
+ """
37
+ scatter a list of tensors to a device mesh dimension. We by default
38
+ use the first rank of the mesh dimension as the source of truth, i.e
39
+ for a 2d mesh [[0, 1], [2, 3]], if we scatter on mesh_dim = 1, we will
40
+ scatter the tensor list on rank 0 to rank 0/1, and tensor list on rank
41
+ 2 to rank 2/3.
42
+
43
+ Args:
44
+ output (torch.Tensor): the tensor to receive the scattered list.
45
+ scatter_list (List[torch.Tensor]): the tensor list to be scattered.
46
+ mesh_dim (int, optional): indicate which mesh dimension we want
47
+ to scatter on, we by default choose the first rank on the
48
+ mesh dimension as source of truth.
49
+
50
+ Returns:
51
+ A :class:`Work` object
52
+ """
53
+ # TODO: Ideally we should use the meta tensor way
54
+ # (to register a meta kernel for the collective op)
55
+ # so that it would avoid the communication. Need to
56
+ # remove the check below once that is done.
57
+ if output.is_meta:
58
+ return None
59
+ dim_group = mesh.get_group(mesh_dim)
60
+ assert isinstance(dim_group, ProcessGroup)
61
+ # src need to be global rank
62
+ src_for_dim = 0
63
+
64
+ if dim_group is not GroupMember.WORLD:
65
+ src_for_dim = get_global_rank(dim_group, 0)
66
+
67
+ if src_for_dim == get_rank():
68
+ fut = scatter(
69
+ output,
70
+ scatter_list=scatter_list,
71
+ src=src_for_dim,
72
+ group=dim_group,
73
+ async_op=async_op,
74
+ )
75
+ else:
76
+ fut = scatter(
77
+ output,
78
+ scatter_list=None,
79
+ src=src_for_dim,
80
+ group=dim_group,
81
+ async_op=async_op,
82
+ )
83
+
84
+ return fut
85
+
86
+
87
+ def mesh_broadcast(
88
+ tensor: torch.Tensor,
89
+ mesh: DeviceMesh,
90
+ mesh_dim: int = 0,
91
+ async_op: bool = False,
92
+ ) -> Optional[Work]:
93
+ """
94
+ broadcast the tensor to a device mesh dimension. We by default
95
+ use the first rank of the mesh dimension as the source of truth, i.e
96
+ for a 2d mesh [[0, 1], [2, 3]], if we broadcast on mesh_dim = 1, we will
97
+ broadcast the tensor on rank 0 to rank 0/1, and tensor on rank 2
98
+ to rank 2/3.
99
+
100
+ Args:
101
+ tensor (torch.Tensor): tensor to broadcast.
102
+ mesh_dim (int, optional): indicate which mesh dimension we want
103
+ to broadcast on, we by default choose the first rank on the
104
+ mesh dimension as source of truth.
105
+
106
+ Returns:
107
+ A :class:`Work` object
108
+ """
109
+ # TODO: Ideally we should use the meta tensor way
110
+ # (to register a meta kernel for the collective op)
111
+ # so that it would avoid the communication. Need to
112
+ # remove the check below once that is done.
113
+ if tensor.is_meta:
114
+ return None
115
+ dim_group = mesh.get_group(mesh_dim)
116
+ assert isinstance(dim_group, ProcessGroup)
117
+ # src need to be global rank
118
+ src_for_dim = 0
119
+ if dim_group is not GroupMember.WORLD:
120
+ src_for_dim = get_global_rank(dim_group, 0)
121
+
122
+ return broadcast(tensor, src=src_for_dim, group=dim_group, async_op=async_op)
123
+
124
+
125
+ # TODO: test uneven split on GLOO and NCCL
126
+ def mesh_all_to_all(
127
+ output_tensor_list: List[torch.Tensor],
128
+ input_tensor_list: List[torch.Tensor],
129
+ mesh: DeviceMesh,
130
+ mesh_dim: int = 0,
131
+ async_op: bool = False,
132
+ ) -> Optional[Work]:
133
+ dim_group = mesh.get_group(mesh_dim)
134
+ assert isinstance(dim_group, ProcessGroup)
135
+
136
+ work = None
137
+ # no direct dist.all_to_all support on 'gloo' so we manually do scatters
138
+ if mesh.device_type == "cpu":
139
+ logger.warning(
140
+ "ProcessGroupGloo does not support all_to_all, falling back with scatters!"
141
+ )
142
+ # TODO: pull the handle of uneven case in #492
143
+ dim_group_size = get_world_size(dim_group)
144
+ for i in range(dim_group_size):
145
+ # src need to be global rank
146
+ src_for_dim = i
147
+ if dim_group is not GroupMember.WORLD:
148
+ src_for_dim = get_global_rank(dim_group, i)
149
+
150
+ work = scatter(
151
+ output_tensor_list[i],
152
+ input_tensor_list if mesh.get_rank() == src_for_dim else [],
153
+ group=dim_group,
154
+ src=src_for_dim,
155
+ async_op=async_op,
156
+ )
157
+ else:
158
+ work = all_to_all(
159
+ output_tensor_list,
160
+ input_tensor_list,
161
+ dim_group,
162
+ async_op=async_op,
163
+ )
164
+ return work
165
+
166
+
167
+ def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int:
168
+ assert spec.tensor_meta is not None, "spec should have tensor meta defined!"
169
+ return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape)
170
+
171
+
172
+ @dataclass
173
+ class MeshTopoInfo:
174
+ """
175
+ Mesh information for collective cost estimation
176
+ """
177
+
178
+ mesh: DeviceMesh
179
+ mesh_dim_devices: List[int]
180
+ mesh_dim_bandwidth: List[float]
181
+ mesh_dim_latency: List[float]
182
+
183
+ @staticmethod
184
+ @lru_cache(None)
185
+ def build_from_mesh(mesh: DeviceMesh) -> "MeshTopoInfo":
186
+ # Generate mesh topology info for intra-host/inter-host communication pattern
187
+ # Note that we made a bunch of assumptions for simplicity:
188
+ # 1. we assume the mesh is homogeneous and uses the gpu/nccl model
189
+ # 2. we assume gpu arch is Ampere or Hopper
190
+ # 3. we assume collectives all use ring-based algorithms for now
191
+ num_devices_per_host = _mesh_resources.num_devices_per_host(mesh.device_type)
192
+ # the base bw number (intra-node), GB/s
193
+ base_bw = 87.7
194
+ mesh_dim_bandwidth = [base_bw] * mesh.ndim
195
+ # the latency in terms of us (intra-node, nv-link)
196
+ mesh_dim_latency = [0.6] * mesh.ndim
197
+ mesh_dim_devices = [1] * mesh.ndim
198
+
199
+ total_num_devices = 1
200
+ for mesh_dim in reversed(range(mesh.ndim)):
201
+ num_devices = mesh.size(mesh_dim)
202
+ mesh_dim_devices[mesh_dim] = num_devices
203
+ total_num_devices *= num_devices
204
+ if total_num_devices > num_devices_per_host:
205
+ # magic number for inter-host communication bandwidth/latency factor
206
+ # This number assumes latest GPU arch, i.e. Ampere or Hopper
207
+ # TODO: see if we need to tweak this or offer a way for user
208
+ # to specify the bandwidths/latency
209
+ mesh_dim_bandwidth[mesh_dim] *= 0.22
210
+ # set to ethernet latency for inter-host
211
+ mesh_dim_latency[mesh_dim] = 2.7
212
+
213
+ return MeshTopoInfo(
214
+ mesh, mesh_dim_devices, mesh_dim_bandwidth, mesh_dim_latency
215
+ )
216
+
217
+
218
+ def allgather_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float:
219
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
220
+ mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim]
221
+ num_hops = num_devices_on_mesh_dim - 1
222
+ # base latency + comm latency
223
+ latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] # us
224
+ bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth # s
225
+ return latency + bw * 1e6 # rescale to us
226
+
227
+
228
+ def allreduce_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float:
229
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
230
+ mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim]
231
+ # allreduce has almost 2x comm bytes compared to allgather/reduce_scatter
232
+ num_hops = 2 * num_devices_on_mesh_dim - 1
233
+
234
+ latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
235
+ bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth
236
+ return latency + bw * 1e6
237
+
238
+
239
+ def reduce_scatter_cost(
240
+ bytes_gb: float,
241
+ mesh_topo: MeshTopoInfo,
242
+ mesh_dim: int,
243
+ ) -> float:
244
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
245
+ mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim]
246
+ num_hops = num_devices_on_mesh_dim - 1
247
+ # base latency + comm latency
248
+ latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
249
+ bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth
250
+ return latency + bw * 1e6
251
+
252
+
253
+ def redistribute_cost(
254
+ current_spec: "placement_types.DTensorSpec",
255
+ target_spec: "placement_types.DTensorSpec",
256
+ ) -> float:
257
+ """
258
+ This function returns the cost of redistribute from current to target DTensorSpec.
259
+
260
+ NOTE:
261
+ 1. Only consider communication cost here, since computation costs for redistribute
262
+ are quite trivial (i.e. we only need narrowing or a simple division)
263
+ 2. Only consider redistribute cost on same mesh, cross mesh communication cost is
264
+ not quite needed for operator strategy estimation/selection.
265
+ """
266
+ if current_spec.mesh != target_spec.mesh:
267
+ # make infinite cost if meshes are not same
268
+ # TODO: see if we want to support this once there's cross mesh communication
269
+ return float("inf")
270
+
271
+ if current_spec.is_replicated():
272
+ # short-cut:
273
+ # comm cost is 0 if current spec is already full replication
274
+ return 0.0
275
+
276
+ mesh_topo = MeshTopoInfo.build_from_mesh(current_spec.mesh)
277
+ cost = 0.0
278
+ comm_bytes_gb = (
279
+ spec_to_bytes(current_spec) / current_spec.num_shards / 1024 / 1024 / 1024
280
+ )
281
+ # Transformation that considered for redistribute cost:
282
+ # 1. allgather 2. alltoall
283
+ # 3. allreduce 4. reduce_scatter
284
+ for i, (current, target) in enumerate(
285
+ zip(current_spec.placements, target_spec.placements)
286
+ ):
287
+ if current == target:
288
+ continue
289
+
290
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[i]
291
+ if current.is_shard() and target.is_replicate():
292
+ # allgather gives larger comm bytes
293
+ comm_bytes_gb *= num_devices_on_mesh_dim
294
+ # add up allgather comm cost
295
+ cost += allgather_cost(comm_bytes_gb, mesh_topo, i)
296
+ elif current.is_shard() and target.is_shard():
297
+ # should be alltoall comm, since we haven't implement it yet, add penalty
298
+ # to favor allgather instead
299
+ cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + 1.0
300
+ elif current.is_partial() and target.is_replicate():
301
+ # add up allreduce comm cost
302
+ cost += allreduce_cost(comm_bytes_gb, mesh_topo, i)
303
+ elif current.is_partial() and target.is_shard():
304
+ # add up reduce_scatter comm cost
305
+ cost += reduce_scatter_cost(comm_bytes_gb, mesh_topo, i)
306
+ # after reduce_scatter the comm bytes for further collectives are halved.
307
+ comm_bytes_gb /= num_devices_on_mesh_dim
308
+ elif current.is_shard() and target.is_partial():
309
+ # ban shard -> partial as it does not make sense to perform
310
+ # this redistribute
311
+ return float("inf")
312
+
313
+ return cost
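
The cost helpers above all follow the same alpha-beta pattern: a fixed base latency plus a per-hop latency term, and a bandwidth term scaled by the number of ring hops. The standalone sketch below re-derives the all-gather estimate with plain numbers so the formula can be checked without constructing a `DeviceMesh`; it mirrors the constants in this file but is not the real helper.

    # Standalone sketch of the ring all-gather cost formula used in allgather_cost.
    # Constants mirror this file: 6.6 us base latency, 0.6 us per intra-node hop,
    # 87.7 GB/s intra-node bandwidth. Returns an estimated cost in microseconds.
    def allgather_cost_sketch(bytes_gb: float, num_devices: int,
                              bandwidth_gbps: float = 87.7,
                              hop_latency_us: float = 0.6) -> float:
        num_hops = num_devices - 1
        latency_us = 6.6 + num_hops * hop_latency_us
        bw_seconds = (bytes_gb * num_hops / num_devices) / bandwidth_gbps
        return latency_us + bw_seconds * 1e6

    # Gathering 1 GB across 8 intra-node GPUs:
    print(allgather_cost_sketch(1.0, 8))  # ~9988 us, dominated by the bandwidth term
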
venv/lib/python3.10/site-packages/torch/distributed/_tensor/_utils.py ADDED
@@ -0,0 +1,204 @@
1
+ from typing import cast, List, Sequence, Tuple
2
+
3
+ import torch
4
+ import torch.distributed._tensor.api as dtensor
5
+ from torch._prims_common import ShapeType
6
+ from torch.distributed._tensor.placement_types import (
7
+ _Partial,
8
+ DTensorSpec,
9
+ Placement,
10
+ Replicate,
11
+ Shard,
12
+ )
13
+ from torch.distributed.device_mesh import DeviceMesh
14
+
15
+
16
+ # TODO: audit existing code base to see if we can safely remove this API.
17
+ def compute_local_shape(
18
+ global_shape: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement]
19
+ ) -> Tuple[int, ...]:
20
+ """
21
+ Compute the shape of a local shard of the given DTensor on its current
22
+ coordinate of the mesh.
23
+ """
24
+ my_coordinate = mesh.get_coordinate()
25
+
26
+ if my_coordinate is None:
27
+ # if rank not in the mesh, return empty shape
28
+ return (0,)
29
+ else:
30
+ local_shape = list(global_shape) # start with global shape
31
+ ndim = len(global_shape)
32
+ for idx, placement in enumerate(placements):
33
+ mesh_dim_size = mesh.size(idx)
34
+ if isinstance(placement, Shard):
35
+ shard_dim = placement.dim
36
+ assert (
37
+ shard_dim < ndim
38
+ ), f"Sharding dim {shard_dim} greater than tensor ndim {ndim}"
39
+ local_shard_size, _ = placement._local_shard_size_on_dim(
40
+ local_shape[shard_dim], mesh_dim_size, my_coordinate[idx]
41
+ )
42
+ assert isinstance(local_shard_size, int)
43
+ local_shape[shard_dim] = local_shard_size
44
+
45
+ return tuple(local_shape)
46
+
47
+
48
+ def compute_local_shape_and_global_offset(
49
+ global_shape: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement]
50
+ ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
51
+ """
52
+ Compute the local tensor shape and the global offsets into the original tensor
53
+ of a DTensor on its current global rank. This is useful for checkpointing purpose.
54
+
55
+ Example (2 host with 4GPUs each):
56
+ # Below is a DeviceMesh with mesh_shape of (2, 4)
57
+ mesh = DeviceMesh(device_type="cuda",
58
+ mesh=[
59
+ [0, 1, 2, 3],
60
+ [4, 5, 6, 7]
61
+ ],
62
+ )
63
+
64
+ Let's say we distribute a global_tensor of shape (8,4) over the above DeviceMesh
65
+ with a placements of [Shard(0), Shard(0)].
66
+ The local shape and global offset will be as follows:
67
+ rank0 -- local_shape:[1, 4], global_offset:[0, 0]
68
+ rank1 -- local_shape:[1, 4], global_offset:[1, 0]
69
+ rank2 -- local_shape:[1, 4], global_offset:[2, 0]
70
+ rank5 -- local_shape:[1, 4], global_offset:[5, 0]
71
+ rank3 -- local_shape:[1, 4], global_offset:[3, 0]
72
+ rank4 -- local_shape:[1, 4], global_offset:[4, 0]
73
+ rank6 -- local_shape:[1, 4], global_offset:[6, 0]
74
+ rank7 -- local_shape:[1, 4], global_offset:[7, 0]
75
+
76
+ Let's say we distribute a global_tensor of shape (2) over the above DeviceMesh with
77
+ a placements of [Shard(0)]. We will not have non-empty local tensor for all the ranks.
78
+ The local shape and global offset will be as follows:
79
+ rank0 -- local_shape:[1,], global_offset:[0,]
80
+ rank1 -- local_shape:[1,], global_offset:[1,]
81
+ rank2 -- local_shape:[0,], global_offset:[2,]
82
+ rank5 -- local_shape:[0,], global_offset:[2,]
83
+ rank3 -- local_shape:[0,], global_offset:[2,]
84
+ rank4 -- local_shape:[0,], global_offset:[2,]
85
+ rank6 -- local_shape:[0,], global_offset:[2,]
86
+ rank7 -- local_shape:[0,], global_offset:[2,]
87
+ """
88
+ my_coordinate = mesh.get_coordinate()
89
+
90
+ if my_coordinate is None:
91
+ # if rank not in the mesh, return empty offset
92
+ return ((), ())
93
+ else:
94
+ local_shape = list(global_shape)
95
+ global_offset = [0] * len(global_shape)
96
+
97
+ for idx, placement in enumerate(placements):
98
+ mesh_dim_size = mesh.size(idx)
99
+ if isinstance(placement, Shard):
100
+ shard_dim = placement.dim
101
+ local_offset = [0] * len(global_shape)
102
+ assert shard_dim < len(
103
+ local_shape
104
+ ), f"Sharding dim {shard_dim} greater than tensor ndim {len(local_shape)}"
105
+ shard_size, shard_offset = placement._local_shard_size_on_dim(
106
+ local_shape[shard_dim],
107
+ mesh_dim_size,
108
+ my_coordinate[idx],
109
+ return_offset=True,
110
+ )
111
+
112
+ local_shape[shard_dim] = shard_size
113
+ local_offset[shard_dim] = shard_offset
114
+
115
+ # On a given dimension, if the local_offset[shard_dim] is smaller than global_offset[shard_dim],
116
+ # it means that this dimension has been already sharded in previous placement.
117
+ # Therefore, we cannot simply replace the global_offset[shard_dim] with local_offset[shard_dim].
118
+ # Instead, for the given shard_dim, we need to add local_offset[shard_dim] to existing global_offset[shard_dim].
119
+ if global_offset[shard_dim] <= local_offset[shard_dim]:
120
+ global_offset[shard_dim] = local_offset[shard_dim]
121
+ else:
122
+ global_offset[shard_dim] += local_offset[shard_dim]
123
+
124
+ return tuple(local_shape), tuple(global_offset)
125
+
126
+
127
+ def compute_global_tensor_info(
128
+ tensor: torch.Tensor, mesh: DeviceMesh, placements: Sequence[Placement]
129
+ ) -> Tuple[List[int], List[int]]:
130
+ """
131
+ Compute the global size and stride of a DTensor from the given local tensor.
132
+ The local size is multiplied by `world_size` per Sharding dim.
133
+ The local stride is multiplied by `world_size` per Sharding dim, as long as the
134
+ dimension is outside sharding dim.
135
+
136
+ For example, if we have a local tensor with size (4, 8, 2) and stride (16, 1, 8).
137
+ If the DTensor placements are [Shard(2)] and world_size is 2;
138
+ then the global size is (4, 8, 4) and stride is (16 * 2, 1, 8).
139
+
140
+ Args:
141
+ tensor (:class:`torch.Tensor`):
142
+ Local tensor which DTensor will be constructed from.
143
+ mesh (:class:`DeviceMesh`):
144
+ Object which describes the mesh topology
145
+ of devices for the DTensor.
146
+ placements (Sequence[:class:`Placement`]]):
147
+ The attribute of the DTensor that describes its layout
148
+ on the mesh topology.
149
+
150
+ Return:
151
+ tensor_shape: A List of int which specifies the size of the DTensor built
152
+ on top of the local tensor.
153
+ tensor_stride: A List of int which specifies the stride of DTensor.
154
+ """
155
+ tensor_shape = list(tensor.size())
156
+ tensor_stride = list(tensor.stride())
157
+ for idx, placement in enumerate(placements):
158
+ mesh_dim_size = mesh.size(idx)
159
+ if placement.is_shard():
160
+ shard_placement = cast(Shard, placement)
161
+ if shard_placement.dim < 0:
162
+ raise AssertionError(
163
+ "Shard placements should have negative dims normalized in "
164
+ f"the user-facing APIs: {shard_placement}"
165
+ )
166
+ shard_dim = shard_placement.dim
167
+
168
+ assert (
169
+ shard_dim < tensor.ndim
170
+ ), f"Sharding dim {shard_dim} greater than tensor ndim {tensor.ndim} for placement number {idx}."
171
+
172
+ local_dim_size = tensor_shape[shard_dim]
173
+ tensor_shape[shard_dim] = local_dim_size * mesh_dim_size
174
+
175
+ # recover tensor stride by modifying the stride that larger than
176
+ # the current stride on the shard_dim
177
+ for i in range(len(tensor_stride)):
178
+ if i != shard_dim and tensor_stride[i] >= tensor_stride[shard_dim]:
179
+ # rescale the stride by the shard size
180
+ tensor_stride[i] = tensor_stride[i] * mesh_dim_size
181
+ elif not isinstance(placement, (Replicate, _Partial)):
182
+ raise RuntimeError(f"placement type {type(placement)} not supported!")
183
+ return tensor_shape, tensor_stride
184
+
185
+
186
+ def try_find_mesh_from_args(
187
+ op_call: torch._ops.OpOverload, args: Sequence[object]
188
+ ) -> DeviceMesh:
189
+ """
190
+ Find the device mesh object from args.
191
+ It raises a ValueError if no mesh is found.
192
+ NOTE: we can optimize this search if needed
193
+ """
194
+ for arg in args:
195
+ if isinstance(arg, (dtensor.DTensor, DTensorSpec)):
196
+ return arg.device_mesh
197
+ elif (
198
+ isinstance(arg, (list, tuple))
199
+ and len(arg) > 0
200
+ and isinstance(arg[0], (dtensor.DTensor, DTensorSpec))
201
+ ):
202
+ return arg[0].device_mesh
203
+
204
+ raise ValueError(f"Cannot find device mesh from args for op : {op_call}.")
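
For the simple 1-D `Shard(0)` case, the arithmetic behind `compute_local_shape_and_global_offset` can be reproduced without a `DeviceMesh`. The sketch below is a plain-Python illustration of that arithmetic (ceil-division chunking, including the uneven tail from the docstring example); it is not a call into the DTensor helpers.

    # Plain-Python sketch of 1-D Shard(0) local size / global offset,
    # mirroring the docstring examples above (even and uneven cases).
    def shard_dim0(global_dim: int, num_ranks: int, rank: int):
        chunk = -(-global_dim // num_ranks)      # ceil division, matching torch.chunk
        start = min(rank * chunk, global_dim)
        end = min(start + chunk, global_dim)
        return end - start, start                # (local size, global offset)

    # Global dim 8 over 8 ranks: every rank gets size 1 at offset == rank.
    print([shard_dim0(8, 8, r) for r in range(8)])
    # Global dim 2 over 8 ranks: ranks 0-1 get size 1, the rest get empty shards at offset 2.
    print([shard_dim0(2, 8, r) for r in range(8)])
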
venv/lib/python3.10/site-packages/torch/distributed/_tensor/api.py ADDED
@@ -0,0 +1,760 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import inspect
3
+ import warnings
4
+ from typing import Any, Callable, cast, Optional, Sequence, Tuple
5
+
6
+ import torch
7
+
8
+ import torch.distributed._tensor.dispatch as op_dispatch
9
+ import torch.distributed._tensor.random as random
10
+ import torch.nn as nn
11
+ from torch.distributed._tensor._collective_utils import mesh_broadcast
12
+ from torch.distributed._tensor._utils import compute_global_tensor_info
13
+ from torch.distributed._tensor.placement_types import (
14
+ DTensorSpec,
15
+ Placement,
16
+ Replicate,
17
+ Shard,
18
+ TensorMeta,
19
+ )
20
+ from torch.distributed._tensor.random import (
21
+ is_rng_supported_mesh,
22
+ OffsetBasedRNGTracker,
23
+ )
24
+ from torch.distributed._tensor.redistribute import (
25
+ Redistribute,
26
+ redistribute_local_tensor,
27
+ )
28
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
29
+
30
+
31
+ __all__ = ["DTensor", "distribute_tensor", "distribute_module"]
32
+
33
+ aten = torch.ops.aten
34
+
35
+
36
+ # NOTE [Autograd interaction between torch.Tensor]
37
+ #
38
+ # The autograd functions defined below are being used by the public
39
+ # facing APIs (i.e. from_local, to_local) to ensure our DTensor
40
+ # works together with torch.Tensor within autograd engine. This
41
+ # allows DistributedTensor to exist on part of the module hierarchy
42
+ # and still be able to calculate gradients across the torch.Tensor and
43
+ # DistributedTensor boundary.
44
+ # As an example, we have a module that consists of submodules
45
+ # A, B, and C, the execution flow would be like:
46
+ # input(torch.Tensor) -> Module A -> Module B -> Module C -> output (torch.Tensor)
47
+ #
48
+ # Suppose I only want to make Module B be a sharded module with
49
+ # DistributedTensor params, we would need to make the following
50
+ # flow to work:
51
+ #
52
+ # input(torch.Tensor) -> Module A
53
+ # -> DTensor input -> Sharded Module B -> DTensor output
54
+ # -> output (torch.Tensor) -> Module C -> output (torch.Tensor)
55
+ #
56
+ # We need the conversion from Module A to DTensor input, which is
57
+ # `from_local`, and conversion from DTensor output to output, which
58
+ # is `to_local`, thus these two functions must be Autograd functions.
59
+ #
60
+ class _ToTorchTensor(torch.autograd.Function):
61
+ @staticmethod
62
+ def forward( # type: ignore[override]
63
+ ctx,
64
+ input: "DTensor",
65
+ grad_placements: Optional[Sequence[Placement]],
66
+ ):
67
+ ctx.dtensor_spec = input._spec
68
+ ctx.grad_placements = grad_placements
69
+ local_tensor = input._local_tensor
70
+
71
+ # We need to return a fresh Tensor object here, as autograd metadata
72
+ # will be written into it in place. So we don't want to pollute the Tensor
73
+ # object stored in the _local_tensor of this DTensor.
74
+ return local_tensor.view_as(local_tensor)
75
+
76
+ @staticmethod
77
+ def backward(ctx, grad_output: torch.Tensor): # type: ignore[override]
78
+ dtensor_spec = ctx.dtensor_spec
79
+ mesh = dtensor_spec.mesh
80
+ grad_placements = ctx.grad_placements
81
+ dtensor_meta = dtensor_spec.tensor_meta
82
+
83
+ _, tensor_stride = compute_global_tensor_info(
84
+ grad_output, mesh, dtensor_spec.placements
85
+ )
86
+ tensor_stride = tuple(tensor_stride)
87
+ grad_placements = grad_placements or dtensor_spec.placements
88
+
89
+ return (
90
+ DTensor(
91
+ grad_output,
92
+ mesh,
93
+ grad_placements,
94
+ shape=dtensor_meta.shape,
95
+ dtype=dtensor_meta.dtype,
96
+ requires_grad=grad_output.requires_grad,
97
+ stride=tensor_stride,
98
+ ),
99
+ None,
100
+ )
101
+
102
+
103
+ class _FromTorchTensor(torch.autograd.Function):
104
+ @staticmethod
105
+ def forward( # type: ignore[override]
106
+ ctx, # pyre-ignore[2]: Parameter must be annotated.
107
+ input: torch.Tensor,
108
+ device_mesh: DeviceMesh,
109
+ placements: Tuple[Placement, ...],
110
+ run_check: bool,
111
+ shape: Optional[torch.Size] = None,
112
+ stride: Optional[Tuple[int, ...]] = None,
113
+ ) -> "DTensor":
114
+ ctx.previous_placement = placements
115
+ ctx.previous_device_mesh = device_mesh
116
+
117
+ if shape and stride:
118
+ tensor_shape, tensor_stride = shape, stride
119
+ elif not shape and not stride:
120
+ # if it's not by default run_check, we assume user is certain that each
121
+ # rank has the same tensor shape, and we just use that to calculate the
122
+ # global shape
123
+ global_shape, global_stride = compute_global_tensor_info(
124
+ input, device_mesh, placements
125
+ )
126
+ tensor_shape, tensor_stride = torch.Size(global_shape), tuple(global_stride)
127
+ else:
128
+ raise RuntimeError(
129
+ f"Found shape:{shape}, stride:{stride}.",
130
+ "Please pass both shape and stride at the same time.",
131
+ )
132
+
133
+ if device_mesh.get_coordinate() is None:
134
+ # if the global rank is not participating in the device mesh, we
135
+ # simply set the local tensor to an empty tensor
136
+ input = input.new_empty(0, requires_grad=input.requires_grad)
137
+ elif run_check:
138
+ # TODO: by default check tensor metas across rank
139
+ # TODO: See if we need to make this run_check logic
140
+ # have a corresponding backward.
141
+ for idx, placement in enumerate(placements):
142
+ if placement.is_replicate():
143
+ # broadcast rank 0 tensor to all ranks
144
+ # only broadcast if run_check is True
145
+ input = input.contiguous()
146
+ mesh_broadcast(input, device_mesh, mesh_dim=idx)
147
+
148
+ # We want a fresh Tensor object that shares memory with the input tensor
149
+ dist_tensor = DTensor(
150
+ input.view_as(input),
151
+ device_mesh,
152
+ placements,
153
+ shape=tensor_shape,
154
+ dtype=input.dtype,
155
+ # requires_grad of the dist tensor depends on if input
156
+ # requires_grad or not
157
+ requires_grad=input.requires_grad,
158
+ stride=tensor_stride,
159
+ )
160
+ return dist_tensor
161
+
162
+ @staticmethod
163
+ def backward(ctx, grad_output: "DTensor"): # type: ignore[override]
164
+ previous_placement = ctx.previous_placement
165
+ previous_device_mesh = ctx.previous_device_mesh
166
+
167
+ # reshard to the placement when creating DistributedTensor
168
+ # so that the gradient layout matches, and we could return
169
+ # local gradients directly
170
+ if grad_output.placements != previous_placement:
171
+ current_spec = grad_output._spec
172
+ target_spec = DTensorSpec(
173
+ previous_device_mesh,
174
+ previous_placement,
175
+ tensor_meta=grad_output._spec.tensor_meta,
176
+ )
177
+ local_tensor = grad_output._local_tensor
178
+ output = redistribute_local_tensor(
179
+ local_tensor, current_spec, target_spec, is_backward=True
180
+ )
181
+ # TODO: return the redistributed local tensor directly without
182
+ # differentiable backward. see if this make sense for all cases.
183
+ return output, None, None, None, None, None
184
+
185
+ # TODO: backward is also differentiable now, add a test
186
+ # to test higher level gradients.
187
+ return grad_output.to_local(), None, None, None, None, None
188
+
189
+
190
+ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
191
+ _local_tensor: torch.Tensor
192
+ _spec: DTensorSpec
193
+ __slots__ = ["_local_tensor", "_spec"]
194
+
195
+ # class attribute that handles operator placements propagation
196
+ # rules, keyed by aten op name, value is propagation func
197
+ _op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher()
198
+
199
+ @staticmethod
200
+ def __new__(
201
+ cls,
202
+ local_tensor: torch.Tensor,
203
+ device_mesh: DeviceMesh,
204
+ placements: Tuple[Placement, ...],
205
+ *,
206
+ shape: torch.Size,
207
+ dtype: torch.dtype,
208
+ requires_grad: bool,
209
+ stride: Tuple[int, ...],
210
+ ) -> "DTensor":
211
+ """
212
+ Construct a DTensor from a local tensor, device mesh, and placement and
213
+ other tensor properties (i.e. shape, requires_grad, strides, etc).
214
+ Note: This is not a public API and it's only supposed to be used by the
215
+ operator implementations and internals. If you want to construct a
216
+ DTensor from a local tensor, consider using `DTensor.from_local`, if
217
+ you want to construct a DTensor from a "global" tensor (where you
218
+ already have tensor initialized and want to shard this tensor),
219
+ consider using `distribute_tensor`.
220
+ """
221
+ if local_tensor.requires_grad and not requires_grad:
222
+ warnings.warn(
223
+ "To construct DTensor from torch.Tensor, it's recommended to "
224
+ "use local_tensor.detach() and make requires_grad consistent."
225
+ )
226
+
227
+ # __new__ constructs the wrapper tensor from local_tensor and adds the
228
+ # placement spec; it does not do any actual distribution
229
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
230
+ cls,
231
+ shape,
232
+ strides=stride,
233
+ dtype=dtype,
234
+ device=local_tensor.device,
235
+ layout=local_tensor.layout,
236
+ requires_grad=requires_grad,
237
+ )
238
+
239
+ tensor_meta = TensorMeta(shape, stride, dtype)
240
+ # deepcopy and set spec
241
+ r._spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta)
242
+ r._local_tensor = local_tensor
243
+ return r
244
+
245
+ # pyre-fixme[14]: `__repr__` overrides method defined in `DTensor` inconsistently.
246
+ # pyre-fixme[3]: Return type must be annotated.
247
+ def __repr__(self):
248
+ # TODO: consider all_gather the local tensors for better debugging
249
+ return f"DTensor(local_tensor={self._local_tensor}, device_mesh={self._spec.mesh}, placements={self._spec.placements})"
250
+
251
+ def __tensor_flatten__(self):
252
+ """
253
+ protocol to inform how to flatten a DTensor to local tensor
254
+ for PT2 tracing
255
+ """
256
+ return ["_local_tensor"], (self._spec, self.requires_grad)
257
+
258
+ @staticmethod
259
+ def __tensor_unflatten__(inner_tensors, flatten_spec, outer_size, outer_stride):
260
+ assert (
261
+ flatten_spec is not None
262
+ ), "Expecting spec to be not None from `__tensor_flatten__` return value!"
263
+ local_tensor = inner_tensors["_local_tensor"]
264
+ spec, requires_grad = flatten_spec
265
+ return DTensor(
266
+ local_tensor,
267
+ spec.mesh,
268
+ spec.placements,
269
+ shape=outer_size,
270
+ dtype=spec.tensor_meta.dtype,
271
+ requires_grad=requires_grad,
272
+ stride=outer_stride,
273
+ )
274
+
275
+ @classmethod
276
+ # pyre-fixme[3]: Return type must be annotated.
277
+ # pyre-fixme[2]: Parameter must be annotated.
278
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
279
+ return DTensor._op_dispatcher.dispatch(
280
+ func,
281
+ args,
282
+ kwargs or {},
283
+ )
284
+
285
+ @staticmethod
286
+ def from_local(
287
+ local_tensor: torch.Tensor,
288
+ device_mesh: Optional[DeviceMesh] = None,
289
+ placements: Optional[Sequence[Placement]] = None,
290
+ *,
291
+ run_check: bool = True,
292
+ shape: Optional[torch.Size] = None,
293
+ stride: Optional[Tuple[int, ...]] = None,
294
+ ) -> "DTensor":
295
+ """
296
+ Create a :class:`DTensor` from a local torch.Tensor on each rank
297
+ according to the `device_mesh` and `placements` specified.
298
+
299
+ Args:
300
+ local_tensor (torch.Tensor): local torch.Tensor on each rank.
301
+ device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the
302
+ tensor, if not specified, must be called under a DeviceMesh
303
+ context manager, default: None
304
+ placements (List[:class:`Placement`], optional): the placements that
305
+ describes how to place the local torch.Tensor on DeviceMesh, must
306
+ have the same number of elements as `device_mesh.ndim`. If not
307
+ specified, we will by default replicate the tensor across the
308
+ `device_mesh` from the first rank of each dimension of the `device_mesh`.
309
+
310
+ Keyword args:
311
+ run_check (bool, optional): indicate whether to run check across ranks
312
+ to check meta information and data. If there is a :class:`Replicate` in
313
+ `placements`, the data on first rank of the device mesh dimension
314
+ will be broadcasted to other ranks.
315
+ shape (torch.Size, optional): A List of int which specifies the size of
316
+ DTensor which build on top of `local_tensor`. Note this needs to be
317
+ provided if the shape of `local_tensor` is different across the ranks.
318
+ If not provided, `shape` will be computed assuming the given distributed
319
+ tensor is evenly sharded across ranks.
320
+ stride (tuple, optional): A List of int which specifies the stride of DTensor.
321
+ If not provided, `stride` will be computed assuming the given distributed
322
+ tensor is evenly sharded across ranks.
323
+
324
+ Returns:
325
+ A :class:`DTensor` object
326
+
327
+ .. note:: `from_local` is differentiable, the `requires_grad` of the created
328
+ `DTensor` object will depend on if `local_tensor` requires_grad or not.
329
+ """
330
+ # if same shape/dtype, no need to run_check, if not, must allgather
331
+ # the metadatas to check the size/dtype across ranks
332
+ # There should be no data communication unless there's replication
333
+ # strategy, where we broadcast the replication from the first rank
334
+ # in the mesh dimension
335
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
336
+ device_type = device_mesh.device_type
337
+
338
+ # convert the local tensor to desired device base on device mesh's device_type
339
+ if device_type != local_tensor.device.type and not local_tensor.is_meta:
340
+ local_tensor = local_tensor.to(device_type)
341
+
342
+ # set default placements to replicated if not specified
343
+ if placements is None:
344
+ placements = [Replicate() for _ in range(device_mesh.ndim)]
345
+ else:
346
+ placements = list(placements)
347
+ for idx, placement in enumerate(placements):
348
+ # normalize shard dim to be positive
349
+ if placement.is_shard():
350
+ placement = cast(Shard, placement)
351
+ if placement.dim < 0:
352
+ placements[idx] = Shard(placement.dim + local_tensor.ndim)
353
+
354
+ # `from_local` is differentiable, and the gradient of the dist tensor this function
355
+ # created should flow back the gradients to the local_tensor, so we call an autograd
356
+ # function to construct the dist tensor instead.
357
+ return _FromTorchTensor.apply( # pyre-ignore[16]: autograd func
358
+ local_tensor,
359
+ device_mesh,
360
+ tuple(placements),
361
+ run_check,
362
+ shape,
363
+ stride,
364
+ )
365
+
366
+ def to_local(
367
+ self, *, grad_placements: Optional[Sequence[Placement]] = None
368
+ ) -> torch.Tensor:
369
+ """
370
+ Get the local tensor of this DTensor on its current rank. For sharding it returns
371
+ a local shard of the logical tensor view, for replication it returns the replica on
372
+ its current rank.
373
+
374
+ Keyword args:
375
+ grad_placements (List[:class:`Placement`], optional): the placements that describe
376
+ the future layout of the gradients of the Tensor returned from this
377
+ function.
378
+ `to_local` converts DTensor to local tensor and the returned local tensor
379
+ might not be used as the original DTensor layout later in the code. This
380
+ argument is the hint that user can give to autograd in case the gradient
381
+ layout of the returned tensor does not match the original DTensor layout.
382
+ If not specified, we will assume the gradient layout remains the same
383
+ as the original DTensor and use that for gradient computation.
384
+
385
+ Returns:
386
+ A :class:`torch.Tensor` or `AsyncCollectiveTensor` object. It represents the
387
+ local tensor on its current rank.
388
+
389
+ .. note:: `to_local` is differentiable, the `requires_grad` of the local tensor returned
390
+ will depend on if the `DTensor` requires_grad or not.
391
+ """
392
+ if grad_placements is not None and not isinstance(grad_placements, tuple):
393
+ grad_placements = tuple(grad_placements)
394
+ return _ToTorchTensor.apply(
395
+ self, grad_placements
396
+ ) # pyre-ignore[16]: autograd func
397
+
398
+ def redistribute(
399
+ self,
400
+ device_mesh: Optional[DeviceMesh] = None,
401
+ placements: Optional[Sequence[Placement]] = None,
402
+ *,
403
+ async_op: bool = False,
404
+ ) -> "DTensor":
405
+ """
406
+ `redistribute` performs necessary collective operations that redistribute the current
407
+ DTensor from its current placements to new placements, or from its current DeviceMesh
408
+ to a new DeviceMesh, e.g. we can turn a Sharded DTensor into a Replicated DTensor by
409
+ specifying a Replicate placement for each dimension of the DeviceMesh.
410
+
411
+ Args:
412
+ device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the
413
+ DTensor, if not specified, must be called under a DeviceMesh
414
+ context manager, default: None
415
+ placements (List[:class:`Placement`], optional): the new placements that
416
+ describe how to place the DTensor into the DeviceMesh, must
417
+ have the same number of elements as `device_mesh.ndim`.
418
+
419
+ Keyword args:
420
+ async_op (bool, optional): whether to perform the DTensor redistribute operation
421
+ asynchronously or not. Default: False
422
+
423
+ Returns:
424
+ A :class:`DTensor` object
425
+
426
+ .. note:: `redistribute` is differentiable.
427
+ """
428
+ # NOTE: This redistribute API currently only supports out
429
+ # of place redistribution, i.e. it always creates a new
430
+ # DTensor object and leave the original one unchanged.
431
+
432
+ # if device_mesh is not specified, use the current device_mesh
433
+ device_mesh = device_mesh or self.device_mesh
434
+ # raise error if new placements not specified
435
+ if placements is None:
436
+ raise RuntimeError("placements is needed for redistribute!")
437
+
438
+ placements = list(placements)
439
+ for i, placement in enumerate(placements):
440
+ if placement.is_partial():
441
+ raise RuntimeError(
442
+ "Can not redistribute to _Partial, _Partial is for internal use only!"
443
+ )
444
+ elif isinstance(placement, Shard) and placement.dim < 0:
445
+ # normalize shard dim to be positive
446
+ placements[i] = Shard(placement.dim + self.ndim)
447
+ placements = tuple(placements)
448
+
449
+ # Early return the original DTensor if the placements are the same.
450
+ if self._spec.placements == placements:
451
+ return self
452
+
453
+ # pyre-fixme[16]: `Redistribute` has no attribute `apply`.
454
+ return Redistribute.apply(self, device_mesh, placements, async_op)
455
+
456
+ def full_tensor(
457
+ self, *, grad_placements: Optional[Sequence[Placement]] = None
458
+ ) -> torch.Tensor:
459
+ """
460
+ Return the full tensor of this DTensor. It will perform necessary collectives
461
+ to gather the local tensors from other ranks in its DeviceMesh and concatenate
462
+ them together. It's syntactic sugar for the following code:
463
+
464
+ `dtensor.redistribute(placements=[Replicate()] * mesh.ndim).to_local()`
465
+
466
+ Keyword args:
467
+ grad_placements (List[:class:`Placement`], optional): the placements that describe
468
+ the future layout of the gradients of the full Tensor returned from this
469
+ function.
470
+ `full_tensor` converts DTensor to a full torch.Tensor and the returned torch.tensor
471
+ might not be used as the original replicated DTensor layout later in the code. This
472
+ argument is the hint that user can give to autograd in case the gradient
473
+ layout of the returned tensor does not match the original replicated DTensor layout.
474
+ If not specified, we will assume the gradient layout of the full tensor is replicated.
475
+
476
+ Returns:
477
+ A :class:`torch.Tensor` object that represents the full tensor of this DTensor.
478
+
479
+ .. note:: `full_tensor` is differentiable.
480
+ """
481
+
482
+ redist_res = self.redistribute(
483
+ placements=[Replicate()] * self.device_mesh.ndim, async_op=False
484
+ )
485
+ return _ToTorchTensor.apply(redist_res, grad_placements)
486
+
487
+ @property
488
+ def device_mesh(self) -> DeviceMesh:
489
+ """
490
+ The :class:`DeviceMesh` attribute that associates with this DTensor object.
491
+
492
+ .. note:: device_mesh is a read-only property, it can not be set.
493
+ """
494
+ return self._spec.mesh
495
+
496
+ @property
497
+ def placements(self) -> Sequence[Placement]:
498
+ """
499
+ The placements attribute of this DTensor that describes the layout of this
500
+ DTensor on its DeviceMesh.
501
+
502
+ .. note:: placements is a read-only property, it can not be set.
503
+ """
504
+ return self._spec.placements
505
+
506
+
507
+ def distribute_tensor(
508
+ tensor: torch.Tensor,
509
+ device_mesh: Optional[DeviceMesh] = None,
510
+ placements: Optional[Sequence[Placement]] = None,
511
+ ) -> DTensor:
512
+ """
513
+ Distribute a torch.Tensor to the `device_mesh` according to the `placements`
514
+ specified. The rank of `device_mesh` and `placements` must be the same.
515
+
516
+ Args:
517
+ tensor (torch.Tensor): torch.Tensor to be distributed. Note that if you
518
+ want to shard a tensor on a dimension that is not evenly divisible by
519
+ the number of devices in that mesh dimension, we use `torch.chunk`
520
+ semantic to shard the tensor and scatter the shards.
521
+ device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to distribute the
522
+ tensor, if not specified, must be called under a DeviceMesh context
523
+ manager, default: None
524
+ placements (List[:class:`Placement`], optional): the placements that
525
+ describe how to place the tensor on DeviceMesh, must have the same
526
+ number of elements as `device_mesh.ndim`. If not specified, we will
527
+ by default replicate the tensor across the `device_mesh` from the
528
+ first rank of each dimension of the `device_mesh`.
529
+
530
+ Returns:
531
+ A :class:`DTensor` or `XLAShardedTensor` object.
532
+
533
+ Note:
534
+ When initializing the DeviceMesh with the `xla` device_type, `distribute_tensor`
535
+ returns an `XLAShardedTensor` instead. See [link](https://github.com/pytorch/pytorch/issues/92909)
536
+ for more details. The XLA integration is experimental and subject to change.
537
+ """
538
+
539
+ torch._C._log_api_usage_once("torch.dtensor.distribute_tensor")
540
+
541
+ # get default device mesh if there's nothing specified
542
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
543
+ device_type = device_mesh.device_type
544
+ if device_type == "xla":
545
+ try:
546
+ # call PyTorch/XLA SPMD for `xla` backend type device mesh.
547
+ # This returns XLAShardedTensor
548
+ from torch_xla.distributed.spmd import ( # type:ignore[import]
549
+ xla_distribute_tensor,
550
+ )
551
+
552
+ return xla_distribute_tensor(
553
+ tensor, device_mesh, placements
554
+ ) # type:ignore[return-value]
555
+ except ImportError as e:
556
+ msg = "To use DTensor API with xla, you must install the torch_xla package!"
557
+ raise ImportError(msg) from e
558
+
559
+ # instantiate an RNG tracker if we haven't already. By default DTensor uses an
560
+ # OffsetBasedRNGTracker to perform random operators.
561
+ # TODO: the value assignment to global variable is not the ideal solution
562
+ # we can replace it in future.
563
+ if is_rng_supported_mesh(device_mesh) and not random._rng_tracker:
564
+ random._rng_tracker = OffsetBasedRNGTracker(device_type)
565
+
566
+ if not tensor.is_leaf:
567
+ raise RuntimeError(
568
+ "`distribute_tensor` should be used to distribute leaf tensors! but found non-leaf tensor!"
569
+ )
570
+
571
+ # convert tensor to the corresponding device type if it's not in that device type
572
+ if device_type != tensor.device.type and not tensor.is_meta:
573
+ tensor = tensor.to(device_type)
574
+
575
+ # set default placements to replicated if not specified
576
+ if placements is None:
577
+ placements = [Replicate() for _ in range(device_mesh.ndim)]
578
+
579
+ if len(placements) != device_mesh.ndim:
580
+ raise ValueError(
581
+ f"`placements` must have the same length as `device_mesh.ndim`! "
582
+ f"Found placements length: {len(placements)}, and device_mesh.ndim: {device_mesh.ndim}."
583
+ )
584
+ if isinstance(tensor, DTensor):
585
+ # if the tensor is already a DTensor, we just need to check if the
586
+ # device mesh and placements are the same
587
+ if tensor.device_mesh != device_mesh:
588
+ raise ValueError(
589
+ f"Cannot distribute a DTensor with device mesh {tensor.device_mesh} "
590
+ f"to a different device mesh {device_mesh}."
591
+ )
592
+ if tensor.placements != tuple(placements):
593
+ raise ValueError(
594
+ f"Cannot distribute a DTensor with placements {tensor.placements} "
595
+ f"to a different placements {placements}. do you want to call "
596
+ f"`redistribute` instead?"
597
+ )
598
+ return tensor
599
+
600
+ local_tensor = tensor
601
+
602
+ # distribute the tensor according to the placements.
603
+ placements = list(placements)
604
+ for idx, placement in enumerate(placements):
605
+ if placement.is_shard():
606
+ placement = cast(Shard, placement)
607
+ if placement.dim < 0:
608
+ # normalize shard placement dim
609
+ placement = Shard(placement.dim + tensor.ndim)
610
+ placements[idx] = placement
611
+ local_tensor = placement._shard_tensor(local_tensor, device_mesh, idx)
612
+ elif placement.is_replicate():
613
+ placement = cast(Replicate, placement)
614
+ local_tensor = placement._replicate_tensor(local_tensor, device_mesh, idx)
615
+ else:
616
+ raise RuntimeError(
617
+ f"Trying to distribute tensor with unsupported placements {placement} on device mesh dimension {idx}!"
618
+ )
619
+ placements = tuple(placements)
620
+
621
+ assert local_tensor is not None, "distributing a tensor should not be None"
622
+ # detach the local tensor passed to DTensor since after the construction
623
+ # of DTensor, autograd would work on top of DTensor instead of local tensor
624
+ return DTensor(
625
+ local_tensor.detach().requires_grad_(tensor.requires_grad),
626
+ device_mesh,
627
+ placements,
628
+ shape=tensor.size(),
629
+ dtype=tensor.dtype,
630
+ requires_grad=tensor.requires_grad,
631
+ stride=tensor.stride(),
632
+ )
633
+
634
+
635
+ def distribute_module(
636
+ module: nn.Module,
637
+ device_mesh: Optional[DeviceMesh] = None,
638
+ partition_fn: Optional[Callable[[str, nn.Module, DeviceMesh], None]] = None,
639
+ input_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None,
640
+ output_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None,
641
+ ) -> nn.Module:
642
+ """
643
+ This function converts all module parameters to :class:`DTensor` parameters
644
+ according to the `partition_fn` specified. It could also control the input or
645
+ output of the module by specifying the `input_fn` and `output_fn`. (i.e. convert
646
+ the input to :class:`DTensor`, convert the output back to torch.Tensor)
647
+ Args:
648
+ module (:class:`nn.Module`): user module to be partitioned.
649
+ device_mesh (:class:`DeviceMesh`): the device mesh to place the module.
650
+ partition_fn (Callable): the function to partition parameters (i.e. shard certain
651
+ parameters across the `device_mesh`). If `partition_fn` is not specified,
652
+ by default we replicate all module parameters of `module` across the mesh.
653
+ input_fn (Callable): specify the input distribution, i.e. could control how the
654
+ input of the module is sharded. `input_fn` will be installed as a module
655
+ `forward_pre_hook` (pre forward hook).
656
+ output_fn (Callable): specify the output distribution, i.e. could control how the
657
+ output is sharded, or convert it back to torch.Tensor. output_fn will be
658
+ installed as a module `forward_hook` (post forward hook).
659
+
660
+ Returns:
661
+ A module that contains parameters/buffers that are all `DTensor`s.
662
+
663
+ Note:
664
+ When initializing the DeviceMesh with the `xla` device_type, `distribute_module`
665
+ returns an nn.Module with PyTorch/XLA SPMD annotated parameters. See [link](https://github.com/pytorch/pytorch/issues/92909)
666
+ for more details. The XLA integration is experimental and subject to change.
667
+ """
668
+
669
+ torch._C._log_api_usage_once("torch.dtensor.distribute_module")
670
+
671
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
672
+ device_type = device_mesh.device_type
673
+ if device_type == "xla":
674
+ try:
675
+ # This function annotates all module parameters for auto-partitioning with
676
+ # PyTorch/XLA SPMD or explicitly partition to :class:`XLAShardedTensor` parameters
677
+ # according to the `partition_fn` specified.
678
+ from torch_xla.distributed.spmd import ( # type:ignore[import]
679
+ xla_distribute_module,
680
+ )
681
+
682
+ return xla_distribute_module(
683
+ module, device_mesh, partition_fn, input_fn, output_fn
684
+ ) # type:ignore[return-value]
685
+ except ImportError as e:
686
+ msg = "To use DTensor API with xla, you must install the torch_xla package!"
687
+ raise ImportError(msg) from e
688
+
689
+ def replicate_module_params_buffers(m: nn.Module, mesh: DeviceMesh) -> None:
690
+ # This function loops over the immediate module parameters and
691
+ # buffers, replicating all non-DTensor params/buffers as DTensor
692
+ # parameters/buffers if they have not been partitioned in the
693
+ # partition_fn, we can't easily use `module._apply` here
694
+ # because we don't know what happened inside partition_fn as
695
+ # user could do anything, i.e. install hooks, and we want to
696
+ # preserve those.
697
+ full_replicate = [Replicate()] * mesh.ndim
698
+ for key, param in m._parameters.items():
699
+ if param is not None and not isinstance(param, DTensor):
700
+ m.register_parameter(
701
+ key,
702
+ nn.Parameter(distribute_tensor(param.data, mesh, full_replicate)),
703
+ )
704
+ for key, buffer in m._buffers.items():
705
+ if buffer is not None and not isinstance(buffer, DTensor):
706
+ m._buffers[key] = distribute_tensor(buffer, mesh, full_replicate)
707
+
708
+ if partition_fn is None:
709
+ # if partition_fn not specified, we by default replicate
710
+ # all module params/buffers
711
+ for name, submod in module.named_modules():
712
+ replicate_module_params_buffers(submod, device_mesh)
713
+ else:
714
+ # apply partition_fn to submodules
715
+ for name, submod in module.named_modules():
716
+ partition_fn(name, submod, device_mesh)
717
+ replicate_module_params_buffers(submod, device_mesh)
718
+
719
+ # register input_fn as module forward pre hook
720
+ if input_fn is not None:
721
+ # check the input_fn signature
722
+ num_args = len(inspect.signature(input_fn).parameters)
723
+ if num_args == 2:
724
+ # input_fn only takes in inputs and device mesh
725
+ warnings.warn(
726
+ "Deprecating input_fn that takes two arguments (inputs, device_mesh), "
727
+ "please use input_fn that takes in (module, inputs, device_mesh) instead!",
728
+ )
729
+ module.register_forward_pre_hook(lambda _, inputs: input_fn(inputs, device_mesh)) # type: ignore[call-arg]
730
+ elif num_args == 3:
731
+ # input_fn takes in module, inputs, device mesh
732
+ module.register_forward_pre_hook(
733
+ lambda mod, inputs: input_fn(mod, inputs, device_mesh)
734
+ )
735
+ else:
736
+ raise ValueError(
737
+ f"input_fn should take in 3 arguments, but got {num_args} arguments!"
738
+ )
739
+ # register output_fn as module forward hook
740
+ if output_fn is not None:
741
+ num_args = len(inspect.signature(output_fn).parameters)
742
+ if num_args == 2:
743
+ # output_fn only takes in outputs and device mesh
744
+ warnings.warn(
745
+ "Deprecating output_fn that takes two arguments (inputs, device_mesh), "
746
+ "please use output_fn that takes in (module, inputs, device_mesh) instead!",
747
+ )
748
+ module.register_forward_hook(
749
+ lambda mod, inputs, outputs: output_fn(outputs, device_mesh) # type: ignore[call-arg]
750
+ )
751
+ elif num_args == 3:
752
+ module.register_forward_hook(
753
+ lambda mod, inputs, outputs: output_fn(mod, outputs, device_mesh)
754
+ )
755
+ else:
756
+ raise ValueError(
757
+ f"output_fn should take in 3 arguments, but got {num_args} arguments!"
758
+ )
759
+
760
+ return module
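
Taken together, api.py above defines the user-facing surface: `from_local`/`to_local`, `redistribute`, `full_tensor`, `distribute_tensor` and `distribute_module`. The sketch below is not part of the diff; it is a minimal, assumed usage example that expects the script to be launched with torchrun so a process group can be initialized (the gloo backend, mesh shape and tensor sizes are illustrative only).

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._tensor import (
    DeviceMesh,
    DTensor,
    Replicate,
    Shard,
    distribute_module,
    distribute_tensor,
)

def main() -> None:
    dist.init_process_group("gloo")  # assumes torchrun provided MASTER_ADDR/PORT, RANK, WORLD_SIZE
    mesh = DeviceMesh("cpu", list(range(dist.get_world_size())))

    # shard a leaf tensor on dim 0, then move between layouts
    big = torch.randn(8, 4)
    dt = distribute_tensor(big, mesh, [Shard(0)])
    local = dt.to_local()                              # this rank's shard
    replicated = dt.redistribute(mesh, [Replicate()])  # all_gather under the hood
    full = dt.full_tensor()                            # sugar for redistribute + to_local
    print(local.shape, replicated.to_local().shape, full.shape)

    # wrap an existing local shard without cross-rank checks
    dt2 = DTensor.from_local(local, mesh, [Shard(0)], run_check=False)
    print(dt2.placements)

    # replicate every parameter/buffer of a module (no partition_fn given)
    mod = distribute_module(nn.Linear(4, 4), mesh)
    assert isinstance(mod.weight, DTensor)

if __name__ == "__main__":
    main()

Note that `distribute_tensor` detaches the local shard it wraps, so after construction autograd operates on the DTensor rather than on the original local tensor.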
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ from torch.distributed._tensor.api import DTensor
2
+
3
+ from torch.distributed._tensor.debug.comm_mode import CommDebugMode
4
+
5
+
6
+ def get_sharding_prop_cache_info():
7
+ """
8
+ Get the cache info for the sharding propagation cache, used for debugging purpose only.
9
+ This would return a named tuple showing hits, misses, maxsize and cursize of the sharding
10
+ propagator cache.
11
+ """
12
+ return (
13
+ DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding.cache_info() # type:ignore[attr-defined]
14
+ )
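
As a quick illustration (not from the diff), the helper above can be polled between training steps to see whether sharding propagation is being served from its LRU cache; the logging function below is hypothetical.

from torch.distributed._tensor.debug import get_sharding_prop_cache_info

def log_sharding_cache() -> None:
    # cache_info() comes from functools.lru_cache: (hits, misses, maxsize, currsize)
    info = get_sharding_prop_cache_info()
    print(f"sharding prop cache: hits={info.hits} misses={info.misses} currsize={info.currsize}")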
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/comm_mode.py ADDED
@@ -0,0 +1,91 @@
1
+ from collections import defaultdict
2
+ from typing import Any, Dict
3
+
4
+ import torch
5
+ from torch.distributed._tensor.api import DTensor
6
+ from torch.utils._python_dispatch import TorchDispatchMode
7
+
8
+
9
+ funcol_native = torch.ops._c10d_functional
10
+ funcol_py = torch.ops.c10d_functional
11
+
12
+ NATIVE_TO_PY_MAPPING = {
13
+ funcol_native.all_gather_into_tensor: funcol_py.all_gather_into_tensor,
14
+ funcol_native.all_gather_into_tensor_coalesced: funcol_py.all_gather_into_tensor_coalesced,
15
+ funcol_native.all_reduce: funcol_py.all_reduce,
16
+ funcol_native.all_to_all_single: funcol_py.all_to_all_single,
17
+ funcol_native.broadcast: funcol_py.broadcast,
18
+ funcol_native.reduce_scatter_tensor: funcol_py.reduce_scatter_tensor,
19
+ funcol_native.reduce_scatter_tensor_coalesced: funcol_py.reduce_scatter_tensor_coalesced,
20
+ }
21
+
22
+
23
+ class CommDebugMode(TorchDispatchMode):
24
+ """
25
+ ``CommDebugMode`` is a context manager that counts the number of
26
+ functional collectives within its context. It does this using a
27
+ ``TorchDispatchMode``.
28
+
29
+ NOTE: this mode only works for functional collective atm and the
30
+ distributed_c10d collectives are not supported yet.
31
+
32
+ Example usage
33
+
34
+ .. code-block:: python
35
+
36
+ mod = ...
37
+ comm_mode = CommDebugMode()
38
+ with comm_mode:
39
+ mod.sum().backward()
40
+
41
+ """
42
+
43
+ def __init__(self):
44
+ self.comm_counts: Dict[Any, int] = defaultdict(int)
45
+ self.comm_registry = set()
46
+ for native_op, py_op in NATIVE_TO_PY_MAPPING.items():
47
+ self.comm_registry.add(native_op)
48
+ self.comm_registry.add(py_op)
49
+
50
+ def get_total_counts(self) -> int:
51
+ return sum(self.comm_counts.values())
52
+
53
+ def get_comm_counts(self) -> Dict[Any, int]:
54
+ """Returns the communication counts as a dictionary.
55
+
56
+ Returns:
57
+ Dict[Any, int]: The communication counts as a dictionary.
58
+ """
59
+ return self.comm_counts
60
+
61
+ def __enter__(self):
62
+ self.comm_counts.clear()
63
+ super().__enter__()
64
+ return self
65
+
66
+ def __exit__(self, *args):
67
+ super().__exit__(*args)
68
+
69
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
70
+ # When running this mode with DTensor, ordinarily all modes will
71
+ # run **before** subclasses get a chance to run.
72
+ # Returning NotImplemented here gives us a chance to let DTensor
73
+ # run and desugar into comms ops, before CommDebugMode sees them.
74
+ if any(t == DTensor for t in types):
75
+ return NotImplemented
76
+ kwargs = kwargs if kwargs else {}
77
+ out = func(*args, **kwargs)
78
+ func_packet = func._overloadpacket
79
+ # We have many tests that use CommDebugMode to verify the occurrence of
80
+ # collectives. These tests do so by querying comm_counts with legacy
81
+ # funcol ops as key. For the purpose of native funcol migration, we
82
+ # need these tests to work for both legacy and native funcol. To avoid
83
+ # the need to modify all tests to accommodate the two implementations,
84
+ # we make CommDebugMode translate native funcol ops into legacy funcol
85
+ # ops until the migration finishes.
86
+ if func_packet in self.comm_registry:
87
+ if func_packet in NATIVE_TO_PY_MAPPING:
88
+ func_packet = NATIVE_TO_PY_MAPPING[func_packet]
89
+ self.comm_counts[func_packet] += 1
90
+
91
+ return out
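
A small, assumed usage sketch of CommDebugMode beyond the docstring example above: it counts the functional collectives issued while redistributing a sharded DTensor to a replicated one, and presumes an already-initialized process group (e.g. torchrun with the gloo backend).

import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, Replicate, Shard, distribute_tensor
from torch.distributed._tensor.debug import CommDebugMode

def count_redistribute_comms() -> None:
    mesh = DeviceMesh("cpu", list(range(dist.get_world_size())))
    dt = distribute_tensor(torch.randn(8, 8), mesh, [Shard(0)])
    comm_mode = CommDebugMode()
    with comm_mode:
        # Shard(0) -> Replicate() triggers an all_gather collective
        dt.redistribute(mesh, [Replicate()])
    print("total collectives:", comm_mode.get_total_counts())
    for op, count in comm_mode.get_comm_counts().items():
        print(op, count)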
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/op_coverage.py ADDED
@@ -0,0 +1,105 @@
1
+ from operator import itemgetter
2
+ from typing import List
3
+
4
+ from functorch.compile import make_boxed_func
5
+
6
+ import torch
7
+ import torch.fx
8
+ import torch.nn as nn
9
+ from torch._functorch.compilers import aot_module
10
+ from torch._inductor.decomposition import select_decomp_table
11
+ from torch.distributed._tensor import DTensor
12
+
13
+
14
+ inductor_decomps = select_decomp_table()
15
+
16
+ graphs: List[torch.fx.GraphModule] = []
17
+
18
+
19
+ def fwd_bwd_compiler(fx_g, _):
20
+ graphs.append(fx_g)
21
+ return make_boxed_func(fx_g)
22
+
23
+
24
+ def get_inductor_decomp_graphs(model: nn.Module, args, kwargs):
25
+ """
26
+ Obtain forward and backward graphs of a model with inductor decompositions using tracing and aot_module.
27
+
28
+ Convenient util to get the fwd and bwd graphs of an arbitrary model
29
+ with inductor decompositions. Note that this would simply do tracing
30
+ with aot_module and don't ensure correctness. This is useful to track
31
+ the ops needed in DTensor.
32
+ """
33
+ compiled_mod = aot_module(
34
+ model, fw_compiler=fwd_bwd_compiler, decompositions=inductor_decomps
35
+ )
36
+ output = compiled_mod(*args, **kwargs)
37
+
38
+ if output.ndim != 0:
39
+ # if output is not a scalar tensor, by default sum it in order to
40
+ # run backward
41
+ output = output.sum()
42
+
43
+ output.backward()
44
+
45
+ # one fwd, one bwd graph
46
+ assert len(graphs) == 2
47
+ return graphs
48
+
49
+
50
+ def print_op_coverage_summary(model: nn.Module, args, kwargs, *, output_csv=False):
51
+ """
52
+ Util to print the operator coverage summary of a given model with tabulate.
53
+
54
+ Must have tabulate module installed.
55
+ """
56
+ # python module required for summary
57
+ import csv
58
+
59
+ from tabulate import tabulate
60
+
61
+ fwd_graph, bwd_graph = get_inductor_decomp_graphs(model, args, kwargs)
62
+
63
+ op_counts = {}
64
+
65
+ for node in fwd_graph.graph.nodes:
66
+ if node.op == "call_function" and isinstance(
67
+ node.target, torch._ops.OpOverload
68
+ ):
69
+ if node.target not in op_counts:
70
+ op_counts[node.target] = 0
71
+
72
+ op_counts[node.target] += 1
73
+
74
+ for node in bwd_graph.graph.nodes:
75
+ if node.op == "call_function" and isinstance(
76
+ node.target, torch._ops.OpOverload
77
+ ):
78
+ if node.target not in op_counts:
79
+ op_counts[node.target] = 0
80
+
81
+ op_counts[node.target] += 1
82
+
83
+ op_infos = []
84
+
85
+ for op, count in op_counts.items():
86
+ supported = op in DTensor._op_dispatcher.sharding_propagator.op_to_rules
87
+ op_infos.append([op, str(op._schema), count, supported])
88
+
89
+ # sort the op info base on the total count index
90
+ count_idx = 2
91
+ op_infos.sort(key=itemgetter(count_idx), reverse=True)
92
+
93
+ headers = ["Operator", "Schema", "Total Count", "Supported"]
94
+ print(tabulate(op_infos, headers=headers))
95
+
96
+ if output_csv:
97
+ # Open a CSV file for writing
98
+ with open("op_summary.csv", "w", newline="") as csv_file:
99
+ # Create a CSV writer object
100
+ csv_writer = csv.writer(csv_file)
101
+
102
+ csv_writer.writerow(headers)
103
+ # Write each table row to the CSV file
104
+ for row in op_infos:
105
+ csv_writer.writerow(row)
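
An illustrative, single-process invocation of the coverage utility above on a toy model (assumed, not from the diff); tabulate must be installed and the model/input shapes are placeholders.

import torch
import torch.nn as nn
from torch.distributed._tensor.debug.op_coverage import print_op_coverage_summary

model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 1))
example_args = (torch.randn(4, 16),)
# prints a table of decomposed aten ops and whether DTensor has a sharding rule for each
print_op_coverage_summary(model, example_args, {}, output_csv=False)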
venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/visualize_sharding.py ADDED
1
+ from typing import List, Sequence, Tuple
2
+
3
+ import numpy as np
4
+
5
+ from torch._prims_common import ShapeType
6
+ from torch.distributed._tensor import DeviceMesh
7
+
8
+ from torch.distributed._tensor.placement_types import Placement, Shard
9
+
10
+
11
+ def _mesh_to_coordinate(mesh, device_type):
12
+ """
13
+ Given an n-dimensional device mesh, this function creates a map from each
14
+ device to its coordinate
15
+ """
16
+ # Convert the n-dimensional list to a NumPy array
17
+ np_mesh = np.array(mesh.mesh.tolist())
18
+
19
+ # Create a dictionary to map each value to its coordinate
20
+ device_to_coordinate_map = {}
21
+ for coord, value in np.ndenumerate(np_mesh):
22
+ # device is unique in device_mesh
23
+ device_to_coordinate_map[f"{device_type}:{str(value)}"] = list(coord)
24
+
25
+ return device_to_coordinate_map
26
+
27
+
28
+ def _convert_offset_to_ranges(all_offsets):
29
+ """
30
+ Using the tabulate package to create a table is easier when we specify row and col ranges.
31
+ This function converts offsets to ranges.
32
+ """
33
+ converted_blocks = []
34
+
35
+ for offset in all_offsets:
36
+ shape, offset, value = offset
37
+
38
+ # Calculate row_range and column_range
39
+ row_range = (offset[0], offset[0] + shape[0] - 1)
40
+ column_range = (offset[1], offset[1] + shape[1] - 1)
41
+
42
+ # Convert value to string to match your desired format
43
+ converted_block = {
44
+ "row_range": row_range,
45
+ "column_range": column_range,
46
+ "value": str(value),
47
+ }
48
+ converted_blocks.append(converted_block)
49
+
50
+ return converted_blocks
51
+
52
+
53
+ def _create_table(blocks):
54
+ """
55
+ Creates a tabulate table given row and column ranges with device name
56
+ """
57
+ try:
58
+ from tabulate import tabulate
59
+ except ImportError as e:
60
+ raise ImportError("tabulate package is required to visualize sharding") from e
61
+
62
+ # Extract unique row and column ranges
63
+ row_ranges = sorted({block["row_range"] for block in blocks})
64
+ col_ranges = sorted({block["column_range"] for block in blocks})
65
+
66
+ # Create a matrix initialized with empty strings
67
+ matrix = [["" for _ in col_ranges] for _ in row_ranges]
68
+
69
+ # Fill the matrix with values
70
+ for block in blocks:
71
+ row_index = row_ranges.index(block["row_range"])
72
+ col_index = col_ranges.index(block["column_range"])
73
+ if matrix[row_index][col_index] == "":
74
+ matrix[row_index][col_index] = block["value"]
75
+ else:
76
+ matrix[row_index][col_index] += ", " + block["value"]
77
+
78
+ # Prepare headers
79
+ row_headers = [f"Row {r[0]}-{r[1]}" for r in row_ranges]
80
+ col_headers = [f"Col {c[0]}-{c[1]}" for c in col_ranges]
81
+
82
+ return tabulate(matrix, headers=col_headers, showindex=row_headers)
83
+
84
+
85
+ def compute_local_shape_and_global_offset(
86
+ global_shape: ShapeType,
87
+ mesh: DeviceMesh,
88
+ placements: Sequence[Placement],
89
+ my_coordinate: List[int],
90
+ ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
91
+ """
92
+ Same as torch.distributed._tensor._utils.compute_local_shape_and_global_offset but
93
+ with custom my_coordinate input. This is the modified implementation for visualize_sharding.
94
+ """
95
+
96
+ if my_coordinate is None:
97
+ # if rank not in the mesh, return empty offset
98
+ return ((), ())
99
+ else:
100
+ local_shape = list(global_shape)
101
+ global_offset = [0] * len(global_shape)
102
+
103
+ for idx, placement in enumerate(placements):
104
+ mesh_dim_size = mesh.size(idx)
105
+ if isinstance(placement, Shard):
106
+ shard_dim = placement.dim
107
+ local_offset = [0] * len(global_shape)
108
+ assert shard_dim < len(
109
+ local_shape
110
+ ), f"Sharding dim {shard_dim} greater than tensor ndim {len(local_shape)}"
111
+ shard_size, shard_offset = placement._local_shard_size_on_dim(
112
+ local_shape[shard_dim],
113
+ mesh_dim_size,
114
+ my_coordinate[idx],
115
+ return_offset=True,
116
+ )
117
+
118
+ local_shape[shard_dim] = shard_size
119
+ local_offset[shard_dim] = shard_offset
120
+
121
+ # On a given dimension, if the local_offset[shard_dim] is smaller than global_offset[shard_dim],
122
+ # it means that this dimension has been already sharded in previous placement.
123
+ # Therefore, we cannot simply replace the global_offset[shard_dim] with local_offset[shard_dim].
124
+ # Instead, for the given shard_dim, we need to add local_offset[shard_dim] to existing global_offset[shard_dim].
125
+ if global_offset[shard_dim] <= local_offset[shard_dim]:
126
+ global_offset[shard_dim] = local_offset[shard_dim]
127
+ else:
128
+ global_offset[shard_dim] += local_offset[shard_dim]
129
+
130
+ return tuple(local_shape), tuple(global_offset)
131
+
132
+
133
+ def visualize_sharding(dtensor, header=""):
134
+ """
135
+ Visualizes sharding in 1D-2D dtensors
136
+ Requires tabulate, install with `pip install tabulate`
137
+
138
+ note: no sharding info will be printed for empty tensors
139
+ """
140
+ if dtensor.numel() == 0: # we do not print for empty dtensors
141
+ return
142
+
143
+ if len(dtensor.shape) >= 3:
144
+ raise RuntimeError(
145
+ "visualize sharding is only implemented for 1D or 2D dtensor"
146
+ )
147
+ placements = dtensor.placements
148
+ device_mesh = dtensor.device_mesh
149
+ device_type = dtensor.device_mesh.device_type
150
+
151
+ if device_mesh.get_coordinate() is None: # current rank is not in the mesh
152
+ return
153
+
154
+ # Only display the visualization once for each DTensor, on the rank whose
155
+ # coordinate is 0 on all dimensions. For example, if the mesh is a full mesh,
156
+ # we will only print on rank 0.
157
+ local_rank_zero_on_all_dim = all(
158
+ device_mesh.get_local_rank(mesh_dim=dim) == 0 for dim in range(device_mesh.ndim)
159
+ )
160
+ if not local_rank_zero_on_all_dim:
161
+ return
162
+
163
+ device_map = _mesh_to_coordinate(device_mesh, device_type)
164
+ all_offsets = []
165
+ for device in device_map:
166
+ local_shape, global_offset = compute_local_shape_and_global_offset(
167
+ dtensor.shape, device_mesh, placements, device_map[device]
168
+ )
169
+ all_offsets.append([local_shape, global_offset, device])
170
+
171
+ # Convert offsets to blocks with row_ranges for tabulate
172
+ blocks = _convert_offset_to_ranges(all_offsets)
173
+
174
+ # Print the table
175
+ print(header)
176
+ print(_create_table(blocks))
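
A hedged usage sketch for visualize_sharding on a 2D mesh; it assumes the script is launched with torchrun on 4 ranks and that tabulate is installed. Only the rank whose mesh coordinate is zero on every dimension prints the table.

import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, Shard, distribute_tensor
from torch.distributed._tensor.debug.visualize_sharding import visualize_sharding

dist.init_process_group("gloo")
mesh = DeviceMesh("cpu", torch.arange(dist.get_world_size()).reshape(2, 2))
dt = distribute_tensor(torch.randn(8, 8), mesh, [Shard(0), Shard(1)])
visualize_sharding(dt, header="8x8 tensor sharded over a 2x2 mesh")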
venv/lib/python3.10/site-packages/torch/distributed/_tensor/device_mesh.py ADDED
@@ -0,0 +1,6 @@
1
+ from torch.distributed.device_mesh import ( # noqa: F401
2
+ _get_device_handle,
3
+ _mesh_resources,
4
+ DeviceMesh,
5
+ init_device_mesh,
6
+ )
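
The shim above simply re-exports the public device-mesh API; a minimal, assumed usage of init_device_mesh (the 2x2 shape and dim names are illustrative, and a 4-rank torchrun launch is expected) looks like this:

from torch.distributed.device_mesh import init_device_mesh

mesh_2d = init_device_mesh("cpu", (2, 2), mesh_dim_names=("dp", "tp"))
tp_mesh = mesh_2d["tp"]  # slice out the 1-D tensor-parallel sub-mesh by name
print(tp_mesh.size())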
venv/lib/python3.10/site-packages/torch/distributed/_tensor/dispatch.py ADDED
@@ -0,0 +1,393 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import functools
3
+ import operator
4
+ from typing import cast, Dict, List, Optional, Sequence, Tuple
5
+
6
+ import torch
7
+
8
+ import torch.distributed as dist
9
+ import torch.distributed._tensor.api as dtensor
10
+ import torch.distributed._tensor.random as random
11
+ from torch.distributed._tensor._utils import try_find_mesh_from_args
12
+ from torch.distributed._tensor.op_schema import (
13
+ _is_inplace_op,
14
+ _is_out_variant_op,
15
+ OpInfo,
16
+ OpSchema,
17
+ OutputSpecType,
18
+ )
19
+ from torch.distributed._tensor.placement_types import DTensorSpec, Replicate, TensorMeta
20
+ from torch.distributed._tensor.random import is_rng_supported_mesh
21
+ from torch.distributed._tensor.redistribute import redistribute_local_tensor
22
+ from torch.distributed._tensor.sharding_prop import ShardingPropagator
23
+ from torch.distributed._tensor.tp_conv import (
24
+ convolution_backward_handler,
25
+ convolution_handler,
26
+ )
27
+ from torch.distributed.device_mesh import DeviceMesh
28
+
29
+ try:
30
+ from torch.utils import _cxx_pytree as pytree
31
+ except ImportError:
32
+ from torch.utils import _pytree as pytree # type: ignore[no-redef]
33
+
34
+ aten = torch.ops.aten
35
+
36
+
37
+ def decompose_handler(
38
+ op_call: torch._ops.OpOverload,
39
+ args: Tuple[object, ...],
40
+ kwargs: Dict[str, object],
41
+ ) -> object:
42
+ """
43
+ Decomposes a op to core ATen op, this handler is mostly here
44
+ for inference mode usage where the ops are not core aten ops.
45
+ """
46
+ r = op_call.decompose(*args, **kwargs)
47
+ if r is not NotImplemented:
48
+ return r
49
+ else:
50
+ raise RuntimeError("Decomposition failed")
51
+
52
+
53
+ def is_same_size_handler(
54
+ op_call: torch._ops.OpOverload,
55
+ args: Tuple[object, ...],
56
+ kwargs: Dict[str, object],
57
+ ) -> bool:
58
+ lhs = cast(torch.Tensor, args[0])
59
+ rhs = cast(torch.Tensor, args[1])
60
+ return lhs.shape == rhs.shape
61
+
62
+
63
+ class OpDispatcher:
64
+ """
65
+ Op dispatching class instance to handle args/kwargs pre-processing (un-wrapping), sharding
66
+ propagation, redistribute local args, local compute, and post-processing (re-wrapping). It
67
+ also handles any op specific logic if necessary.
68
+ """
69
+
70
+ def __init__(self) -> None:
71
+ self.sharding_propagator = ShardingPropagator()
72
+ self._random_ops = {
73
+ aten.native_dropout.default,
74
+ aten.normal_.default,
75
+ aten.rand_like.default,
76
+ aten.randn_like.default,
77
+ aten.randint_like.default,
78
+ aten.randint_like.low_dtype,
79
+ aten.randint_like.low_dtype_out,
80
+ aten.uniform_.default,
81
+ aten.bernoulli.default,
82
+ aten.bernoulli_.float,
83
+ }
84
+ self._custom_op_handlers = {
85
+ aten.linear.default: decompose_handler,
86
+ aten.is_same_size.default: is_same_size_handler,
87
+ aten.convolution.default: convolution_handler,
88
+ aten.convolution_backward.default: convolution_backward_handler,
89
+ }
90
+
91
+ # This flag is used internally to control whether we treat the torch.Tensor(non-DTensor)
92
+ # as implicitly replicated or we throw error to user.
93
+ # NOTE: It is EXTREMELY UNSAFE to turn this flag on by default so we intentionally leave
94
+ # it as False by default.
95
+ self._allow_implicit_replication = False
96
+
97
+ def dispatch(
98
+ self,
99
+ op_call: torch._ops.OpOverload,
100
+ args: Tuple[object, ...],
101
+ kwargs: Dict[str, object],
102
+ ) -> object:
103
+ """
104
+ Main dispatching logic
105
+ """
106
+ # operators that does not need to go through sharding propagation
107
+ if op_call in self._custom_op_handlers:
108
+ return self._custom_op_handlers[op_call](op_call, args, kwargs) # type: ignore[operator]
109
+
110
+ # extract local tensor and sharding infos to a OpInfo
111
+ op_info = self.unwrap_to_op_info(op_call, args, kwargs)
112
+
113
+ self.sharding_propagator.propagate(op_info)
114
+ output_sharding = op_info.output_sharding
115
+ assert output_sharding is not None, "output sharding should not be None"
116
+
117
+ mesh = op_info.mesh
118
+ if mesh.get_coordinate() is None:
119
+ # For a non-participating device, we do:
120
+ # 1. if the return type is scalar, set the local result to None.
121
+ # The local results from all devices will then be all-gathered
122
+ # and a reduce op will be performed on the list of results
123
+ # with appropriate operators:
124
+ # for bool type, we by default use AND to reduce;
125
+ # we can extend for more ops if necessary.
126
+ # 2. if the return type is Tensor or List[Tensor], return empty
127
+ # tensor(s) with correct dtype.
128
+ spec = output_sharding.output_spec
129
+ ret_list = op_info.schema.op._schema.returns
130
+
131
+ if spec is None:
132
+ # For a scalar return type, the non-participating device has None
133
+ # as its local result
134
+ local_results: object = None
135
+ else:
136
+
137
+ def default_tensor(spec: DTensorSpec) -> torch.Tensor:
138
+ if spec.tensor_meta is not None:
139
+ shape = spec.tensor_meta.shape
140
+ dtype = spec.tensor_meta.dtype
141
+ if len(shape) == 0:
142
+ # scalar tensor
143
+ return torch.zeros((), dtype=dtype)
144
+ else:
145
+ # non-scalar tensor
146
+ return torch.tensor([], dtype=dtype)
147
+ else:
148
+ raise RuntimeError(f"{spec} has no tensor metadata.")
149
+
150
+ if isinstance(spec, DTensorSpec):
151
+ # return a Tensor value
152
+ local_results = default_tensor(spec)
153
+ elif isinstance(spec, Sequence):
154
+ # return a List[Tensor] value
155
+ local_results = [
156
+ default_tensor(s) if s is not None else None for s in spec
157
+ ]
158
+ assert isinstance(local_results, List)
159
+ if None in local_results:
160
+ ret_type = str(ret_list[0].type)
161
+ raise NotImplementedError(
162
+ f"return type {ret_type} in DTensor op is not supported"
163
+ )
164
+ else:
165
+ if output_sharding.needs_redistribute:
166
+ # compute locally with redistribute first if needed
167
+ assert output_sharding.schema_suggestions is not None
168
+ self.redistribute_local_args(
169
+ op_info, output_sharding.schema_suggestions[0]
170
+ )
171
+
172
+ local_tensor_args = (
173
+ pytree.tree_unflatten(
174
+ cast(List[object], op_info.local_args), op_info.args_tree_spec
175
+ )
176
+ if op_info.args_tree_spec
177
+ else op_info.local_args
178
+ )
179
+
180
+ # run local op computation with potentially modified args/kwargs
181
+ local_tensor_args = cast(Tuple[object, ...], local_tensor_args)
182
+ if op_call in self._random_ops and is_rng_supported_mesh(mesh):
183
+ if not random._rng_tracker:
184
+ # Default to `OffsetBasedRNGTracker` if the parallelism API
185
+ # did not already construct one
186
+ random._rng_tracker = random.OffsetBasedRNGTracker(mesh.device_type)
187
+ # For DTensor random operator, run it within a distribute region
188
+ with random._rng_tracker._distribute_region(
189
+ cast(dtensor.DTensor, args[0])._spec
190
+ ):
191
+ local_results = op_call(*local_tensor_args, **op_info.local_kwargs)
192
+ else:
193
+ local_results = op_call(*local_tensor_args, **op_info.local_kwargs)
194
+
195
+ # communicate the result to all ranks for some operators that return scalar value
196
+ if output_sharding.output_spec is None:
197
+ if op_call == aten.equal.default:
198
+ obj_list = [None for _ in range(dist.get_world_size())]
199
+ dist.all_gather_object(obj_list, local_results) # type: ignore[possibly-undefined]
200
+ obj_list = list(filter(lambda x: x is not None, obj_list))
201
+ # perform reduce on the collection with AND op
202
+ local_results = functools.reduce(operator.and_, obj_list, True)
203
+
204
+ if _is_inplace_op(op_call):
205
+ # inplace op should return self instead of re-wrapping
206
+ if output_sharding.output_spec is not None:
207
+ return args[0]
208
+ else:
209
+ return None
210
+ elif _is_out_variant_op(op_call):
211
+ # out variant could possibly have multiple out args (i.e. lu_unpack.out)
212
+ output_specs = (
213
+ (output_sharding.output_spec,)
214
+ if not isinstance(output_sharding.output_spec, tuple)
215
+ else output_sharding.output_spec
216
+ )
217
+ out_dts = []
218
+ spec_idx = 0
219
+ for argument in op_call._schema.arguments:
220
+ if argument.is_out:
221
+ out_dt = cast(dtensor.DTensor, kwargs[argument.name])
222
+ out_dt._spec = cast(DTensorSpec, output_specs[spec_idx])
223
+ out_dts.append(out_dt)
224
+ spec_idx += 1
225
+
226
+ assert len(out_dts) >= 1, "out variant should have at least one out arg"
227
+ return tuple(out_dts) if len(out_dts) > 1 else out_dts[0]
228
+ else:
229
+ return self.wrap(local_results, output_sharding.output_spec) # type: ignore[possibly-undefined]
230
+
231
+ @staticmethod
232
+ def redistribute_local_args(
233
+ op_info: OpInfo,
234
+ suggested_input_schema: OpSchema,
235
+ ) -> None:
236
+ # NOTE: it's very rare that we need to reshard kwargs so we intentionally skip it
237
+
238
+ # TODO: the op schema should probably just remain flattened so that we can avoid this tree flatten
239
+ # Need to fix all the ops before doing this.
240
+ if op_info.args_tree_spec is not None:
241
+ flatten_args_schema_to_reshard = tuple(
242
+ pytree.tree_leaves(suggested_input_schema.args_schema)
243
+ )
244
+ else:
245
+ flatten_args_schema_to_reshard = suggested_input_schema.args_schema
246
+
247
+ new_local_args: List[object] = []
248
+ for i, arg_spec in enumerate(op_info.flat_args_schema):
249
+ reshard_arg_spec = flatten_args_schema_to_reshard[i]
250
+ if isinstance(arg_spec, DTensorSpec):
251
+ local_tensor = cast(torch.Tensor, op_info.local_args[i])
252
+ if arg_spec != reshard_arg_spec:
253
+ resharded_local_tensor = redistribute_local_tensor(
254
+ local_tensor, arg_spec, reshard_arg_spec
255
+ )
256
+ new_local_args.append(resharded_local_tensor)
257
+ else:
258
+ new_local_args.append(local_tensor)
259
+ else:
260
+ new_local_args.append(reshard_arg_spec)
261
+
262
+ op_info.local_args = tuple(new_local_args)
263
+
264
+ def unwrap_to_op_info(
265
+ self,
266
+ op_call: torch._ops.OpOverload,
267
+ args: Tuple[object, ...],
268
+ kwargs: Dict[str, object],
269
+ ) -> OpInfo:
270
+ # get runtime schema to determine whether to use pytree to flatten inputs
271
+ runtime_schema_info = self.sharding_propagator.op_to_schema_info.get(
272
+ op_call, None
273
+ )
274
+
275
+ if runtime_schema_info is not None and runtime_schema_info.needs_pytree:
276
+ # flatten args/kwargs when necessary
277
+ tree_args, args_spec = pytree.tree_flatten(args)
278
+ args_list: Sequence[object] = tree_args
279
+ else:
280
+ args_list, args_spec = args, None
281
+
282
+ args_schema: List[object] = []
283
+ kwargs_schema: Dict[str, object] = {}
284
+ local_args: List[object] = []
285
+ local_kwargs: Dict[str, object] = {}
286
+ mesh: Optional[DeviceMesh] = None
287
+
288
+ for arg in args_list:
289
+ if isinstance(arg, dtensor.DTensor):
290
+ args_schema.append(arg._spec)
291
+ local_args.append(arg._local_tensor)
292
+ if mesh is not None:
293
+ if mesh != arg.device_mesh:
294
+ raise NotImplementedError(
295
+ f"{op_call}: DTensor does not support cross-mesh operation yet!"
296
+ )
297
+ else:
298
+ mesh = arg.device_mesh
299
+ elif isinstance(arg, torch.Tensor):
300
+ if arg.ndim == 0 or self._allow_implicit_replication:
301
+ mesh = mesh or try_find_mesh_from_args(op_call, args_list)
302
+ # scalar tensor can be safely treated as replicated
303
+ args_schema.append(
304
+ DTensorSpec(
305
+ mesh,
306
+ (Replicate(),) * mesh.ndim,
307
+ tensor_meta=TensorMeta(
308
+ shape=arg.shape, stride=arg.stride(), dtype=arg.dtype
309
+ ),
310
+ )
311
+ )
312
+ local_args.append(arg)
313
+ else:
314
+ raise RuntimeError(
315
+ f"{op_call}: got mixed torch.Tensor and DTensor, need to convert all"
316
+ " torch.Tensor to DTensor before calling distributed operators!"
317
+ )
318
+ else:
319
+ args_schema.append(arg)
320
+ local_args.append(arg)
321
+
322
+ for k, v in kwargs.items():
323
+ if isinstance(v, dtensor.DTensor):
324
+ kwargs_schema[k] = v._spec
325
+ local_kwargs[k] = v._local_tensor
326
+ if mesh is not None:
327
+ if mesh != v.device_mesh:
328
+ raise NotImplementedError(
329
+ f"{op_call}: DTensor does not support cross-mesh operation yet!"
330
+ )
331
+ else:
332
+ mesh = v.device_mesh
333
+ elif isinstance(v, torch.Tensor):
334
+ raise RuntimeError(
335
+ f"{op_call}: got mixed torch.Tensor and DTensor, need to convert all"
336
+ " torch.Tensor to DTensor before calling distributed operators!"
337
+ )
338
+ else:
339
+ kwargs_schema[k] = v
340
+ local_kwargs[k] = v
341
+
342
+ assert mesh is not None, f"found no DeviceMesh from dtensor args for {op_call}!"
343
+ op_info = OpInfo(
344
+ mesh,
345
+ OpSchema(
346
+ op_call,
347
+ pytree.tree_unflatten(args_schema, args_spec)
348
+ if args_spec
349
+ else tuple(args_schema),
350
+ kwargs_schema,
351
+ schema_info=runtime_schema_info,
352
+ ),
353
+ args_schema,
354
+ tuple(local_args),
355
+ local_kwargs,
356
+ args_spec,
357
+ )
358
+ return op_info
359
+
360
+ @staticmethod
361
+ def wrap(res: object, spec: OutputSpecType) -> object:
362
+ if isinstance(res, torch.Tensor):
363
+ if spec is not None:
364
+ assert isinstance(
365
+ spec, DTensorSpec
366
+ ), f"output spec does not match with output! Expected DTensorSpec, got {spec}."
367
+ assert spec.tensor_meta is not None
368
+ return dtensor.DTensor(
369
+ res,
370
+ spec.mesh,
371
+ spec.placements,
372
+ shape=spec.tensor_meta.shape,
373
+ dtype=spec.tensor_meta.dtype,
374
+ requires_grad=res.requires_grad,
375
+ stride=spec.tensor_meta.stride,
376
+ )
377
+ else:
378
+ # if output does not have a DTensorSpec due to specific ops, it must be a scalar tensor
379
+ assert res.ndim == 0, "output tensor should be scalar!"
380
+ return res
381
+ elif isinstance(res, (list, tuple)):
382
+ assert spec is not None and isinstance(
383
+ spec, (list, tuple)
384
+ ), f"output spec does not match with output! Expected list/tuple, got {spec}."
385
+ res_list = []
386
+ for e, s in zip(res, spec):
387
+ res_list.append(OpDispatcher.wrap(e, s))
388
+
389
+ return tuple(res_list) if isinstance(res, tuple) else res_list
390
+ else:
391
+ # if the res contains only non tensor values (i.e. int/float/none), we simply return it
392
+ # without rewrapping to DTensor.
393
+ return res
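
To make the flow above concrete, the assumed sketch below shows what a single DTensor op goes through: __torch_dispatch__ hands the aten op to OpDispatcher.dispatch, which unwraps the local tensors, propagates sharding, runs the op locally, and re-wraps the result with the propagated spec. It expects an already-initialized process group; the shapes and placements are illustrative.

import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, DTensor, Shard, distribute_tensor

def dispatch_demo() -> None:
    mesh = DeviceMesh("cpu", list(range(dist.get_world_size())))
    a = distribute_tensor(torch.randn(8, 4), mesh, [Shard(0)])
    b = distribute_tensor(torch.randn(8, 4), mesh, [Shard(0)])
    # Both operands are Shard(0), so sharding propagation keeps the placement,
    # the add runs on the local shards without communication, and wrap()
    # returns a new Shard(0) DTensor.
    out = a + b
    assert isinstance(out, DTensor)
    assert out.placements == (Shard(0),)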
venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ from contextlib import contextmanager
2
+
3
+ from torch.distributed._tensor.api import DTensor
4
+
5
+
6
+ @contextmanager
7
+ def implicit_replication():
8
+ try:
9
+ DTensor._op_dispatcher._allow_implicit_replication = True
10
+ yield
11
+ finally:
12
+ DTensor._op_dispatcher._allow_implicit_replication = False
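
A usage sketch (assumed, not from the diff): inside implicit_replication(), plain torch.Tensor operands are treated as replicated on the DTensor's mesh instead of raising, and the try/finally above restores the flag on exit even if the op fails. Mesh setup and shapes are illustrative.

import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, Shard, distribute_tensor
from torch.distributed._tensor.experimental import implicit_replication

def demo() -> None:
    mesh = DeviceMesh("cpu", list(range(dist.get_world_size())))
    dt = distribute_tensor(torch.randn(8, 4), mesh, [Shard(0)])
    plain = torch.ones(4)  # a regular, non-DTensor tensor
    with implicit_replication():
        out = dt + plain   # `plain` is treated as Replicate() on `mesh`
    print(out.placements)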
venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (532 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc ADDED
Binary file (13.6 kB).
venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py ADDED
@@ -0,0 +1,547 @@
1
+ import copy
2
+ import operator
3
+ from typing import Any, cast, Dict, List, Optional, Sequence, Tuple
4
+
5
+ import torch
6
+ from torch._subclasses.fake_tensor import FakeTensor
7
+ from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
8
+ from torch.distributed._tensor.op_schema import (
9
+ DTensorSpec,
10
+ OpSchema,
11
+ OutputSharding,
12
+ OutputSpecType,
13
+ PlacementStrategy,
14
+ )
15
+ from torch.distributed._tensor.placement_types import (
16
+ Placement,
17
+ Replicate,
18
+ Shard,
19
+ TensorMeta,
20
+ )
21
+ from torch.distributed._tensor.redistribute import redistribute_local_tensor
22
+ from torch.distributed.tensor.parallel.style import ColwiseParallel, ParallelStyle
23
+ from torch.export import ExportedProgram
24
+ from torch.export.exported_program import ExportGraphSignature
25
+ from torch.fx import GraphModule
26
+ from torch.fx.experimental.proxy_tensor import make_fx
27
+ from torch.fx.node import Node
28
+ from torch.fx.passes.infra.pass_base import PassBase, PassResult
29
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata
30
+ from torch.utils import _pytree as pytree
31
+
32
+
33
+ aten = torch.ops.aten
34
+
35
+
36
+ def tensor_parallel_transformation(
37
+ exported_program: ExportedProgram,
38
+ rank: int,
39
+ world_size: int,
40
+ device_type: str,
41
+ parallel_strategies: Dict[str, ParallelStyle],
42
+ ) -> ExportedProgram:
43
+ """
44
+ The entry point function to perform graph transformations on an exported program
45
+ to transform a single-device graph into a tensor parallel graph.
46
+
47
+ .. warning::
48
+ This API is experimental and subject to change.
49
+ """
50
+
51
+ gm = exported_program.graph_module
52
+ sig = copy.deepcopy(exported_program.graph_signature)
53
+ state_dict = copy.copy(exported_program.state_dict)
54
+
55
+ with gm._set_replace_hook(sig.get_replace_hook()):
56
+ res = TensorParallelTransformPass(
57
+ rank,
58
+ world_size,
59
+ device_type,
60
+ state_dict,
61
+ exported_program.graph_signature,
62
+ parallel_strategies,
63
+ )(gm)
64
+ assert res is not None
65
+ gm = res.graph_module
66
+
67
+ return exported_program._update(gm, sig, state_dict)
68
+
69
+
70
+ class TensorParallelTransformPass(PassBase):
71
+ """
72
+ This pass is responsible for transforming a single-device graph into a tensor parallel
73
+ graph. It will mark the placement strategy of each node in the graph,
74
+ partition the graph into distributed graph, then shard the parameters/buffers accordingly.
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ rank: int,
80
+ world_size: int,
81
+ device_type: str,
82
+ state_dict: Dict[str, torch.Tensor],
83
+ graph_signature: ExportGraphSignature,
84
+ parallel_strategies: Dict[str, ParallelStyle],
85
+ ) -> None:
86
+ super().__init__()
87
+ self.rank = rank
88
+ self.mesh = DeviceMesh(device_type, torch.arange(world_size))
89
+ self.state_dict: Dict[str, torch.Tensor] = state_dict
90
+ self.graph_signature = graph_signature
91
+ self.parallel_strategies = parallel_strategies
92
+
93
+ def call(self, graph_module) -> PassResult:
94
+ gm = copy.deepcopy(graph_module)
95
+
96
+ parameter_placements = _generate_parameter_and_buffer_placements(
97
+ list(self.state_dict.keys()), self.parallel_strategies
98
+ )
99
+ placement_strategies = _mark_sharding(
100
+ gm, self.graph_signature, self.mesh, parameter_placements
101
+ )
102
+ _partitioner(gm)
103
+ _shard_state_dict(
104
+ self.state_dict, placement_strategies, self.graph_signature, self.mesh
105
+ )
106
+ return PassResult(gm, True)
107
+
108
+
109
+ def _generate_parameter_and_buffer_placements(
110
+ params_and_buffers: List[str],
111
+ parallel_strategies: Dict[str, ParallelStyle],
112
+ ) -> Dict[str, Placement]:
113
+ """
114
+ Build parameter placements based on the given parallel style of linear layers.
115
+ """
116
+ parameter_placements: Dict[str, Placement] = {}
117
+ for linear_fqn, parallel_style in parallel_strategies.items():
118
+ weight_fqn = f"{linear_fqn}.weight"
119
+ bias_fqn = f"{linear_fqn}.bias"
120
+ assert weight_fqn in params_and_buffers
121
+ parameter_placements[weight_fqn] = (
122
+ Shard(0) if parallel_style == ColwiseParallel else Shard(1)
123
+ )
124
+ if bias_fqn in params_and_buffers:
125
+ parameter_placements[bias_fqn] = (
126
+ Shard(0) if parallel_style == ColwiseParallel else Replicate()
127
+ )
128
+ return parameter_placements
129
+
130
+
131
+ def _mark_tensor_parallel_shardings(
132
+ gm: GraphModule,
133
+ graph_signature: ExportGraphSignature,
134
+ mesh: DeviceMesh,
135
+ parameter_placements: Dict[str, Placement],
136
+ ) -> Dict[Node, PlacementStrategy]:
137
+ """
138
+ Mark the placement strategies of the parameter and buffer placeholder nodes.
139
+ """
140
+ placement_strategies: Dict[Node, PlacementStrategy] = {}
141
+ num_params_and_buffers = len(graph_signature.inputs_to_parameters) + len(
142
+ graph_signature.inputs_to_buffers
143
+ )
144
+ placeholder_idx: int = 0
145
+ for node in gm.graph.nodes:
146
+ if node.op == "placeholder":
147
+ if placeholder_idx < num_params_and_buffers:
148
+ fqn: str = _get_input_node_fqn(node.name, graph_signature)
149
+ placement: Placement = (
150
+ parameter_placements[fqn]
151
+ if fqn in parameter_placements
152
+ else Replicate()
153
+ )
154
+ placement_strategies[node] = _create_placement_strategy(
155
+ node,
156
+ mesh,
157
+ placements=(placement,),
158
+ )
159
+ placeholder_idx += 1
160
+ else:
161
+ placement_strategies[node] = _create_placement_strategy(
162
+ node,
163
+ mesh,
164
+ placements=(Replicate(),),
165
+ )
166
+ return placement_strategies
167
+
168
+
169
+ def _get_input_node_fqn(input_name: str, graph_signature: ExportGraphSignature) -> str:
170
+ """
171
+ Return the FQN of an input node.
172
+ """
173
+ if input_name in graph_signature.inputs_to_parameters:
174
+ return graph_signature.inputs_to_parameters[input_name]
175
+ elif input_name in graph_signature.inputs_to_buffers:
176
+ return graph_signature.inputs_to_buffers[input_name]
177
+ else:
178
+ raise ValueError(
179
+ f"{input_name} not found in inputs_to_parameters or inputs_to_buffers"
180
+ )
181
+
182
+
183
+ def _mark_sharding(
184
+ gm: GraphModule,
185
+ graph_signature: ExportGraphSignature,
186
+ mesh: DeviceMesh,
187
+ parameter_placements: Dict[str, Placement],
188
+ ) -> Dict[Node, PlacementStrategy]:
189
+ """
190
+ Mark the sharding strategy for each node in the graph module.
191
+ """
192
+ placement_strategies: Dict[
193
+ Node, PlacementStrategy
194
+ ] = _mark_tensor_parallel_shardings(gm, graph_signature, mesh, parameter_placements)
195
+
196
+ for node in gm.graph.nodes:
197
+ if node.op == "placeholder":
198
+ if node not in placement_strategies:
199
+ placement_strategies[node] = _create_placement_strategy(
200
+ node, mesh, placements=(Replicate(),)
201
+ )
202
+ node.meta["sharding"] = placement_strategies[node]
203
+ elif node.op == "call_function":
204
+ if node.target == operator.getitem:
205
+ input_nodes = node.all_input_nodes
206
+ assert (
207
+ len(input_nodes) == 1
208
+ ), f"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}"
209
+ arg_strategy = placement_strategies[input_nodes[0]]
210
+ placement_strategies[node] = _create_placement_strategy(
211
+ node,
212
+ mesh,
213
+ placements=arg_strategy.output_spec.placements,
214
+ input_specs=_get_input_node_specs(node, placement_strategies),
215
+ )
216
+ node.meta["sharding"] = placement_strategies[node]
217
+ else:
218
+ op_schema = _get_op_schema(node, placement_strategies)
219
+
220
+ # get DTensor specs for inputs and outputs
221
+ if (
222
+ op_schema.op
223
+ not in DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs
224
+ and op_schema.op
225
+ not in DTensor._op_dispatcher.sharding_propagator.op_to_rules
226
+ ):
227
+ # Mark all as replicated
228
+ output_sharding = _generate_default_output_sharding(
229
+ node,
230
+ mesh,
231
+ op_schema,
232
+ )
233
+ else:
234
+ output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding(
235
+ op_schema,
236
+ )
237
+ placement_strategies[node] = PlacementStrategy(
238
+ output_specs=_get_output_spec_from_output_sharding(output_sharding),
239
+ input_specs=output_sharding.schema_suggestions[0].args_spec
240
+ if output_sharding.schema_suggestions is not None
241
+ else _get_input_node_specs(node, placement_strategies),
242
+ )
243
+ node.meta["sharding"] = placement_strategies[node]
244
+ elif node.op == "output":
245
+ node.meta["sharding"] = None
246
+ else:
247
+ raise RuntimeError(f"op code {node.op} not supported")
248
+ return placement_strategies
249
+
250
+
251
+ def _get_output_spec_from_output_sharding(
252
+ output_sharding: OutputSharding,
253
+ ) -> DTensorSpec:
254
+ """
255
+ Util function to extract output spec from output sharding.
256
+ """
257
+ if isinstance(output_sharding.output_spec, DTensorSpec):
258
+ return output_sharding.output_spec
259
+ else:
260
+ # For ops that return multiple outputs, the outputs should have the same output spec
261
+ assert isinstance(output_sharding.output_spec, Sequence)
262
+ assert output_sharding.output_spec[0] is not None
263
+ output_sharding.output_spec[0].tensor_meta = None
264
+ return output_sharding.output_spec[0]
265
+
266
+
267
+ def _create_placement_strategy(
268
+ node: Node,
269
+ mesh: DeviceMesh,
270
+ placements: Tuple[Placement, ...],
271
+ input_specs: Optional[Sequence[DTensorSpec]] = None,
272
+ ) -> PlacementStrategy:
273
+ """
274
+ Util function to construct a placement strategy for a given node.
275
+ """
276
+ placement = PlacementStrategy(
277
+ input_specs=input_specs,
278
+ output_specs=DTensorSpec(
279
+ mesh=mesh,
280
+ placements=placements,
281
+ ),
282
+ )
283
+ _populate_tensor_meta(node, placement.output_specs)
284
+ return placement
285
+
286
+
287
+ def _populate_tensor_meta(node: Node, output_spec: OutputSpecType) -> None:
288
+ """
289
+ Util function to populate tensor meta of output_spec based on node metadata.
290
+ """
291
+ if isinstance(node.meta["val"], Sequence):
292
+ assert isinstance(output_spec, Sequence)
293
+ for spec, fake_tensor in zip(output_spec, node.meta["val"]):
294
+ assert spec is not None
295
+ spec.tensor_meta = TensorMeta(
296
+ shape=fake_tensor.shape,
297
+ stride=fake_tensor.stride(),
298
+ dtype=fake_tensor.dtype,
299
+ )
300
+ else:
301
+ assert isinstance(output_spec, DTensorSpec)
302
+ output_spec.tensor_meta = TensorMeta(
303
+ shape=node.meta["val"].shape,
304
+ stride=node.meta["val"].stride(),
305
+ dtype=node.meta["val"].dtype,
306
+ )
307
+
308
+
309
+ def _generate_default_output_sharding(
310
+ node: Node,
311
+ mesh: DeviceMesh,
312
+ op_schema: OpSchema,
313
+ ) -> OutputSharding:
314
+ """
315
+ Util function to create a default output sharding that suggests Replicate placement for both args and outputs.
316
+ """
317
+
318
+ def update_arg_spec(arg_spec: DTensorSpec) -> DTensorSpec:
319
+ return DTensorSpec(
320
+ mesh=arg_spec.mesh,
321
+ placements=(Replicate(),),
322
+ tensor_meta=arg_spec.tensor_meta,
323
+ )
324
+
325
+ new_op_schema = OpSchema(
326
+ op=op_schema.op,
327
+ args_schema=pytree.tree_map_only(
328
+ DTensorSpec, update_arg_spec, op_schema.args_schema
329
+ ),
330
+ kwargs_schema=op_schema.kwargs_schema,
331
+ )
332
+
333
+ def create_output_spec(tensor: FakeTensor) -> DTensorSpec:
334
+ return DTensorSpec(
335
+ mesh=mesh,
336
+ placements=(Replicate(),),
337
+ tensor_meta=TensorMeta(
338
+ shape=tensor.shape,
339
+ stride=tensor.stride(),
340
+ dtype=tensor.dtype,
341
+ ),
342
+ )
343
+
344
+ return OutputSharding(
345
+ output_spec=pytree.tree_map_only(
346
+ FakeTensor, create_output_spec, node.meta["val"]
347
+ ),
348
+ schema_suggestions=[new_op_schema],
349
+ failed_reason=f"{node.op} does not have sharding strategy registered",
350
+ needs_redistribute=True,
351
+ )
352
+
353
+
354
+ def _partitioner(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
355
+ """
356
+ Graph partitioner that partitions the single-device graph
357
+ into a distributed graph.
358
+ """
359
+ for node in gm.graph.nodes:
360
+ node_sharding = node.meta["sharding"]
361
+ if node.op == "placeholder":
362
+ out_spec = node_sharding.output_spec
363
+ local_val = _partition_val(node.meta["val"], out_spec)
364
+ # update node value
365
+ node.meta["val"] = local_val
366
+ elif node.op == "call_function":
367
+ out_spec = node_sharding.output_spec
368
+ # check if there's misaligned sharding, insert reshard if there is
369
+ expected_input_specs = node_sharding.input_specs
370
+ for idx, input_arg in enumerate(node.all_input_nodes):
371
+ input_arg_sharding = input_arg.meta["sharding"]
372
+ input_arg_spec = input_arg_sharding.output_spec
373
+ desired_spec = (
374
+ out_spec
375
+ if expected_input_specs is None
376
+ else expected_input_specs[idx]
377
+ )
378
+ if input_arg_spec != desired_spec:
379
+ _insert_reshard_gm(
380
+ gm, node, input_arg, input_arg_spec, desired_spec
381
+ )
382
+ # convert output val to its local component
383
+ output_val = node.meta["val"]
384
+ node.meta["val"] = _partition_val(output_val, out_spec)
385
+ elif node.op == "output":
386
+ for input_arg in node.all_input_nodes:
387
+ # input args of output should be Replicate, otherwise redistribution is needed.
388
+ input_args_to_check: Sequence[Node] = (
389
+ input_arg if isinstance(input_arg, Sequence) else [input_arg]
390
+ )
391
+ for arg in input_args_to_check:
392
+ arg_sharding = arg.meta["sharding"]
393
+ arg_spec = arg_sharding.output_spec
394
+ desired_spec = copy.copy(arg_spec)
395
+ desired_spec.placements = (Replicate(),)
396
+ if arg_spec != desired_spec:
397
+ _insert_reshard_gm(gm, node, arg, arg_spec, desired_spec)
398
+ else:
399
+ raise RuntimeError(f"op code {node} not supported")
400
+
401
+ _clean_up_graph_metadata(gm)
402
+ gm.graph.lint()
403
+ gm.recompile()
404
+ return gm
405
+
406
+
407
+ def _partition_val(val: Any, spec: DTensorSpec) -> Any:
408
+ """
409
+ util function to convert a full tensor val to its local component
410
+ """
411
+ if isinstance(val, torch.Tensor):
412
+ local_shard = val
413
+ if val.ndim == 0:
414
+ # If it's already a scalar tensor, it is already local, we don't
415
+ # need to do anything
416
+ return local_shard
417
+
418
+ for idx, placement in enumerate(spec.placements):
419
+ if placement.is_shard():
420
+ placement = cast(Shard, placement)
421
+ num_chunks = spec.mesh.size(mesh_dim=idx)
422
+ my_coord = spec.mesh.get_coordinate()
423
+ assert my_coord is not None, "current rank not in mesh!"
424
+ my_coord_on_mesh_dim = my_coord[idx]
425
+ local_shard = placement._split_tensor(
426
+ local_shard, num_chunks, with_padding=False, contiguous=True
427
+ )[0][my_coord_on_mesh_dim]
428
+ return local_shard
429
+ elif isinstance(val, (list, tuple)):
430
+ return val.__class__(_partition_val(v, spec) for v in val)
431
+ else:
432
+ raise RuntimeError(f"val type {type(val)} not supported")
433
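A sketch of the chunking arithmetic above, using plain torch.chunk and no mesh: with a 1-D mesh of 4 ranks and placements == (Shard(0),), the rank whose coordinate on the sharded mesh dim is 2 keeps the third chunk of the full value.

    import torch

    full = torch.arange(8 * 16, dtype=torch.float32).reshape(8, 16)
    coord = 2  # this rank's coordinate on the sharded mesh dim (assumed)
    local = torch.chunk(full, 4, dim=0)[coord]
    assert local.shape == (8 // 4, 16) and torch.equal(local, full[4:6])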
+
434
+
435
+ def _insert_reshard_gm(
436
+ gm: torch.fx.GraphModule,
437
+ node: Node,
438
+ input_arg: Node,
439
+ input_arg_spec: DTensorSpec,
440
+ desired_spec: DTensorSpec,
441
+ ) -> None:
442
+ """
443
+ Transform the graph for tensor redistribution.
444
+ """
445
+ input_arg_spec.tensor_meta = input_arg.meta["tensor_meta"]
446
+ desired_spec.tensor_meta = input_arg.meta["tensor_meta"]
447
+ input_arg_tensor = input_arg.meta["val"]
448
+
449
+ # insert reshard operation
450
+ def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor:
451
+ return redistribute_local_tensor(
452
+ local_tensor,
453
+ input_arg_spec,
454
+ desired_spec,
455
+ )
456
+
457
+ reshard_gm = make_fx(reshard_fn)(input_arg_tensor)
458
+ reshard_gm_nodes = list(reshard_gm.graph.nodes)
459
+ input_node = reshard_gm_nodes[0]
460
+ with gm.graph.inserting_before(node):
461
+ output_node = gm.graph.graph_copy(
462
+ reshard_gm.graph,
463
+ val_map={
464
+ input_node: input_arg,
465
+ },
466
+ )
467
+ node.replace_input_with(input_arg, output_node)
468
+
469
+
470
+ def _clean_up_graph_metadata(gm: torch.fx.GraphModule) -> None:
471
+ """
472
+ Clean up the graph by removing sharding and partitioning related metadata
473
+ """
474
+ for node in gm.graph.nodes:
475
+ if "sharding" in node.meta:
476
+ del node.meta["sharding"]
477
+ if "val" in node.meta and isinstance(node.meta["val"], torch.Tensor):
478
+ local_tensor_meta = _extract_tensor_metadata(node.meta["val"])
479
+ node.meta["tensor_meta"] = local_tensor_meta
480
+
481
+
482
+ def _get_input_node_specs(
483
+ node: Node, placement_strategies: Dict[Node, PlacementStrategy]
484
+ ) -> Tuple[DTensorSpec, ...]:
485
+ """
486
+ Get the input specs of a node.
487
+ """
488
+ input_specs_list: List[DTensorSpec] = []
489
+ for input_arg in node.all_input_nodes:
490
+ if input_arg in placement_strategies:
491
+ output_spec = placement_strategies[input_arg].output_specs
492
+ assert isinstance(output_spec, DTensorSpec)
493
+ input_specs_list.append(output_spec)
494
+ else:
495
+ raise ValueError(f"{input_arg} does not have output_spec populated.")
496
+ return tuple(input_specs_list)
497
+
498
+
499
+ def _get_op_schema(
500
+ node: Node, placement_strategies: Dict[Node, PlacementStrategy]
501
+ ) -> OpSchema:
502
+ """
503
+ Util function to construct the operator schema of a node.
504
+ """
505
+ args_schema_list = pytree.tree_map_only(
506
+ Node, lambda arg: placement_strategies[arg].output_specs, node.args
507
+ )
508
+ op_schema = OpSchema(
509
+ op=cast(torch._ops.OpOverload, node.target),
510
+ args_schema=tuple(args_schema_list),
511
+ kwargs_schema=cast(Dict[str, object], node.kwargs),
512
+ )
513
+ return op_schema
514
+
515
+
516
+ def _shard_state_dict(
517
+ state_dict: Dict[str, torch.Tensor],
518
+ placement_strategies: Dict[Node, PlacementStrategy],
519
+ graph_signature: ExportGraphSignature,
520
+ mesh: DeviceMesh,
521
+ ) -> None:
522
+ """
523
+ Partition the weights in place based on the placement strategy
524
+ """
525
+ for node, placement_strategy in placement_strategies.items():
526
+ if node.op != "placeholder":
527
+ continue
528
+ if node.name in graph_signature.inputs_to_parameters:
529
+ fqn = graph_signature.inputs_to_parameters[node.name]
530
+ elif node.name in graph_signature.inputs_to_buffers:
531
+ fqn = graph_signature.inputs_to_buffers[node.name]
532
+ else:
533
+ continue
534
+ assert fqn in state_dict, f"{fqn} not found in state dict: {state_dict.keys()}"
535
+
536
+ original_param = state_dict[fqn]
537
+ dtensor_param = distribute_tensor(
538
+ original_param,
539
+ mesh,
540
+ placement_strategy.output_spec.placements,
541
+ )
542
+ local_param = dtensor_param.to_local()
543
+ state_dict[fqn] = (
544
+ torch.nn.Parameter(local_param)
545
+ if isinstance(original_param, torch.nn.Parameter)
546
+ else local_param
547
+ )
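A rough sketch of the in-place effect of `_shard_state_dict` on a single entry, assuming an already-initialized 1-D mesh of 4 ranks and a hypothetical parameter name and shape:

    w = state_dict["net1.weight"]                              # e.g. shape [1024, 256]
    local = distribute_tensor(w, mesh, [Shard(0)]).to_local()  # local shard of shape [256, 256] per rank
    state_dict["net1.weight"] = torch.nn.Parameter(local)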
venv/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py ADDED
@@ -0,0 +1,427 @@
1
+ from dataclasses import dataclass
2
+ from functools import cached_property
3
+ from typing import Dict, List, Optional, Sequence, Tuple, Union
4
+
5
+ import torch
6
+ from torch._ops import OpOverload
7
+ from torch.distributed._tensor.placement_types import DTensorSpec
8
+ from torch.distributed.device_mesh import DeviceMesh
9
+
10
+ try:
11
+ from torch.utils._cxx_pytree import tree_map_only, TreeSpec
12
+ except ImportError:
13
+ from torch.utils._pytree import ( # type: ignore[no-redef, assignment]
14
+ tree_map_only,
15
+ TreeSpec,
16
+ )
17
+
18
+
19
+ # Common type aliases
20
+ ArgsType = Tuple[object, ...]
21
+ KwargsType = Dict[str, object]
22
+ # ATen op schemas could have Tensor, Tuple[Tensor] and List[Tensor], so output type should
23
+ # be the same set of possibilities.
24
+ OutputSpecType = Optional[Union[DTensorSpec, Sequence[Optional[DTensorSpec]]]]
25
+
26
+
27
+ def _rebuild_tensor_from_dtensor_meta(arg) -> object:
28
+ """
29
+ This is used to propagate tensor metadata, must be under fake mode
30
+ """
31
+ assert arg.tensor_meta is not None, "DTensorSpec does not contain tensor_meta."
32
+ return torch.empty_strided(
33
+ arg.tensor_meta.shape,
34
+ arg.tensor_meta.stride,
35
+ dtype=arg.tensor_meta.dtype,
36
+ )
37
+
38
+
39
+ def _is_inplace_op(op: OpOverload):
40
+ # simple analysis of function schema to determine
41
+ # if this is an inplace variant, it might not
42
+ # be entirely correct, but it's good enough for now.
43
+ return op._schema.name[-1] == "_"
44
+
45
+
46
+ def _is_out_variant_op(op: OpOverload):
47
+ # simple analysis of function schema to determine
48
+ # if this is an out variant, it might not
49
+ # be entirely correct, but it's good enough for now.
50
+ return "out" in op._schema.overload_name
51
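For illustration, on a few well-known aten overloads the two heuristics above behave as follows:

    _is_inplace_op(torch.ops.aten.add_.Tensor)   # True  -- schema name "aten::add_" ends with "_"
    _is_inplace_op(torch.ops.aten.add.Tensor)    # False
    _is_out_variant_op(torch.ops.aten.add.out)   # True  -- overload name is "out"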
+
52
+
53
+ def _pretty_print_spec(spec: object) -> str:
54
+ if spec is None:
55
+ return "None"
56
+ elif isinstance(spec, DTensorSpec):
57
+ return "".join([str(p) for p in spec.placements])
58
+ elif isinstance(spec, Sequence):
59
+ return "(" + ", ".join([_pretty_print_spec(s) for s in spec]) + ")"
60
+ else:
61
+ raise RuntimeError(f"Unknown spec type to print: spec={spec}")
62
+
63
+
64
+ @dataclass
65
+ class PlacementStrategy:
66
+ """
67
+ A placement strategy describes acceptable sharding placements of the output
68
+ and the tensor arguments of an operation.
69
+
70
+ note: when the op return value is a single DTensor object, output_specs is
71
+ DTensorSpec; when the return value is a tuple of Optional[DTensor],
72
+ output_specs is a tuple of Optional[DTensorSpec].
73
+ """
74
+
75
+ output_specs: Union[DTensorSpec, Tuple[Optional[DTensorSpec], ...]]
76
+ input_specs: Optional[Sequence[DTensorSpec]] = None
77
+
78
+ # redistribute costs for this op placement strategy
79
+ # we need a nested list to record the cost for each
80
+ # operand of this operator, and for each operand of
81
+ # this operator it might have multiple placement strategies
82
+ redistribute_cost: Optional[List[List[float]]] = None
83
+
84
+ @cached_property
85
+ def output_spec(self) -> DTensorSpec:
86
+ """
87
+ This function requires that the strategy have exactly one DTensorSpec as the
88
+ output spec. If the output_specs is a tuple, we throw an exception.
89
+ """
90
+ if isinstance(self.output_specs, DTensorSpec):
91
+ return self.output_specs
92
+ else:
93
+ raise ValueError(
94
+ f"function output_spec expects a single DTensorSpec but got: {self.output_specs}"
95
+ )
96
+
97
+ def input_spec(self, index: int = 0) -> DTensorSpec:
98
+ assert self.input_specs is not None, "input_specs of PlacementStrategy is None!"
99
+ assert len(self.input_specs) > index, (
100
+ f"Invalid index {index} for input_specs of length "
101
+ f"{len(self.input_specs)}: {self.input_specs}"
102
+ )
103
+ return self.input_specs[index]
104
+
105
+ def __str__(self) -> str:
106
+ input_specs_str = _pretty_print_spec(self.input_specs)
107
+ output_spec_str = _pretty_print_spec(self.output_specs)
108
+ return f"{input_specs_str} -> {output_spec_str}"
109
+
110
+
111
+ class StrategyType:
112
+ """
113
+ Base class type for op strategy. We have two StrategyType subclasses:
114
+ OpStrategy and TupleStrategy
115
+ """
116
+
117
+ pass
118
+
119
+
120
+ class OpStrategy(StrategyType):
121
+ """
122
+ OpStrategy that consists of a list of placement strategies associated with the op
123
+ """
124
+
125
+ def __init__(self, strategies: List[PlacementStrategy]) -> None:
126
+ super().__init__()
127
+ self.strategies: List[PlacementStrategy] = strategies
128
+
129
+ def __str__(self) -> str:
130
+ strategy_list_str = ", ".join([str(strategy) for strategy in self.strategies])
131
+ mesh_shape = self.output_mesh_shape
132
+ return f"OpStrategy:[{strategy_list_str}] @ mesh: {mesh_shape}"
133
+
134
+ def max_num_shards(self) -> int:
135
+ """
136
+ Returns the max number of shards across all placement strategies
137
+ """
138
+ return max([strategy.output_spec.num_shards for strategy in self.strategies])
139
+
140
+ @property
141
+ def output_mesh_shape(self):
142
+ output_spec = self.strategies[0].output_specs
143
+ if isinstance(output_spec, DTensorSpec):
144
+ return output_spec.mesh.shape
145
+ else:
146
+ assert isinstance(
147
+ output_spec, tuple
148
+ ), "found no DTensorSpec in the OpStrategy!"
149
+ assert output_spec[0] is not None
150
+ return output_spec[0].mesh.shape
151
+
152
+ @property
153
+ def output_ndim(self):
154
+ return self.strategies[0].output_spec.ndim
155
+
156
+ @property
157
+ def output_shape(self):
158
+ return self.strategies[0].output_spec.shape
159
+
160
+
161
+ class TupleStrategy(StrategyType):
162
+ """
163
+ TupleStrategy represents that the output strategy of this op is a tuple
164
+ of strategies, i.e. if the output of this op is a tuple of tensors or a list of tensors
165
+ with possibly different placement strategies, we should return a TupleStrategy that
166
+ contains a tuple of OpStrategy, where each child represents the sharding strategy
167
+ of "each element" of the tuple/list of tensors the op returns.
168
+
169
+ NOTE: if the output of the op is a List[Tensor] and they share the same placement
170
+ strategy, then we should return a single OpStrategy instead of a TupleStrategy
171
+ """
172
+
173
+ def __init__(self, childs: Sequence[StrategyType]) -> None:
174
+ super().__init__()
175
+ self.childs: Sequence[StrategyType] = childs
176
+
177
+ def __str__(self) -> str:
178
+ child_strategies_str = ", ".join(
179
+ [f"{str(strat)}" for idx, strat in enumerate(self.childs)]
180
+ )
181
+ return f"TupleStrategy({child_strategies_str})"
182
+
183
+
184
+ @dataclass
185
+ class RuntimeSchemaInfo:
186
+ """
187
+ RuntimeSchemaInfo stores the operator schema related information for runtime (eager)
188
+ execution. It is mainly used in two ways: 1. to generate a hash of the args to determine
189
+ whether to re-run sharding prop or not, and 2. to determine if we need pytree
190
+ """
191
+
192
+ # This static_argnum records static arg "starting index" for ops that have non-tensor
193
+ # args/kwargs which would affect sharding propagation results. All args starting from
194
+ # this index would be hashed to our sharding cache.
195
+ # Note that only a few ops need this information, e.g. view, transpose, var.dim, etc.
196
+ static_argnum: int = 100
197
+ # This static_kwargkey records static kwarg names which would affect sharding prop
198
+ static_kwargkey: Optional[List[str]] = None
199
+ # each op can decide if it wants to use pytree flatten/unflatten during operator
200
+ # eager execution; by default we don't do flatten/unflatten unless the
201
+ # op indicates it needs it, which is done to accelerate eager performance.
202
+ needs_pytree: bool = False
203
+
204
+
205
+ @dataclass
206
+ class OpSchema:
207
+ """
208
+ OpSchema is a data class that describes an operator's input schema; it
209
+ includes DTensor DTensorSpecs and non-tensor args/kwargs (positional order
210
+ preserved). It is mainly used by the dispatching logic below to run things like
211
+ sharding propagation.
212
+
213
+ NOTE: this should be used as a read only data class
214
+ TODO: make this a frozen dataclass
215
+
216
+ Args:
217
+ op: the operator overload we are intercepting
218
+ args_schema: contains args except that the DTensor args have been replaced
219
+ with its DTensorSpec
220
+ kwargs_schema: contains kwargs except that the DTensor kwargs have been replaced
221
+ with its DTensorSpec
222
+ """
223
+
224
+ op: OpOverload
225
+ args_schema: ArgsType
226
+ kwargs_schema: KwargsType
227
+
228
+ schema_info: Optional[RuntimeSchemaInfo] = None
229
+
230
+ @property
231
+ def args_spec(self) -> Tuple[DTensorSpec, ...]:
232
+ """
233
+ args_spec: Tuple[DTensorSpec, ...]: contains a clean list of args spec list
234
+ with NO non-DTensor positional arguments (i.e. int/float/tuple, etc)
235
+ mainly used by sharding propagation to propagate the output spec
236
+ """
237
+ # filter out non-relevant values from args schema to get a clean spec list
238
+ # this would mainly be used by sharding propagation rules
239
+ return tuple(item for item in self.args_schema if isinstance(item, DTensorSpec))
240
+
241
+ def __repr__(self) -> str:
242
+ return (
243
+ f"OpSchema(op={self.op},"
244
+ f" args_schema={self.args_schema},"
245
+ f" kwargs_schema={self.kwargs_schema})"
246
+ )
247
+
248
+ def __str__(self) -> str:
249
+ args_sharding: List[str] = []
250
+ mesh_shape = None
251
+ for arg in self.args_schema:
252
+ if isinstance(arg, DTensorSpec):
253
+ args_sharding.append(str(arg))
254
+ mesh_shape = arg.mesh.shape
255
+ elif isinstance(arg, OpStrategy):
256
+ assert len(arg.strategies) == 1
257
+ args_sharding.append(_pretty_print_spec(arg.strategies[0].output_specs))
258
+ mesh_shape = arg.output_mesh_shape
259
+ elif isinstance(arg, TupleStrategy):
260
+ first_op_strtgy = arg.childs[0]
261
+ assert isinstance(first_op_strtgy, OpStrategy)
262
+ mesh_shape = first_op_strtgy.output_mesh_shape
263
+ args_sharding.append(str(arg))
264
+ else:
265
+ args_sharding.append(str(arg))
266
+ return f"Op(op={self.op}, args_sharding={', '.join(args_sharding)} @ mesh: {mesh_shape})"
267
+
268
+ def __post_init__(self) -> None:
269
+ has_symints = False
270
+ for a in self.args_schema:
271
+ if isinstance(a, DTensorSpec) and a.tensor_meta is not None:
272
+ if any(isinstance(s, torch.SymInt) for s in a.tensor_meta.shape):
273
+ has_symints = True
274
+ break
275
+ self.has_symints = has_symints
276
+
277
+ def arg_type_tensor_or_tensor_list_like(self, arg_idx: int) -> bool:
278
+ arg = self.args_schema[arg_idx]
279
+ is_tensor = isinstance(arg, DTensorSpec)
280
+ if is_tensor:
281
+ return True
282
+
283
+ if not isinstance(arg, list):
284
+ return False
285
+
286
+ return all(isinstance(e, DTensorSpec) or e is None for e in arg)
287
+
288
+ def return_type_tuple_tensor_like(self) -> bool:
289
+ # all dispatch ops could only return Tuple[Tensor] or have None/ints/floats
290
+ # in the tuple, but the first element must be a Tensor, so this check is enough
291
+ return_types = self.op._schema.returns
292
+ return len(return_types) > 1 and isinstance(
293
+ return_types[0].type, torch.TensorType
294
+ )
295
+
296
+ def return_type_tensor(self) -> bool:
297
+ return_types = self.op._schema.returns
298
+ # all dispatch ops only return Tensor or Tuple[Tensor] for tensor like
299
+ # return types, so this check is enough for tensor like types
300
+ return isinstance(return_types[0].type, torch.TensorType)
301
+
302
+ def __hash__(self) -> int:
303
+ # Only hash args and kwargs that op indicates to hash
304
+ if not self.schema_info:
305
+ static_argnum = len(self.args_schema)
306
+ static_kwargkey = None
307
+ else:
308
+ static_argnum = self.schema_info.static_argnum
309
+ static_kwargkey = self.schema_info.static_kwargkey
310
+
311
+ args_to_hash = tuple(
312
+ tuple(e) if isinstance(e, list) else e
313
+ for i, e in enumerate(self.args_schema)
314
+ if self.arg_type_tensor_or_tensor_list_like(i) or i >= static_argnum
315
+ )
316
+ if static_kwargkey is not None:
317
+ kwargs_to_hash = tuple(
318
+ self.kwargs_schema.get(k, None) for k in static_kwargkey
319
+ )
320
+ return hash((self.op, args_to_hash, kwargs_to_hash))
321
+ else:
322
+ return hash((self.op, args_to_hash))
323
+
324
+ def __eq__(self, other: object) -> bool:
325
+ # early return checks
326
+ if not isinstance(other, OpSchema):
327
+ return False
328
+
329
+ if self.op != other.op:
330
+ return False
331
+
332
+ if len(self.args_schema) != len(other.args_schema):
333
+ return False
334
+
335
+ # compare each element and early return if any of them is different
336
+ if not self.schema_info:
337
+ static_argnum = len(self.args_schema)
338
+ static_kwargkey = None
339
+ else:
340
+ static_argnum = self.schema_info.static_argnum
341
+ static_kwargkey = self.schema_info.static_kwargkey
342
+
343
+ for i, (self_arg, other_arg) in enumerate(
344
+ zip(self.args_schema, other.args_schema)
345
+ ):
346
+ if isinstance(self_arg, DTensorSpec) and self_arg != other_arg:
347
+ return False
348
+ elif i >= static_argnum and self_arg != other_arg:
349
+ return False
350
+
351
+ # check kwarg equality when there's a static kwarg key
352
+ if static_kwargkey:
353
+ for key in static_kwargkey:
354
+ if self.kwargs_schema.get(key, None) != other.kwargs_schema.get(
355
+ key, None
356
+ ):
357
+ return False
358
+
359
+ return True
360
+
361
+ def gen_fake_args(self) -> ArgsType:
362
+ """
363
+ gen_fake_args: generate fake args for the operator, this is mainly used
364
+ by sharding propagation rules to generate fake args for the operator
365
+ to run the local tensor operator and get the output spec.
366
+ """
367
+ return tree_map_only(
368
+ DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.args_schema
369
+ )
370
+
371
+ def gen_fake_kwargs(self) -> KwargsType:
372
+ """
373
+ gen_fake_kwargs: generate fake kwargs for the operator, this is mainly used
374
+ by sharding propagation rules to generate fake kwargs for the operator
375
+ to run the local tensor operator and get the output spec.
376
+ """
377
+ return tree_map_only(
378
+ DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.kwargs_schema
379
+ )
380
+
381
+ def _inplace_rewrap_schema_suggestion(self, origin_schema: "OpSchema") -> None:
382
+ suggestion_args_spec = self.args_spec
383
+ new_arg_schema: List[object] = []
384
+ idx_of_args_spec = 0
385
+ for arg in origin_schema.args_schema:
386
+ if isinstance(arg, DTensorSpec):
387
+ new_arg_schema.append(suggestion_args_spec[idx_of_args_spec])
388
+ idx_of_args_spec += 1
389
+ else:
390
+ new_arg_schema.append(arg)
391
+ self.args_schema = tuple(new_arg_schema)
392
+ self.kwargs_schema = origin_schema.kwargs_schema
393
+
394
+
395
+ @dataclass
396
+ class OutputSharding:
397
+ """
398
+ OutputSharding is a data class that is used by the sharding propagation
399
+ rules, it could set the output_spec upon successful propagation, and if
400
+ it failed, output_spec would become None and sharding propagation rules
401
+ could give a list of suggestions for inputs to reshard.
402
+
403
+ NOTE: the schema_suggestion generated by sharding propagation should be
404
+ exactly the same as the operator OpSchema, except the DTensor DTensorSpecs
405
+ """
406
+
407
+ output_spec: OutputSpecType
408
+ schema_suggestions: Optional[List[OpSchema]] = None
409
+ failed_reason: Optional[str] = None
410
+ needs_redistribute: bool = False
411
+
412
+
413
+ @dataclass
414
+ class OpInfo:
415
+ """
416
+ All Runtime Op execution info are packed here
417
+ """
418
+
419
+ mesh: DeviceMesh
420
+ schema: OpSchema
421
+ flat_args_schema: List[object]
422
+ local_args: Sequence[object]
423
+ local_kwargs: Dict[str, object]
424
+ args_tree_spec: Optional[TreeSpec] = None
425
+
426
+ # the output sharding info
427
+ output_sharding: Optional[OutputSharding] = None
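A rough sketch of how `OpSchema.args_spec` filters out non-tensor arguments. It assumes an already-initialized process group (a single-rank world is enough); the mesh size is arbitrary.

    from torch.distributed.device_mesh import init_device_mesh
    from torch.distributed._tensor.placement_types import DTensorSpec, Replicate

    mesh = init_device_mesh("cpu", (1,))
    spec = DTensorSpec(mesh, (Replicate(),))
    schema = OpSchema(torch.ops.aten.add.Tensor, args_schema=(spec, 1.0), kwargs_schema={})
    schema.args_spec   # (spec,) -- the non-DTensorSpec scalar 1.0 is filtered out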
venv/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py ADDED
@@ -0,0 +1,620 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Any, cast, List, NamedTuple, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.distributed._functional_collectives as funcol
8
+ import torch.distributed.distributed_c10d as c10d
9
+
10
+ from torch.distributed._tensor._collective_utils import mesh_broadcast, mesh_scatter
11
+ from torch.distributed.device_mesh import DeviceMesh
12
+
13
+
14
+ class Placement:
15
+ # base class Placement type
16
+
17
+ # convenient utils to check for placement types
18
+ def is_shard(self, dim: Optional[int] = None) -> bool:
19
+ is_shard_instance = isinstance(self, Shard)
20
+ if dim is not None and is_shard_instance:
21
+ return cast(Shard, self).dim == dim
22
+ else:
23
+ return is_shard_instance
24
+
25
+ def is_replicate(self) -> bool:
26
+ return isinstance(self, Replicate)
27
+
28
+ def is_partial(self) -> bool:
29
+ return isinstance(self, _Partial)
30
+
31
+
32
+ @dataclass(frozen=True)
33
+ class Shard(Placement):
34
+ # shard placement, shard on a dim
35
+ dim: int
36
+
37
+ def _split_tensor(
38
+ self,
39
+ tensor: torch.Tensor,
40
+ num_chunks: int,
41
+ *,
42
+ with_padding: bool = True,
43
+ contiguous: bool = True,
44
+ ) -> Tuple[List[torch.Tensor], List[int]]:
45
+ """
46
+ This function uses torch.chunk to split a tensor into num_chunks shards along
47
+ the Shard placement dimension, and return a list of shards with their pad sizes.
48
+
49
+ Keyword args:
50
+ with_padding (bool, optional): when True, we pad the tensor on the last
51
+ few ranks before calling the collectives (i.e. scatter/all_gather, etc.).
52
+ This is because collectives usually require equal size tensor inputs
53
+ """
54
+ assert (
55
+ self.dim <= tensor.ndim
56
+ ), f"Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}"
57
+
58
+ # chunk tensor over dimension `dim` into n slices with padding if necessary
59
+ tensor_list = list(torch.chunk(tensor, num_chunks, dim=self.dim))
60
+ # compute the chunk size inline with ``torch.chunk``
61
+ full_chunk_size = (tensor.size(self.dim) + num_chunks - 1) // num_chunks
62
+
63
+ # Compute chunk size for each chunk for ``self.dim``
64
+ chunk_sizes = [
65
+ tensor_list[idx].size(self.dim) if idx < len(tensor_list) else 0
66
+ for idx in range(num_chunks)
67
+ ]
68
+ # Compute pad size on each chunk
69
+ pad_sizes = [full_chunk_size - chunk_size for chunk_size in chunk_sizes]
70
+
71
+ # Reuse tensor to fill empty chunk with empty tensor
72
+ num_empty_tensors = num_chunks - len(tensor_list)
73
+ tensor_size = list(tensor_list[0].size())
74
+ tensor_size = [
75
+ size if idx != self.dim else 0 for idx, size in enumerate(tensor_size)
76
+ ]
77
+ tensor = tensor.new_zeros(tensor_size)
78
+ for _ in range(num_empty_tensors):
79
+ tensor_list.append(tensor)
80
+
81
+ if with_padding or contiguous:
82
+ shard_list = []
83
+ for shard, pad_size in zip(tensor_list, pad_sizes):
84
+ # Fill the empty tensor with zeroes with padding.
85
+ if with_padding and pad_size > 0:
86
+ shard = self._pad_tensor(shard, pad_size)
87
+ shard = shard.contiguous() if contiguous else shard
88
+ shard_list.append(shard)
89
+ return shard_list, pad_sizes
90
+ else:
91
+ return tensor_list, pad_sizes
92
+
93
+ def _pad_tensor(
94
+ self,
95
+ tensor: torch.Tensor,
96
+ pad_size: int,
97
+ ) -> torch.Tensor:
98
+ if pad_size == 0:
99
+ return tensor
100
+ pad = [0, 0] * (tensor.ndim - self.dim)
101
+ pad[-1] = pad_size
102
+ return torch.nn.functional.pad(tensor, pad)
103
+
104
+ def _unpad_tensor(
105
+ self,
106
+ tensor: torch.Tensor,
107
+ pad_size: int,
108
+ ) -> torch.Tensor:
109
+ if pad_size == 0:
110
+ return tensor
111
+ return tensor.narrow(
112
+ self.dim,
113
+ start=0,
114
+ length=tensor.size(self.dim) - pad_size,
115
+ )
116
+
117
+ @staticmethod
118
+ def _local_shard_size_on_dim(
119
+ size_on_dim: int,
120
+ num_chunks: int,
121
+ rank: int,
122
+ return_offset: bool = False,
123
+ ) -> Tuple[int, int]:
124
+ """
125
+ returns the local shard size and offset on a given tensor dim
126
+ """
127
+ # Compute the chunk size inline with ``torch.chunk``
128
+ if size_on_dim % num_chunks == 0:
129
+ full_chunk_size = size_on_dim // num_chunks
130
+ return full_chunk_size, full_chunk_size * rank if return_offset else -1
131
+
132
+ # uneven sharding case
133
+ full_chunk_size = (size_on_dim + num_chunks - 1) // num_chunks
134
+ shard_starting_idx = full_chunk_size * rank
135
+
136
+ if size_on_dim < shard_starting_idx:
137
+ return 0, size_on_dim if return_offset else -1
138
+ else:
139
+ local_shard_size = (
140
+ min(size_on_dim, shard_starting_idx + full_chunk_size)
141
+ - shard_starting_idx
142
+ )
143
+ return local_shard_size, shard_starting_idx if return_offset else -1
144
+
145
+ def _shard_tensor(
146
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
147
+ ) -> torch.Tensor:
148
+ """
149
+ shard and scatter a tensor on a mesh dimension (use coordinate
150
+ 0 on the mesh dimension as source of truth)
151
+ """
152
+ my_coordinate = mesh.get_coordinate()
153
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
154
+
155
+ if my_coordinate is None:
156
+ # if rank is not part of mesh, we simply return an empty tensor
157
+ return tensor.new_empty(0, requires_grad=tensor.requires_grad)
158
+
159
+ scatter_list, pad_sizes = self._split_tensor(
160
+ tensor, num_chunks, with_padding=True, contiguous=True
161
+ )
162
+
163
+ output = torch.empty_like(scatter_list[my_coordinate[mesh_dim]])
164
+ mesh_scatter(output, scatter_list, mesh, mesh_dim=mesh_dim)
165
+
166
+ # Only unpad if the local_tensor was padded on the dimension.
167
+ pad_size = pad_sizes[my_coordinate[mesh_dim]]
168
+ if pad_size > 0:
169
+ output = self._unpad_tensor(output, pad_size)
170
+ return output
171
+
172
+ def _reduce_shard_tensor(
173
+ self,
174
+ tensor: torch.Tensor,
175
+ mesh: DeviceMesh,
176
+ reduce_op: c10d.ReduceOp.RedOpType,
177
+ mesh_dim: int,
178
+ ) -> torch.Tensor:
179
+ """
180
+ reduce and scatter a tensor on a mesh dimension
181
+ """
182
+ my_coordinate = mesh.get_coordinate()
183
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
184
+
185
+ if my_coordinate is None:
186
+ # if rank is not part of mesh, we simply return local_tensor,
187
+ # which should be an empty tensor
188
+ return tensor
189
+
190
+ is_padded = tensor.size(self.dim) % num_chunks != 0
191
+ if is_padded:
192
+ scattered_list, pad_sizes = self._split_tensor(
193
+ tensor, num_chunks, with_padding=True, contiguous=True
194
+ )
195
+ tensor = torch.cat(scattered_list, dim=self.dim)
196
+ elif not tensor.is_contiguous():
197
+ tensor = tensor.contiguous()
198
+
199
+ output = funcol.reduce_scatter_tensor(
200
+ tensor, reduce_op.name, scatter_dim=self.dim, group=(mesh, mesh_dim)
201
+ )
202
+
203
+ if is_padded:
204
+ output = self._unpad_tensor(output, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
205
+ return output
206
+
207
+ def _to_replicate_tensor(
208
+ self,
209
+ local_tensor: torch.Tensor,
210
+ mesh: DeviceMesh,
211
+ mesh_dim: int,
212
+ current_logical_shape: List[int],
213
+ ) -> torch.Tensor:
214
+ """
215
+ This function all_gather all shards and return a tensor that
216
+ is replicated on the previously sharded mesh dimension
217
+ """
218
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
219
+ # check if it's uneven, so we need to pad input tensor before all_gather
220
+ local_shape = list(local_tensor.size())
221
+
222
+ logical_dim_size = current_logical_shape[self.dim]
223
+ is_padded = logical_dim_size % num_chunks != 0
224
+
225
+ if is_padded:
226
+ full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks
227
+ pad_size = full_chunk_size - local_shape[self.dim]
228
+ local_tensor = self._pad_tensor(local_tensor, pad_size)
229
+
230
+ if not local_tensor.is_contiguous():
231
+ local_tensor = local_tensor.contiguous()
232
+
233
+ result = funcol.all_gather_tensor(
234
+ local_tensor,
235
+ gather_dim=self.dim,
236
+ group=(mesh, mesh_dim),
237
+ )
238
+ if is_padded:
239
+ unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined]
240
+ result = self._unpad_tensor(result, unpad_size)
241
+ return result
242
+
243
+ def _replicate_to_shard(
244
+ self,
245
+ local_tensor: torch.Tensor,
246
+ mesh: DeviceMesh,
247
+ mesh_dim: int,
248
+ shard_index: int,
249
+ ) -> torch.Tensor:
250
+ """
251
+ transform from replicated tensor to a sharded tensor on
252
+ the current rank, which would perform a local chunk
253
+ """
254
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
255
+ shards, _ = self._split_tensor(
256
+ local_tensor,
257
+ num_chunks,
258
+ with_padding=False,
259
+ contiguous=False,
260
+ )
261
+ return shards[shard_index].clone()
262
+
263
+ def __eq__(self, other: object) -> bool:
264
+ if not isinstance(other, Shard):
265
+ return False
266
+ return self.dim == other.dim
267
+
268
+ def __hash__(self) -> int:
269
+ return hash(self.dim)
270
+
271
+ def __repr__(self) -> str:
272
+ """
273
+ machine readable representation of the Shard placement
274
+ """
275
+ return f"Shard(dim={self.dim})"
276
+
277
+ def __str__(self) -> str:
278
+ """human readable representation of the Shard placement"""
279
+ return f"S({self.dim})"
280
+
281
+
282
+ @dataclass(frozen=True)
283
+ class Replicate(Placement):
284
+ # replicate placement
285
+ def __eq__(self, other: object) -> bool:
286
+ if not isinstance(other, Replicate):
287
+ return False
288
+ return True
289
+
290
+ def __hash__(self) -> int:
291
+ # every replicate placement is the same
292
+ return -1
293
+
294
+ def __repr__(self) -> str:
295
+ """
296
+ machine readable representation of the Replicate placement
297
+ """
298
+ return "Replicate()"
299
+
300
+ def __str__(self) -> str:
301
+ """
302
+ human readable representation of the Replicate placement
303
+ """
304
+ return "R"
305
+
306
+ def _replicate_tensor(
307
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
308
+ ) -> torch.Tensor:
309
+ """
310
+ Replicate (broadcast) a torch.Tensor on a mesh dimension (use
311
+ the first coordinate on the mesh dimension as source of truth)
312
+ """
313
+ my_coordinate = mesh.get_coordinate()
314
+ if my_coordinate is None:
315
+ # if rank is not part of mesh, we simply return an empty tensor
316
+ return tensor.new_empty(0, requires_grad=tensor.requires_grad)
317
+
318
+ tensor = tensor.contiguous()
319
+ mesh_broadcast(tensor, mesh, mesh_dim=mesh_dim)
320
+ return tensor
321
+
322
+
323
+ @dataclass(frozen=True)
324
+ class _Partial(Placement):
325
+ # This is a default _Partial placement with element-wise reduce op
326
+ # _Partial define three contracts:
327
+ # 1. _reduce_value: reduce the value of the tensor on the mesh dimension
328
+ # 2. _reduce_shard_value: reduce_scatter the value of the tensor on the mesh dimension
329
+ # 3. _partition_value: partition the value of a replicated tensor on the mesh dimension
330
+ # We can implement custom reductions as needed by subclassing this
331
+ # class and override those contracts.
332
+ reduce_op: c10d.ReduceOp.RedOpType = c10d.ReduceOp.SUM
333
+
334
+ def _reduce_value(
335
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
336
+ ) -> torch.Tensor:
337
+ return funcol.all_reduce(
338
+ tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim)
339
+ )
340
+
341
+ def _reduce_shard_value(
342
+ self,
343
+ tensor: torch.Tensor,
344
+ mesh: DeviceMesh,
345
+ mesh_dim: int,
346
+ shard_spec: Placement,
347
+ ) -> torch.Tensor:
348
+ # by default call reduce_shard_tensor of the shard_spec.
349
+ shard_spec = cast(Shard, shard_spec)
350
+ return shard_spec._reduce_shard_tensor(tensor, mesh, self.reduce_op, mesh_dim)
351
+
352
+ def _partition_value(
353
+ self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
354
+ ) -> torch.Tensor:
355
+ # _partition_value is the conjugate operation of _reduce_value
356
+ # - i.e. _partition_value on a sum reduce op is just a divison operation
357
+ # - the _reduce_value on a sum reduce op would just be a sum(allreduce) operation
358
+ # TODO: if the reduce_op is min/max, etc. the _partition_value should be a
359
+ # different operation
360
+ assert (
361
+ self.reduce_op == c10d.ReduceOp.SUM
362
+ ), "only support replicate to PartialSUM for now!"
363
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
364
+ return tensor / num_chunks
365
+
366
+ def __eq__(self, other: object) -> bool:
367
+ if not isinstance(other, _Partial):
368
+ return False
369
+ return self.reduce_op == other.reduce_op
370
+
371
+ def __hash__(self) -> int:
372
+ return 1 + hash(self.reduce_op)
373
+
374
+ def __repr__(self) -> str:
375
+ """
376
+ machine readable representation of the Partial placement
377
+ """
378
+ return f"_Partial(reduce_op={self.reduce_op})"
379
+
380
+ def __str__(self) -> str:
381
+ """
382
+ human readable representation of the Partial placement
383
+ """
384
+ return "P"
385
+
386
+
387
+ class TensorMeta(NamedTuple):
388
+ # simple named tuple to represent tensor metadata
389
+ # intentionally to stay simple only for sharding
390
+ # propagation purposes.
391
+ shape: torch.Size
392
+ stride: Tuple[int, ...]
393
+ dtype: torch.dtype
394
+
395
+
396
+ # used internally to propagate the placements
397
+ @dataclass
398
+ class DTensorSpec:
399
+ mesh: DeviceMesh
400
+ placements: Tuple[Placement, ...]
401
+
402
+ # tensor meta will only be set during sharding propagation
403
+ tensor_meta: Optional[TensorMeta] = None
404
+
405
+ def __post_init__(self):
406
+ if not isinstance(self.placements, tuple):
407
+ self.placements = tuple(self.placements)
408
+ self._hash: Optional[int] = None
409
+
410
+ def __setattr__(self, attr: str, value: Any):
411
+ super().__setattr__(attr, value)
412
+ # Make sure to recompute the hash in case any of the hashed attributes
413
+ # change (though we do not expect `mesh` or `placements` to change)
414
+ if hasattr(self, "_hash") and attr in ("mesh", "placements", "tensor_meta"):
415
+ self._hash = None
416
+
417
+ def _hash_impl(self) -> int:
418
+ # hashing and equality check for DTensorSpec are used to cache the sharding
419
+ # propagation results. We only need to consider the mesh, placements, shape
420
+ # dtype and stride.
421
+ # Caveat: we need to keep this in mind and sync hash and eq if we add more
422
+ # fields to them.
423
+ if self.tensor_meta is not None:
424
+ return hash(
425
+ (
426
+ self.mesh,
427
+ self.placements,
428
+ self.tensor_meta.shape,
429
+ self.tensor_meta.stride,
430
+ self.tensor_meta.dtype,
431
+ )
432
+ )
433
+ return hash((self.mesh, self.placements))
434
+
435
+ def __hash__(self) -> int:
436
+ # We lazily cache the spec to avoid recomputing the hash upon each
437
+ # use, where we make sure to update the hash when the `tensor_meta`
438
+ # changes by overriding `__setattr__`. This must be lazy so that Dynamo
439
+ # does not try to hash non-singleton `SymInt`s for the stride.
440
+ if self._hash is None:
441
+ self._hash = self._hash_impl()
442
+ return self._hash
443
+
444
+ def __eq__(self, __o: object) -> bool:
445
+ if not (
446
+ isinstance(__o, DTensorSpec)
447
+ and self.mesh == __o.mesh
448
+ and self.placements == __o.placements
449
+ ):
450
+ return False
451
+ if self.tensor_meta is None or __o.tensor_meta is None:
452
+ return self.tensor_meta == __o.tensor_meta
453
+
454
+ return (
455
+ self.tensor_meta.shape == __o.tensor_meta.shape # type: ignore[union-attr]
456
+ and self.tensor_meta.stride == __o.tensor_meta.stride # type: ignore[union-attr]
457
+ and self.tensor_meta.dtype == __o.tensor_meta.dtype # type: ignore[union-attr]
458
+ )
459
+
460
+ def __str__(self) -> str:
461
+ """
462
+ human readable representation of the DTensorSpec
463
+ """
464
+ if len(self.placements) == 1:
465
+ placement_str = str(self.placements[0])
466
+ else:
467
+ placement_str = str(self.placements)
468
+
469
+ if self.tensor_meta is not None:
470
+ tensor_shape = str(tuple(self.tensor_meta.shape))
471
+ else:
472
+ tensor_shape = "unknown shape"
473
+
474
+ return f"Spec({placement_str} on {tensor_shape})"
475
+
476
+ @property
477
+ def shape(self) -> torch.Size:
478
+ if self.tensor_meta is None:
479
+ raise ValueError("tensor_meta is not set")
480
+ return self.tensor_meta.shape
481
+
482
+ @property
483
+ def stride(self) -> Tuple[int, ...]:
484
+ if self.tensor_meta is None:
485
+ raise ValueError("tensor_meta is not set")
486
+ return self.tensor_meta.stride
487
+
488
+ @property
489
+ def ndim(self) -> int:
490
+ if self.tensor_meta is None:
491
+ raise ValueError("tensor_meta is not set")
492
+ return len(self.tensor_meta.shape)
493
+
494
+ @property
495
+ def num_shards(self) -> int:
496
+ num_shards = 1
497
+ for i, placement in enumerate(self.placements):
498
+ if placement.is_shard():
499
+ num_shards *= self.mesh.size(i)
500
+ return num_shards
501
+
502
+ @property
503
+ def device_mesh(self) -> DeviceMesh:
504
+ # simple aliasing for the mesh field, make some
505
+ # checks that mixes DTensor/DTensorSpec easier
506
+ return self.mesh
507
+
508
+ @property
509
+ def dim_map(self) -> List[int]:
510
+ """
511
+ dim_map is a property we derive from `placements` of
512
+ the distributed tensor. It simply return a list of ints
513
+ where dim_map[i] denotes the sharding mapping to the mesh
514
+ dimension, and len(dim_map) == dist_tensor.ndim
515
+ dim_map[i] = -1: means tensor dim i replicate on mesh
516
+ dim_map[i] = j: means tensor dim i shard on mesh dim j
517
+
518
+ For example, we have a dist tensor that have the shape of
519
+ [18, 20, 30], and device_mesh([0, 1, 2, 3]), placements:
520
+ [Shard(1)], the dim_map of this placement would be:
521
+ [-1, 0, -1]. This representation is pretty helpful during
522
+ sharding propagation where we could know exactly each
523
+ tensor dimension is sharded or not.
524
+
525
+ Note that if placements contains `_Partial`, we have to
526
+ explicitly deal with it, so that when we create a DTensorSpec
527
+ with dim_map, we could properly record the pending sums.
528
+ """
529
+ # dims mapping of dist tensor sharding
530
+ # return size of tensor ndim, -1 represent replicate
531
+ # and int >=0 represent shard on that device mesh dim
532
+ r = [-1] * self.ndim
533
+ for i, placement in enumerate(self.placements):
534
+ if placement.is_shard():
535
+ shard_dim = cast(Shard, placement).dim
536
+ if r[shard_dim] > -1:
537
+ raise ValueError(
538
+ f"Tensor dim {shard_dim} is already sharded on mesh dim {r[shard_dim]},"
539
+ " DTensor operator implementation does not support things like hybrid"
540
+ " sharding strategies yet (i.e. [Shard(0), Shard(0)])"
541
+ )
542
+ r[shard_dim] = i
543
+ return r
544
+
545
+ @property
546
+ def sums(self) -> List[int]:
547
+ """
548
+ sums is a property we derive from `placements` of the
549
+ distributed tensor. It simply return a list of ints where
550
+ sums[i] denotes the pending sum (partial) on mesh dim i
551
+ """
552
+ return [
553
+ idx
554
+ for idx, placement in enumerate(self.placements)
555
+ if placement.is_partial()
556
+ ]
557
+
558
+ @classmethod
559
+ def from_dim_map(
560
+ cls,
561
+ mesh: DeviceMesh,
562
+ dim_map: List[int],
563
+ sums: List[int],
564
+ tensor_meta: Optional[TensorMeta] = None,
565
+ ) -> "DTensorSpec":
566
+ """
567
+ Construct a DTensorSpec from dim_map list and pending sum.
568
+
569
+ Args:
570
+ mesh (class:`DeviceMesh`): device mesh to be used in the DTensorSpec
571
+ dim_map (List[int]): a list of integer that represents sharding on each
572
+ tensor dimension, see `dim_map` property doc for details
573
+ sums (List[int]): a list of integer that represents the dist tensor have
574
+ pending sum on which device mesh dimension.
575
+ tensor meta (TensorMeta): DTensor metadata
576
+
577
+ Return:
578
+ a class:`DTensorSpec` object
579
+ """
580
+ # by default replicate on device mesh dims
581
+ placements: List[Placement] = [Replicate() for _ in range(mesh.ndim)]
582
+
583
+ # find all mesh dims that need pending reductions
584
+ for s in sums:
585
+ placements[s] = _Partial()
586
+
587
+ for i, m in enumerate(dim_map):
588
+ if m >= 0:
589
+ placement = placements[m]
590
+ if placement.is_shard():
591
+ placement = cast(Shard, placement)
592
+ raise RuntimeError(
593
+ f"DeviceMesh dimension cann't be mapped to two dimension of the same tensor: {i} and {placement.dim}"
594
+ )
595
+ elif placement.is_partial():
596
+ raise RuntimeError(
597
+ f"DeviceMesh dimension {m} cannot be both shard and partial!"
598
+ )
599
+ placements[m] = Shard(i)
600
+
601
+ return cls(mesh, tuple(placements), tensor_meta=tensor_meta)
602
+
603
+ def is_replicated(self):
604
+ """
605
+ return True if the current DTensorSpec replicates on all mesh dims (devices)
606
+ """
607
+ return all(placement.is_replicate() for placement in self.placements)
608
+
609
+ def shallow_copy_with_tensor_meta(
610
+ self, tensor_meta: Optional[TensorMeta]
611
+ ) -> "DTensorSpec":
612
+ """
613
+ Shallow copy the DTensorSpec with a new tensor_meta.
614
+ """
615
+ assert tensor_meta is not None, "shallow copy with no tensor_meta!"
616
+ return DTensorSpec(
617
+ self.mesh,
618
+ self.placements,
619
+ tensor_meta=tensor_meta,
620
+ )
venv/lib/python3.10/site-packages/torch/distributed/_tensor/random.py ADDED
@@ -0,0 +1,372 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import contextlib
3
+ import warnings
4
+ from typing import Dict, List, Optional
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+
9
+ from torch import Tensor
10
+ from torch.distributed._tensor.placement_types import DTensorSpec, Shard
11
+ from torch.distributed.device_mesh import _get_device_handle, DeviceMesh
12
+
13
+
14
+ _rng_tracker: Optional["RNGStateTracker"] = None
15
+
16
+
17
+ def is_rng_supported_mesh(device_mesh: DeviceMesh) -> bool:
18
+ """Checks if the current device of `device_mesh` supports DTensor's random APIs.
19
+ Currently DTensor Random APIs only support cuda/cuda-like devices. We suggest
20
+ users call this API to test the availability before using our random APIs.
21
+
22
+ Args:
23
+ device_mesh (:class:`DeviceMesh`): The device mesh on which we check if the
24
+ random ops APIs are supported.
25
+
26
+ Returns:
27
+ A bool value. True if `device_mesh` supports DTensor Random APIs; False otherwise.
28
+
29
+ .. warning::
30
+ Currently we only support correct RNG on cuda/cuda-like devices.
31
+ """
32
+ device_handle = _get_device_handle(device_mesh.device_type)
33
+ if device_handle and hasattr(device_handle, "set_rng_state"):
34
+ return True
35
+ else:
36
+ warnings.warn(
37
+ f"DTensor random operators may not have complete support on {device_mesh.device_type} device mesh"
38
+ )
39
+ return False
40
+
41
+
42
+ def manual_seed(seed: int, device_mesh: DeviceMesh) -> None:
43
+ """Sets the seed for generating random numbers for the calling rank.
44
+
45
+ Args:
46
+ seed (int): The desired seed.
47
+ device_mesh (:class:`DeviceMesh`): The device mesh to set the seed.
48
+
49
+ Returns:
50
+ None
51
+
52
+ .. warning::
53
+ When calling this function, :func:`manual_seed` must be called from all ranks of the
54
+ default `ProcessGroup` even if some ranks may not be a part of the `device_mesh`,
55
+ with the same `seed` value.
56
+ If ``device_mesh`` is a sub-mesh and the calling rank is not a part of it,
57
+ `manual_seed` will not set its GPU device's generator seed.
58
+ Current implementation only supports a GPU device mesh.
59
+ """
60
+ device_handle = _get_device_handle(device_mesh.device_type)
61
+ if not device_handle:
62
+ raise NotImplementedError(
63
+ f"DTensor randomness only supports cuda/cuda-like device type, but got {device_mesh.device_type}"
64
+ )
65
+
66
+ # allgather the seed over the default PG
67
+ object_list = [seed] * dist.get_world_size()
68
+ dist.all_gather_object(object_list, seed)
69
+ for rank, object in enumerate(object_list):
70
+ if seed != int(object):
71
+ raise RuntimeError(
72
+ f"calling manual_seed function over {device_mesh} but received different seed values on ranks:",
73
+ f"seed on rank {dist.get_rank()} is {seed}, and seed on rank {rank} is {object}!",
74
+ )
75
+ # instantiate an RNG tracker if we haven't already. By default DTensor uses an
76
+ # OffsetBasedRNGTracker to perform random operators.
77
+ global _rng_tracker
78
+ if not _rng_tracker:
79
+ _rng_tracker = OffsetBasedRNGTracker(device_mesh.device_type)
80
+
81
+ # the current rank is in mesh
82
+ if device_mesh.get_coordinate() is not None:
83
+ if isinstance(_rng_tracker, TensorParallelRNGTracker):
84
+ _rng_tracker._manual_seed(device_mesh, seed)
85
+ elif isinstance(_rng_tracker, OffsetBasedRNGTracker):
86
+ _rng_tracker._manual_seed(seed)
87
+ else:
88
+ raise RuntimeError(
89
+ f"Unknown type of cuda RNG state tracker: _rng_tracker = {_rng_tracker}"
90
+ )
91
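A minimal usage sketch (assumes an initialized default process group on every rank, with `world_size` the global world size): every rank must call this with the same seed, even ranks that are not part of the mesh.

    from torch.distributed.device_mesh import init_device_mesh

    mesh = init_device_mesh("cuda", (world_size,))
    manual_seed(1234, mesh)   # same value passed on every rank of the default group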
+
92
+
93
+ class RNGStateTracker:
94
+ """
95
+ RNGStateTracker stores Random Number Generator (RNG) state (a ByteTensor object)
96
+ in a dict, mapping from a corresponding tag to each state tensor. It also provides
97
+ a set of convenient utility methods to help access/modify the state tensors. The most
98
+ important interface is _distribute_region which will be used when DTensor executes
99
+ a random op (an operator that calls RNG).
100
+ """
101
+
102
+ def __init__(self, device_type: str = "cuda"):
103
+ self._device_type = device_type
104
+ self._device_handle = _get_device_handle(device_type)
105
+ if not (self._device_handle and self._device_handle.is_available()):
106
+ raise RuntimeError(
107
+ f"{self.__class__.__name__} instantiation requires the presence of CUDA/CUDA-like device"
108
+ )
109
+
110
+ self._states: Dict[str, Tensor] = {}
111
+ self._devices = [self._device_handle.current_device()]
112
+ self._use_distribute_region = True
113
+
114
+ @property
115
+ def rng_states(self) -> Dict[str, Tensor]:
116
+ return self._states
117
+
118
+ @property
119
+ def distribute_region_enabled(self) -> bool:
120
+ return self._use_distribute_region
121
+
122
+ @distribute_region_enabled.setter
123
+ def distribute_region_enabled(self, value) -> None:
124
+ self._use_distribute_region = value
125
+
126
+ def rng_state_is_sync(self, name) -> bool:
127
+ return name in self.rng_states
128
+
129
+ def get_seed(self, name: str) -> int:
130
+ if name not in self.rng_states:
131
+ raise RuntimeError(
132
+ f"{self.__class__.__name__} does not have random state for {name}"
133
+ )
134
+
135
+ seed_tensor = (self.rng_states[name])[0:8].view(dtype=torch.int64)
136
+ return int(seed_tensor.item())
137
+
138
+ def set_seed(self, name: str, seed: int) -> None:
139
+ seed_tensor = torch.tensor([seed]).view(torch.uint8)
140
+ offset_tensor = torch.tensor([0]).view(torch.uint8)
141
+ self.rng_states[name] = torch.cat([seed_tensor, offset_tensor])
142
+
143
+ def _distribute_region(self, spec: DTensorSpec):
144
+ pass
145
+
146
+
147
+ class OffsetBasedRNGTracker(RNGStateTracker):
148
+ """
149
+ This subclass of `RNGStateTracker` defines the default policy of how RNG states
150
+ should be shared and synchronized among all ranks to respect the semantics of DTensor
151
+ random operators.
152
+ """
153
+
154
+ def __init__(self, device_type: str = "cuda"):
155
+ super().__init__(device_type)
156
+ # synchronize RNG state using rank 0's current one
157
+ rng_state = self._device_handle.get_rng_state().to(device_type)
158
+ dist.broadcast(rng_state, 0)
159
+ self.rng_states["parallel-rng"] = rng_state.to("cpu")
160
+
161
+ def _manual_seed(self, parallel_seed: int) -> None:
162
+ self.set_seed("parallel-rng", parallel_seed)
163
+
164
+ @contextlib.contextmanager
165
+ def _distribute_region(self, spec: DTensorSpec):
166
+ # check if the parallel rng state has been synchronized or not
167
+ if not self.rng_state_is_sync("parallel-rng"):
168
+ raise RuntimeError(
169
+ "OffsetBasedRNGTracker requires the random state to be synchronized "
170
+ "before entering into a distribute region!"
171
+ )
172
+
173
+ if self.distribute_region_enabled:
174
+ old_offset = self.get_offset("parallel-rng")
175
+ self._set_pre_op_offset(spec)
176
+ with torch.random.fork_rng(self._devices, device_type=self._device_type):
177
+ self._device_handle.set_rng_state(self.rng_states["parallel-rng"])
178
+ try:
179
+ yield # execute the region code
180
+ finally:
181
+ # update offset to synchronize among ranks
182
+ self._set_post_op_offset(spec, old_offset)
183
+ else:
184
+ yield
185
+
186
+ def get_offset(self, name: str) -> int:
187
+ if name not in self.rng_states:
188
+ raise RuntimeError(
189
+ f"{self.__class__.__name__} does not have random state for {name}"
190
+ )
191
+
192
+ offset_tensor = (self.rng_states[name])[8:].view(dtype=torch.int64)
193
+ return int(offset_tensor.item())
194
+
195
+ def set_offset(self, name: str, offset: int) -> None:
196
+ if name not in self.rng_states:
197
+ raise RuntimeError(
198
+ f"{self.__class__.__name__} does not have random state for {name}"
199
+ )
200
+
201
+ seed_tensor = (self.rng_states[name])[0:8]
202
+ offset_tensor = torch.tensor([offset]).view(torch.uint8)
203
+ self.rng_states[name] = torch.cat([seed_tensor, offset_tensor])
204
+
205
+ def _set_pre_op_offset(self, spec: DTensorSpec) -> None:
206
+ """Set the starting RNG offset for current device's local shard before actual
207
+ op execution. The pre_op_offset value should start from the current RNG offset
208
+ and increment by the size of local shard until it reaches the size of the whole
209
+ DTensor. For different ranks that hold the same DTensor shard, their pre_op_offset
210
+ will be the same.
211
+
212
+ Args:
213
+ spec (:class:`DTensorSpec`): the spec of the DTensor object on which
214
+ we prepare the offset for running random ops.
215
+
216
+ Returns:
217
+ None
218
+
219
+ .. warning::
220
+ Note that the current implementation does not consider DTensor's contiguity.
221
+
222
+ Example:
223
+ take a DTensor of shape [8, 16] as an example. Assume that the DTensor
224
+ is placed on a device mesh with placements ([Shard(1), Replicate(), Shard(0)]),
225
+ and the mesh is:
226
+ [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
227
+ ``spec.mesh.get_coordinate()`` provides the coordinate of the current rank
228
+ in the mesh. For example, the coordinate of rank 5 is (1, 0, 1).
229
+
230
+ Another concept to introduce besides rank coordinate is shard coordinate.
231
+ Each rank holds a local shard of the DTensor. In the example, the DTensor
232
+ is partitioned into 4 [4, 8] shards. The first shard has 2 replicas and
233
+ rank 0 (coord (0, 0, 0)) and rank 2 (coord (0, 1, 0)) have 1 replica each.
234
+ That being said, the local shard on rank 0 and rank 2 correspond to the same
235
+ shard of the DTensor. To denote each DTensor shard, we use a shard coordinate
236
+ (in the example, it will be a tuple (i, j) where shard (i, j) has the slice
237
+ DTensor[4 * i : 4 * (i + 1), 8 * j : 8 * (j + 1)], 0 <= i < 2, 0 <= j < 2).
238
+
239
+ Once we have rank coordinate and shard coordinate, we can calculate on each rank
240
+ what shard of the DTensor the rank holds, with the help of dim_map. The dim_map
241
+ of the above DTensor is [2, 0] so the shard coordinate of a rank with rank coord
242
+ (x, y, z) is simply (z, x), by taking (rank_coord[dim_map[0]], rank_coord[dim_map[1]]).
243
+ Following this calculation,
244
+ rank 0 and rank 2 holds the shard of coord (0, 0);
245
+ rank 1 and rank 3 holds the shard of coord (0, 1);
246
+ rank 4 and rank 6 holds the shard of coord (1, 0);
247
+ rank 5 and rank 7 holds the shard of coord (1, 1);
248
+
249
+ The last value to calculate before obtaining the starting offset is the shard linear index.
250
+ The starting offset for each rank will be its shard_linear_index * local_tensor_numel.
251
+ """
252
+ dtensor_shape = spec.shape
253
+ mesh = spec.mesh
254
+ dim_map = spec.dim_map
255
+
256
+ # Compute shard coordinate:
257
+ # The coordinate on each tensor dim is a tuple (idx, range)
258
+ # If a DTensor is partitioned on its dim i into n shards, and the current rank
259
+ # holds the j-th, then its shard coordinate will be (idx=j, range=n) on dim i
260
+ coordinate = mesh.get_coordinate()
261
+ assert coordinate is not None
262
+ shard_coord = [
263
+ coordinate[mesh_dim] if mesh_dim >= 0 else 0 for mesh_dim in dim_map
264
+ ]
265
+ shard_size = [
266
+ mesh.size(mesh_dim) if mesh_dim >= 0 else 1 for mesh_dim in dim_map
267
+ ]
268
+
269
+ # compute shard linear index
270
+ shard_linear_idx = self._calc_shard_linear_idx(shard_coord, shard_size)
271
+
272
+ # compute starting offset using the first shard's size
273
+ local_size_on_rank_0 = list(dtensor_shape)
274
+ for idx, placement in enumerate(spec.placements):
275
+ if isinstance(placement, Shard):
276
+ mesh_dim_size = mesh.size(idx)
277
+ shard_dim = placement.dim
278
+ local_size_on_rank_0[shard_dim] = placement._local_shard_size_on_dim(
279
+ dtensor_shape[shard_dim],
280
+ mesh_dim_size,
281
+ 0,
282
+ return_offset=False,
283
+ )[0]
284
+
285
+ from torch.distributed._tensor.ops.utils import prod
286
+
287
+ local_size = prod(local_size_on_rank_0)
288
+
289
+ # get current RNG offset
290
+ current_offset = self.get_offset("parallel-rng")
291
+
292
+ # pytorch: offset must be multiple of 4
293
+ # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp
294
+ offset_incr = (shard_linear_idx * local_size + 3) // 4 * 4
295
+ self.set_offset("parallel-rng", current_offset + offset_incr)
296
+
297
+ def _set_post_op_offset(self, spec: DTensorSpec, old_offset: int) -> None:
298
+ """Sets the RNG to a synchronized state after running the local random op. Every
299
+ rank should set its RNG offset to `old_offset + DTensor.numel()` where old_offset is
300
+ the offset before calling `set_pre_op_offset` i.e. the offset before running DTensor
301
+ random ops.
302
+
303
+ Args:
304
+ spec (:class:`DTensorSpec`): the spec of the DTensor object on which
305
+ we post-process the offset for running random ops.
306
+
307
+ Returns:
308
+ None
309
+ """
310
+ dtensor_shape = spec.shape
311
+
312
+ from torch.distributed._tensor.ops.utils import prod
313
+
314
+ numel = prod(dtensor_shape)
315
+ # pytorch: offset must be multiple of 4
316
+ # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp
317
+ numel = (numel + 3) // 4 * 4
318
+ self.set_offset("parallel-rng", old_offset + numel)
319
+
320
+ def _calc_shard_linear_idx(
321
+ self, shard_coord: List[int], shard_size: List[int]
322
+ ) -> int:
323
+ # compute shard linear index
324
+ shard_linear_idx = 0
325
+ shard_coord_stride = 1
326
+ for idx, size in zip(reversed(shard_coord), reversed(shard_size)):
327
+ shard_linear_idx += idx * shard_coord_stride
328
+ shard_coord_stride *= size
329
+
330
+ return shard_linear_idx
331
+
332
+
333
+ class TensorParallelRNGTracker(RNGStateTracker):
334
+ def __init__(self, device_type: str = "cuda"):
335
+ super().__init__(device_type)
336
+ # copy the default RNG state
337
+ self.rng_states["tensor-parallel-rng"] = self._device_handle.get_rng_state()
338
+
339
+ def _manual_seed(
340
+ self,
341
+ tp_mesh: DeviceMesh,
342
+ base_seed: int = 1234,
343
+ ):
344
+ tensor_parallel_rank = tp_mesh.get_local_rank()
345
+ # this magic number 2718 comes from Megatron's code
346
+ # (https://github.com/NVIDIA/Megatron-LM/blob/060415572f4365a2e895f8036c4e37dad0efbdf5/megatron/core/tensor_parallel/random.py#L162-L163)
347
+ MegatronMagicNum = 2718
348
+ tensor_parallel_seed = base_seed + MegatronMagicNum + tensor_parallel_rank
349
+ self.set_seed("tensor-parallel-rng", tensor_parallel_seed)
350
+
351
+ @contextlib.contextmanager
352
+ def _distribute_region(self, spec: DTensorSpec):
353
+ # check if the tensor parallel rng state has been synchronized or not
354
+ if not self.rng_state_is_sync("tensor-parallel-rng"):
355
+ raise RuntimeError(
356
+ "TensorParallelRNGTracker requires the random state to be synchronized "
357
+ "before entering into a distribute region!"
358
+ )
359
+
360
+ if self.distribute_region_enabled:
361
+ with torch.random.fork_rng(self._devices, device_type=self._device_type):
362
+ self._device_handle.set_rng_state(
363
+ self.rng_states["tensor-parallel-rng"]
364
+ )
365
+ try:
366
+ yield
367
+ finally:
368
+ self.rng_states[
369
+ "tensor-parallel-rng"
370
+ ] = self._device_handle.get_rng_state()
371
+ else:
372
+ yield
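The worked example in `_set_pre_op_offset`'s docstring can be replayed without any process group. The standalone sketch below (illustrative only; the helper name `shard_linear_index` and the hard-coded values are assumptions taken from that docstring, not part of this module) reproduces the shard-coordinate, linear-index, and offset arithmetic for the [8, 16] DTensor placed as [Shard(1), Replicate(), Shard(0)] on a 2x2x2 mesh, from the perspective of rank 5.

# Standalone sketch of the offset arithmetic described above; plain Python, no torch needed.

def shard_linear_index(shard_coord, shard_size):
    # same row-major walk as _calc_shard_linear_idx
    idx, stride = 0, 1
    for c, s in zip(reversed(shard_coord), reversed(shard_size)):
        idx += c * stride
        stride *= s
    return idx

dtensor_shape = [8, 16]
mesh_shape = [2, 2, 2]
dim_map = [2, 0]          # tensor dim 0 is sharded on mesh dim 2, tensor dim 1 on mesh dim 0
rank_coord = (1, 0, 1)    # rank 5 in the docstring's mesh

shard_coord = [rank_coord[m] if m >= 0 else 0 for m in dim_map]   # [1, 1]
shard_size = [mesh_shape[m] if m >= 0 else 1 for m in dim_map]    # [2, 2]

local_numel = (8 // 2) * (16 // 2)   # each rank holds a [4, 8] shard -> 32 elements
pre_op_incr = (shard_linear_index(shard_coord, shard_size) * local_numel + 3) // 4 * 4
post_op_incr = (8 * 16 + 3) // 4 * 4  # every rank then advances by the (rounded) global numel

print(pre_op_incr, post_op_incr)      # 96 128

Running this for other rank coordinates shows that ranks holding the same shard (e.g. ranks 5 and 7) compute the same pre-op increment, which is exactly what keeps replicas bitwise identical under random ops.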
venv/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py ADDED
@@ -0,0 +1,337 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from functools import lru_cache
3
+ from typing import cast, Dict, List, NamedTuple, Tuple
4
+
5
+ import torch
6
+ import torch.distributed._functional_collectives as funcol
7
+ import torch.distributed._tensor.api as dtensor
8
+ from torch.distributed._tensor.device_mesh import DeviceMesh
9
+ from torch.distributed._tensor.placement_types import (
10
+ _Partial,
11
+ DTensorSpec,
12
+ Placement,
13
+ Replicate,
14
+ Shard,
15
+ )
16
+
17
+
18
+ class _TransformInfo(NamedTuple):
19
+ mesh_dim: int
20
+ src_dst_placements: Tuple[Placement, Placement]
21
+ # logical_shape on this mesh dimension
22
+ logical_shape: List[int]
23
+
24
+
25
+ def _replicate_then_shard(val: _TransformInfo) -> int:
26
+ """
27
+ This is a helper function to allow reordering the _TransformInfo list. The high-level
28
+ idea is that we want to reorder the sharding redistributions so that the DTensor
29
+ redistribution is consistent with its full tensor. This is built on top of two simple
30
+ assumptions:
31
+ 1. Replication happens from inner to outer dimension. i.e. Shard -> Replicate
32
+ 2. Sharding happens from outer to inner dimension, i.e. Replicate -> Shard
33
+
34
+ So we always put the replication first and put sharding later.
35
+ """
36
+ mesh_dim = val.mesh_dim
37
+ src, dst = val.src_dst_placements
38
+ if (dst.is_replicate() or dst.is_partial()) and src.is_shard():
39
+ return -mesh_dim
40
+ elif (src.is_replicate() or src.is_partial()) and dst.is_shard():
41
+ return mesh_dim
42
+ else:
43
+ return 0
44
+
45
+
46
+ @lru_cache(maxsize=None)
47
+ def _gen_transform_infos(
48
+ src_spec: DTensorSpec,
49
+ dst_spec: DTensorSpec,
50
+ ) -> List[_TransformInfo]:
51
+ """
52
+ Generate the transform infos from the source placements to the target placements. To
53
+ transform from source to target placement, it may take multiple steps, i.e. it might
54
+ decompose Si -> Sj into Si -> R -> Sj.
55
+ This also detects whether there are mis-aligned shardings between src/dst placements,
56
+ i.e. (Shard(0), Shard(0)) -> (Replicate(), Shard(0)), in this case Shard(0) -> Shard(0)
57
+ for mesh dimension 1 actually needs a reshard, because in the first case it's a sub-sharding
58
+ of an already sharded tensor dimension 0, and in the second case, it's the first sharding on tensor
59
+ dimension 0.
60
+
61
+ Note that this pass also currently handles sharding across different tensor dimensions,
62
+ e.g. Shard(0) -> Shard(1).
63
+ """
64
+ src_dim_counts: Dict[int, int] = {}
65
+ dst_dim_counts: Dict[int, int] = {}
66
+ transform_infos: List[_TransformInfo] = []
67
+
68
+ src_placements = src_spec.placements
69
+ dst_placements = dst_spec.placements
70
+ device_mesh = src_spec.device_mesh
71
+ my_coordinate = device_mesh.get_coordinate()
72
+ assert my_coordinate is not None
73
+
74
+ # logical shape records the logical tensor shape on the mesh dimension
75
+ # this is useful to ensure uneven sharding gets correct output shape
76
+ initial_logical_shape = list(src_spec.shape)
77
+ mesh_dims_to_logical_shape = [initial_logical_shape]
78
+ mesh_ndim = len(src_placements)
79
+
80
+ for i, (src, dst) in enumerate(zip(src_placements, dst_placements)):
81
+ # detect mis-aligned sharding and build logical shapes
82
+ current_logical_shape = mesh_dims_to_logical_shape[i]
83
+ if isinstance(src, Shard):
84
+ src_dim_counts[src.dim] = src_dim_counts.get(src.dim, 0) + 1
85
+
86
+ if i < mesh_ndim - 1:
87
+ # calculate and save the logical shape for this sharding
88
+ mesh_dim_size = device_mesh.size(mesh_dim=i)
89
+ local_shard_size, _ = src._local_shard_size_on_dim(
90
+ current_logical_shape[src.dim],
91
+ mesh_dim_size,
92
+ my_coordinate[i],
93
+ )
94
+ new_logical_shape = list(current_logical_shape)
95
+ new_logical_shape[src.dim] = local_shard_size
96
+ mesh_dims_to_logical_shape.append(new_logical_shape)
97
+ else:
98
+ mesh_dims_to_logical_shape.append(current_logical_shape)
99
+
100
+ if isinstance(dst, Shard):
101
+ dst_dim_counts[dst.dim] = dst_dim_counts.get(dst.dim, 0) + 1
102
+
103
+ if (
104
+ isinstance(src, Shard)
105
+ and isinstance(dst, Shard)
106
+ and (
107
+ src.dim != dst.dim or src_dim_counts[src.dim] != dst_dim_counts[dst.dim]
108
+ )
109
+ ):
110
+ # decompose Shard(i) -> Shard(j) into Shard(i) -> Replicate() -> Shard(j)
111
+ transform_infos.append(
112
+ _TransformInfo(
113
+ mesh_dim=i,
114
+ src_dst_placements=(src, Replicate()),
115
+ logical_shape=mesh_dims_to_logical_shape[i],
116
+ )
117
+ )
118
+ transform_infos.append(
119
+ _TransformInfo(
120
+ mesh_dim=i,
121
+ src_dst_placements=(Replicate(), dst),
122
+ logical_shape=mesh_dims_to_logical_shape[i],
123
+ )
124
+ )
125
+ else:
126
+ transform_infos.append(
127
+ _TransformInfo(
128
+ mesh_dim=i,
129
+ src_dst_placements=(src, dst),
130
+ logical_shape=mesh_dims_to_logical_shape[i],
131
+ )
132
+ )
133
+
134
+ # sort the pairs so that replication is performed before sharding
135
+ transform_infos.sort(key=_replicate_then_shard)
136
+ return transform_infos
137
+
138
+
139
+ def redistribute_local_tensor(
140
+ local_tensor: torch.Tensor,
141
+ current_spec: DTensorSpec,
142
+ target_spec: DTensorSpec,
143
+ *,
144
+ async_op: bool = False,
145
+ is_backward: bool = False,
146
+ ) -> torch.Tensor:
147
+ """
148
+ This redistributes the local tensor (torch.Tensor) from the current DTensorSpec to
149
+ the target DTensorSpec, which involves the necessary collective calls to transform
150
+ the local shard of the DTensor from its current spec to the target spec.
151
+ """
152
+
153
+ if current_spec.mesh != target_spec.mesh:
154
+ # TODO: alltoall/permute reshuffling to change device_mesh if they are not the same
155
+ raise NotImplementedError("Cross device mesh comm not supported yet!")
156
+
157
+ new_local_tensor = None
158
+ device_mesh = current_spec.mesh
159
+
160
+ my_coordinate = device_mesh.get_coordinate()
161
+
162
+ if my_coordinate is None:
163
+ # if rank is not part of mesh, we skip redistribute and simply return local_tensor,
164
+ # which should be an empty tensor
165
+ return local_tensor
166
+
167
+ transform_infos = _gen_transform_infos(current_spec, target_spec)
168
+
169
+ for transform_info in transform_infos:
170
+ i = transform_info.mesh_dim
171
+ current, target = transform_info.src_dst_placements
172
+ num_chunks = device_mesh.size(mesh_dim=i)
173
+
174
+ if current == target:
175
+ # short cut, just use the original local tensor
176
+ new_local_tensor = local_tensor
177
+ continue
178
+
179
+ if target.is_replicate():
180
+ # Case 1: target is Replicate
181
+ if current.is_partial():
182
+ partial_spec = cast(_Partial, current)
183
+ new_local_tensor = partial_spec._reduce_value(
184
+ local_tensor, device_mesh, i
185
+ )
186
+ elif current.is_shard():
187
+ current_placement = cast(Shard, current)
188
+ new_local_tensor = current_placement._to_replicate_tensor(
189
+ local_tensor, device_mesh, i, transform_info.logical_shape
190
+ )
191
+ else:
192
+ raise RuntimeError(
193
+ f"redistribute from {current} to {target} not supported yet"
194
+ )
195
+ elif target.is_shard():
196
+ # Case 2: target is Shard
197
+ target_placement = cast(Shard, target)
198
+ target_dim = target_placement.dim
199
+ if current.is_partial():
200
+ partial_spec = cast(_Partial, current)
201
+ new_local_tensor = partial_spec._reduce_shard_value(
202
+ local_tensor, device_mesh, i, target_placement
203
+ )
204
+ elif current.is_replicate():
205
+ # split the tensor and return the corresponding cloned local shard
206
+ new_local_tensor = target_placement._replicate_to_shard(
207
+ local_tensor, device_mesh, i, my_coordinate[i]
208
+ )
209
+ else:
210
+ # NOTE: we don't support this case efficiently yet, the fallback path we take here is
211
+ # to decompose Shard(0) -> Shard(1) into Shard(0) -> Replicate -> Shard(1)
212
+ # TODO: enable this with all_to_all
213
+ assert (
214
+ current.is_shard()
215
+ ), f"Current placement should be shard but found {current}"
216
+ shard_spec = cast(Shard, current)
217
+ if shard_spec.dim != target_placement.dim:
218
+ new_local_tensor = shard_spec._to_replicate_tensor(
219
+ local_tensor, device_mesh, i, transform_info.logical_shape
220
+ )
221
+ shards, _ = target_placement._split_tensor(
222
+ new_local_tensor,
223
+ num_chunks,
224
+ with_padding=False,
225
+ contiguous=False,
226
+ )
227
+ new_local_tensor = shards[my_coordinate[i]]
228
+ elif target.is_partial():
229
+ if current.is_replicate():
230
+ partial_spec = cast(_Partial, target)
231
+ # Skip the replicate-to-partial transformation when we are in the backward pass.
232
+ # In this case we keep the grad as replicate; this is because we don't
233
+ # want to convert the replicated gradients back to partial. Although
234
+ # that would logically conform to the same layout, converting the gradients
235
+ # back to partial is actually useless: you would have to do a reduce later,
236
+ # which would be more expensive than keeping them replicated. For this reason,
237
+ # we keep the replicated grad here.
238
+ new_local_tensor = (
239
+ partial_spec._partition_value(local_tensor, device_mesh, i)
240
+ if not is_backward
241
+ else local_tensor
242
+ )
243
+ elif current.is_shard():
244
+ if not is_backward:
245
+ raise RuntimeError(
246
+ f"redistribute from {current} to {target} not supported yet"
247
+ )
248
+ # for backward shard -> partial, we just need to convert the shard to replicate
249
+ current_placement = cast(Shard, current)
250
+ new_local_tensor = current_placement._to_replicate_tensor(
251
+ local_tensor, device_mesh, i, transform_info.logical_shape
252
+ )
253
+ else:
254
+ # partial -> partial no op, should never hit
255
+ new_local_tensor = local_tensor
256
+
257
+ assert new_local_tensor is not None
258
+ local_tensor = new_local_tensor
259
+
260
+ assert new_local_tensor is not None, "redistribute failed!"
261
+
262
+ if not async_op and isinstance(new_local_tensor, funcol.AsyncCollectiveTensor):
263
+ new_local_tensor = new_local_tensor.wait()
264
+
265
+ return new_local_tensor
266
+
267
+
268
+ class Redistribute(torch.autograd.Function):
269
+ @staticmethod
270
+ def forward( # type: ignore[override]
271
+ # pyre-fixme[2]: Parameter must be annotated.
272
+ ctx,
273
+ input: "dtensor.DTensor",
274
+ device_mesh: DeviceMesh,
275
+ placements: Tuple[Placement, ...],
276
+ async_op: bool = False,
277
+ ):
278
+ current_spec = input._spec
279
+ ctx.current_spec = current_spec
280
+ ctx.async_op = async_op
281
+ target_spec = DTensorSpec(
282
+ device_mesh, placements, tensor_meta=input._spec.tensor_meta
283
+ )
284
+
285
+ local_tensor = input._local_tensor
286
+ output = redistribute_local_tensor(
287
+ local_tensor, current_spec, target_spec, async_op=async_op
288
+ )
289
+
290
+ return dtensor.DTensor(
291
+ output,
292
+ device_mesh,
293
+ target_spec.placements,
294
+ shape=input.shape,
295
+ dtype=input.dtype,
296
+ requires_grad=input.requires_grad,
297
+ stride=input.stride(),
298
+ )
299
+
300
+ @staticmethod
301
+ def backward(ctx, grad_output: "dtensor.DTensor"): # type: ignore[override]
302
+ previous_spec = ctx.current_spec
303
+ current_spec = grad_output._spec
304
+ async_op = ctx.async_op
305
+
306
+ local_tensor = grad_output._local_tensor
307
+ output = redistribute_local_tensor(
308
+ local_tensor,
309
+ current_spec,
310
+ previous_spec,
311
+ async_op=async_op,
312
+ is_backward=True,
313
+ )
314
+ # normalize the target placement to replicate if it is partial
315
+ normalized_placements: List[Placement] = []
316
+ for previous_placement in previous_spec.placements:
317
+ if previous_placement.is_partial():
318
+ # keep target placement to replicate instead of partial in this case
319
+ normalized_placements.append(Replicate())
320
+ else:
321
+ normalized_placements.append(previous_placement)
322
+ output_dtensor = dtensor.DTensor(
323
+ output,
324
+ previous_spec.mesh,
325
+ tuple(normalized_placements),
326
+ shape=grad_output.shape,
327
+ dtype=grad_output.dtype,
328
+ requires_grad=grad_output.requires_grad,
329
+ stride=grad_output.stride(),
330
+ )
331
+
332
+ return (
333
+ output_dtensor,
334
+ None,
335
+ None,
336
+ None,
337
+ )
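For context, the user-facing path that eventually reaches `redistribute_local_tensor` above is `DTensor.redistribute`. The sketch below is a hedged usage example, not something this file prescribes: it assumes the script is launched with `torchrun --nproc_per_node=2` on a host where the gloo backend is available, and the tensor values are arbitrary.

# Hedged usage sketch: Shard(0) -> Replicate() -> Shard(1) on a 2-rank CPU mesh.
import torch
from torch.distributed._tensor import distribute_tensor, Shard, Replicate
from torch.distributed.device_mesh import init_device_mesh

mesh = init_device_mesh("cpu", (2,))                       # 1-D mesh over 2 ranks
dt = distribute_tensor(torch.arange(8.0).reshape(4, 2), mesh, [Shard(0)])

replicated = dt.redistribute(mesh, [Replicate()])          # Shard -> Replicate: all-gather along the mesh dim
resharded = replicated.redistribute(mesh, [Shard(1)])      # Replicate -> Shard: local chunk, no communication
print(resharded.to_local().shape)                          # torch.Size([4, 1]) on every rank

The first hop is exactly the Shard -> Replicate branch of `redistribute_local_tensor`, and the second is the Replicate -> Shard branch; a direct Shard(0) -> Shard(1) request would be decomposed into these two steps by `_gen_transform_infos`.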
venv/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py ADDED
@@ -0,0 +1,410 @@
1
+ from functools import lru_cache
2
+ from itertools import chain
3
+ from typing import Callable, cast, Dict, List, Optional, Sequence, Union
4
+
5
+ import torch
6
+ from torch._ops import OpOverload
7
+ from torch._subclasses import FakeTensorMode
8
+ from torch.distributed._tensor._utils import try_find_mesh_from_args
9
+ from torch.distributed._tensor.op_schema import (
10
+ DTensorSpec,
11
+ OpInfo,
12
+ OpSchema,
13
+ OpStrategy,
14
+ OutputSharding,
15
+ OutputSpecType,
16
+ PlacementStrategy,
17
+ RuntimeSchemaInfo,
18
+ StrategyType,
19
+ TupleStrategy,
20
+ )
21
+ from torch.distributed._tensor.placement_types import TensorMeta
22
+ from torch.distributed.device_mesh import DeviceMesh
23
+
24
+ aten = torch.ops.aten
25
+
26
+
27
+ def _length(obj) -> int:
28
+ if obj is None:
29
+ return 0
30
+ if not isinstance(obj, Sequence):
31
+ return 1
32
+ return len(obj)
33
+
34
+
35
+ class ShardingPropagator:
36
+ def __init__(self) -> None:
37
+ self.op_to_rules: Dict[OpOverload, Callable[[OpSchema], OutputSharding]] = {}
38
+ self.op_strategy_funcs: Dict[
39
+ OpOverload,
40
+ Callable[[DeviceMesh, OpSchema], StrategyType],
41
+ ] = {}
42
+ # op map to save static argnum to decide to reuse sharding prop cache or re-run sharding prop
43
+ self.op_to_schema_info: Dict[OpOverload, RuntimeSchemaInfo] = {}
44
+ self.propagate_op_sharding = lru_cache(None)(self.propagate_op_sharding_non_cached) # type: ignore[method-assign]
45
+
46
+ def register_sharding_prop_rule(
47
+ self,
48
+ op_overload: OpOverload,
49
+ rule_func: Callable[[OpSchema], OutputSharding],
50
+ schema_info: Optional[RuntimeSchemaInfo] = None,
51
+ ):
52
+ """
53
+ Register a sharding propagation rule for an operator.
54
+ """
55
+ self.op_to_rules[op_overload] = rule_func
56
+ if schema_info is not None:
57
+ self.op_to_schema_info[op_overload] = schema_info
58
+
59
+ def register_op_strategy(
60
+ self,
61
+ op_overload: OpOverload,
62
+ strategy_func: Callable[[DeviceMesh, OpSchema], StrategyType],
63
+ schema_info: Optional[RuntimeSchemaInfo] = None,
64
+ ):
65
+ """
66
+ Register a sharding strategy generator for an operator.
67
+ """
68
+ self.op_strategy_funcs[op_overload] = strategy_func
69
+ if schema_info is not None:
70
+ self.op_to_schema_info[op_overload] = schema_info
71
+
72
+ @lru_cache
73
+ def _propagate_tensor_meta(
74
+ self, op_schema: OpSchema
75
+ ) -> Union[None, TensorMeta, Sequence[Optional[TensorMeta]]]:
76
+ """
77
+ Propagate the tensor metadata, it could either return a TensorMeta
78
+ or a list/tuple of TensorMetas
79
+ """
80
+ if op_schema.op == aten.equal.default:
81
+ # data dependent ops can't be used for fake propagation
82
+ return None
83
+
84
+ # NOTE: We must call the tracing in fake tensor mode so that it
85
+ # avoids materializing memory
86
+ with FakeTensorMode():
87
+ fake_args = op_schema.gen_fake_args()
88
+ fake_kwargs = op_schema.gen_fake_kwargs()
89
+ fake_out = op_schema.op(*fake_args, **fake_kwargs)
90
+
91
+ if isinstance(fake_out, torch.Tensor):
92
+ return TensorMeta(
93
+ shape=fake_out.shape, stride=fake_out.stride(), dtype=fake_out.dtype
94
+ )
95
+
96
+ elif isinstance(fake_out, (tuple, list)):
97
+ tensor_meta_list: List[Optional[TensorMeta]] = []
98
+ for fake_out_item in fake_out:
99
+ if isinstance(fake_out_item, torch.Tensor):
100
+ tensor_meta_list.append(
101
+ TensorMeta(
102
+ shape=fake_out_item.shape,
103
+ stride=fake_out_item.stride(),
104
+ dtype=fake_out_item.dtype,
105
+ )
106
+ )
107
+ else:
108
+ tensor_meta_list.append(None)
109
+ return (
110
+ tuple(tensor_meta_list)
111
+ if isinstance(fake_out, tuple)
112
+ else tensor_meta_list
113
+ )
114
+ else:
115
+ # if fake is not a tensor or tuple of tensor, return as none
116
+ return None
117
+
118
+ def _wrap_output_spec_tensor_meta(
119
+ self,
120
+ op: OpOverload,
121
+ output_specs: OutputSpecType,
122
+ output_tensor_meta: Union[None, TensorMeta, Sequence[Optional[TensorMeta]]],
123
+ ) -> None:
124
+ """
125
+ Wrap the output_specs with the tensor metadata from the output.
126
+ """
127
+
128
+ if isinstance(output_specs, DTensorSpec):
129
+ if not isinstance(output_tensor_meta, TensorMeta):
130
+ # Either error due to ShardingPropagator or due to incorrect OutputSpec
131
+ if not isinstance(output_tensor_meta, (tuple, list)):
132
+ raise ValueError(
133
+ "ShardingPropagator error: output does not have an associated TensorMeta"
134
+ )
135
+ raise ValueError(
136
+ f"For the op {op.name()}, `output_specs` has 1 output which does not equal the "
137
+ f"number of op outputs: {len(output_tensor_meta)}."
138
+ )
139
+ output_specs.tensor_meta = output_tensor_meta
140
+ elif isinstance(output_specs, (tuple, list)):
141
+ if not isinstance(output_tensor_meta, (tuple, list)) or len(
142
+ output_specs
143
+ ) != len(output_tensor_meta):
144
+ raise ValueError(
145
+ f"For the op {op.name()}, `output_specs` has {len(output_specs)} outputs which does not equal the "
146
+ f"number of op outputs {_length(output_tensor_meta)}."
147
+ )
148
+ for i, spec in enumerate(output_specs):
149
+ if isinstance(spec, DTensorSpec):
150
+ output_tensor_meta_i = output_tensor_meta[i]
151
+ if not isinstance(output_tensor_meta_i, TensorMeta):
152
+ raise ValueError(
153
+ f"ShardingPropagator error: output {i} does not have an associated TensorMeta"
154
+ )
155
+ spec.tensor_meta = output_tensor_meta_i
156
+
157
+ def propagate(self, op_info: OpInfo) -> None:
158
+ # We cannot use an lru cache if we know that inputs will have dynamic shapes,
159
+ # because SymInts are not hashable.
160
+ # This is generally ok because this only happens during tracing in torch.compile,
161
+ # and tracing does not need to be as fast as eagermode DTensor usages.
162
+ if op_info.schema.has_symints:
163
+ output_sharding = self.propagate_op_sharding_non_cached(op_info.schema)
164
+ else:
165
+ output_sharding = self.propagate_op_sharding(op_info.schema)
166
+ op_info.output_sharding = output_sharding
167
+
168
+ def propagate_op_sharding_non_cached(self, op_schema: OpSchema) -> OutputSharding:
169
+ """
170
+ Propagate the sharding for an operator given the op_schema.
171
+ """
172
+ # special case op, we don't need to propagate for local
173
+ # scalar. TODO: figure out a better way to handle this
174
+ if op_schema.op is aten._local_scalar_dense.default:
175
+ return OutputSharding(None, [op_schema])
176
+
177
+ out_tensor_meta = self._propagate_tensor_meta(op_schema)
178
+
179
+ def spec_to_strategy(spec: object) -> object:
180
+ if isinstance(spec, DTensorSpec):
181
+ return OpStrategy([PlacementStrategy(spec)])
182
+ elif (
183
+ isinstance(spec, (list, tuple))
184
+ and len(spec) > 0
185
+ and isinstance(spec[0], DTensorSpec)
186
+ ):
187
+ # tensor list create tuple strategy
188
+ tuple_strategy = [spec_to_strategy(s) for s in spec]
189
+ tuple_strategy = cast(Sequence[StrategyType], tuple_strategy)
190
+ return TupleStrategy(
191
+ tuple(tuple_strategy) if isinstance(spec, tuple) else tuple_strategy
192
+ )
193
+ else:
194
+ return spec
195
+
196
+ if op_schema.op in self.op_strategy_funcs:
197
+ # generate op strategy for the op.
198
+ mesh = try_find_mesh_from_args(op_schema.op, op_schema.args_schema)
199
+ # swap the args spec with args strategies
200
+ args_op_strategy = [spec_to_strategy(i) for i in op_schema.args_schema]
201
+
202
+ kwargs_op_strategy = {
203
+ k: spec_to_strategy(v) for k, v in op_schema.kwargs_schema.items()
204
+ }
205
+
206
+ # construct a new OpSchema on args for strategy based propagation
207
+ strategy_schema: OpSchema = OpSchema(
208
+ op=op_schema.op,
209
+ args_schema=tuple(args_op_strategy),
210
+ kwargs_schema=kwargs_op_strategy,
211
+ )
212
+
213
+ op_strategy = self.op_strategy_funcs[op_schema.op](mesh, strategy_schema)
214
+
215
+ if isinstance(op_strategy, OpStrategy):
216
+ # single Op strategy
217
+ output_strategy = self._select_strategy(op_strategy)
218
+
219
+ # check if we need to redistribute the input
220
+ needs_redistribute = False
221
+ expected_input_specs = []
222
+
223
+ # in case where the op does not specify input_specs and output_specs
224
+ # is a DTensorSpec, we use output_specs as the spec for each DTensor
225
+ # input arg.
226
+ if output_strategy.input_specs is None:
227
+ assert isinstance(output_strategy.output_specs, DTensorSpec)
228
+
229
+ for idx, input_spec in enumerate(op_schema.args_spec):
230
+ desired_spec = (
231
+ output_strategy.output_spec
232
+ if output_strategy.input_specs is None
233
+ else output_strategy.input_specs[idx]
234
+ )
235
+ expected_input_specs.append(desired_spec)
236
+ if input_spec.placements != desired_spec.placements:
237
+ needs_redistribute = True
238
+
239
+ suggestion_schema = None
240
+ if needs_redistribute:
241
+ reshard_schema = OpSchema(
242
+ op_schema.op, tuple(expected_input_specs), {}
243
+ )
244
+ reshard_schema._inplace_rewrap_schema_suggestion(op_schema)
245
+ suggestion_schema = [reshard_schema]
246
+
247
+ # construct output spec for the op
248
+ if op_schema.return_type_tuple_tensor_like():
249
+ # for ops that return multiple tensors and the output_specs is not
250
+ # a tuple, we use a tuple of that single output spec as the new
251
+ # output_specs
252
+ output_specs: OutputSpecType = output_strategy.output_specs
253
+ if isinstance(output_specs, DTensorSpec):
254
+ output_specs = tuple(
255
+ [
256
+ # create a new DTensorSpec with the same placement as the
257
+ # output_specs in output_strategy
258
+ DTensorSpec(
259
+ mesh=output_specs.mesh,
260
+ placements=output_specs.placements,
261
+ tensor_meta=output_specs.tensor_meta,
262
+ )
263
+ for _ in range(len(op_schema.op._schema.returns))
264
+ ]
265
+ )
266
+ elif op_schema.return_type_tensor():
267
+ output_specs = output_strategy.output_specs
268
+ else:
269
+ output_specs = None
270
+
271
+ output_sharding = OutputSharding(
272
+ output_specs,
273
+ suggestion_schema,
274
+ needs_redistribute=needs_redistribute,
275
+ )
276
+ elif isinstance(op_strategy, TupleStrategy):
277
+ # tuple strategy output sharding processing
278
+ # runtime selected placement strategy for each TupleStrategy input arg
279
+ selected_strategies: List[PlacementStrategy] = []
280
+ out_spec_list: List[DTensorSpec] = []
281
+ for strategy in op_strategy.childs:
282
+ assert isinstance(strategy, OpStrategy)
283
+ selected_strategy = self._select_strategy(strategy)
284
+ selected_strategies.append(selected_strategy)
285
+ out_spec_list.append(selected_strategy.output_spec)
286
+
287
+ needs_redistribute = False
288
+ suggestion_args: List[object] = []
289
+ for arg_idx, arg in enumerate(op_schema.args_schema):
290
+ if isinstance(arg, (list, tuple)) and isinstance(
291
+ arg[0], DTensorSpec
292
+ ):
293
+ expected_input_spec_list: List[DTensorSpec] = []
294
+ for idx, arg_spec in enumerate(arg):
295
+ expected_input_spec = selected_strategies[idx].input_spec(
296
+ arg_idx
297
+ )
298
+ expected_input_spec = (
299
+ expected_input_spec.shallow_copy_with_tensor_meta(
300
+ arg_spec.tensor_meta
301
+ )
302
+ )
303
+ if arg_spec.placements != expected_input_spec.placements:
304
+ needs_redistribute = True
305
+ expected_input_spec_list.append(expected_input_spec)
306
+ suggestion_args.append(
307
+ tuple(expected_input_spec_list)
308
+ if isinstance(arg, tuple)
309
+ else expected_input_spec_list
310
+ )
311
+ elif isinstance(arg, DTensorSpec):
312
+ expected_input_spec = selected_strategies[0].input_spec(arg_idx)
313
+ expected_input_spec = (
314
+ expected_input_spec.shallow_copy_with_tensor_meta(
315
+ arg.tensor_meta
316
+ )
317
+ )
318
+ if arg.placements != expected_input_spec.placements:
319
+ needs_redistribute = True
320
+ suggestion_args.append(expected_input_spec)
321
+ else:
322
+ suggestion_args.append(arg)
323
+
324
+ suggestion_schema = None
325
+ if needs_redistribute:
326
+ reshard_schema = OpSchema(
327
+ op_schema.op, tuple(suggestion_args), op_schema.kwargs_schema
328
+ )
329
+ suggestion_schema = [reshard_schema]
330
+
331
+ output_sharding = OutputSharding(
332
+ tuple(out_spec_list) if out_tensor_meta is not None else None,
333
+ suggestion_schema,
334
+ needs_redistribute=needs_redistribute,
335
+ )
336
+ else:
337
+ raise ValueError("Unsupported op strategy type")
338
+
339
+ # associate the output sharding with the output tensor metadata
340
+ self._wrap_output_spec_tensor_meta(
341
+ op_schema.op, output_sharding.output_spec, out_tensor_meta
342
+ )
343
+ return output_sharding
344
+ elif op_schema.op in self.op_to_rules:
345
+ # propagate the sharding with rule
346
+ sharding_prop_func = self.op_to_rules[op_schema.op]
347
+
348
+ # step 1. there's sharding propagation rule, run
349
+ # sharding propagation to get the output sharding
350
+ try:
351
+ output_sharding = sharding_prop_func(op_schema)
352
+ except NotImplementedError as e:
353
+ raise e
354
+ except Exception as e:
355
+ raise RuntimeError(
356
+ f"Sharding propagation failed on op {op_schema}.\n" f"Error: {e}"
357
+ ) from e
358
+
359
+ # step 2. if can't get output_spec from sharding
360
+ # propagation (i.e. no rules apply for input
361
+ # placements), we return the output sharding
362
+ # with schema suggestions, which can be used to
363
+ # decide how to do redistribute on inputs
364
+ if output_sharding.output_spec is None:
365
+ if output_sharding.schema_suggestions is None:
366
+ if output_sharding.failed_reason is not None:
367
+ raise RuntimeError(
368
+ f"Sharding propagation failed on op {op_schema}!"
369
+ f"Failed reason: {output_sharding.failed_reason}"
370
+ )
371
+ else:
372
+ # we do auto redistribute on inputs if necessary
373
+ # to get an eligible input, which we will pick a
374
+ # schema suggestion base on the redistribute cost.
375
+ # For now we simply pick the first suggestion.
376
+ suggested_input_schema = output_sharding.schema_suggestions[0]
377
+ # run sharding propagation again with suggested schema
378
+ propagation_res = sharding_prop_func(suggested_input_schema)
379
+ # we set the output sharding with the new propagation result
380
+ # so that dispatching know both output_spec and schema_suggestions
381
+ # exist, which indicates a reshard is needed
382
+ output_sharding.output_spec = propagation_res.output_spec
383
+ output_sharding.needs_redistribute = True
384
+
385
+ # associate the output sharding with the output tensor metadata
386
+ self._wrap_output_spec_tensor_meta(
387
+ op_schema.op, output_sharding.output_spec, out_tensor_meta
388
+ )
389
+
390
+ return output_sharding
391
+ else:
392
+ raise NotImplementedError(
393
+ f"Operator {op_schema.op} does not have a sharding strategy registered."
394
+ )
395
+
396
+ def _select_strategy(self, strategy: OpStrategy) -> PlacementStrategy:
397
+ if len(strategy.strategies) == 1:
398
+ # short cut with only one possible strategy
399
+ return strategy.strategies[0]
400
+
401
+ strategy_costs: List[float] = []
402
+ for strtg in strategy.strategies:
403
+ assert (
404
+ strtg.redistribute_cost is not None
405
+ ), "must set redistribute cost each strategy!"
406
+ redistribute_cost = sum(chain.from_iterable(strtg.redistribute_cost))
407
+ strategy_costs.append(redistribute_cost)
408
+
409
+ # for eager execution, we just select the one with the minimal redistribute cost
410
+ return strategy.strategies[strategy_costs.index(min(strategy_costs))]
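The cost-based selection in `_select_strategy` can be illustrated with a small, torch-free toy. The candidate names and cost numbers below are made up purely for illustration; the only point is the `sum(chain.from_iterable(...))` plus argmin shape of the decision.

# Toy illustration of the min-redistribute-cost selection; all numbers are invented.
from itertools import chain

candidate_costs = {
    "keep inputs as Shard(0)":  [[0.0], [0.0]],   # no resharding of either input
    "replicate both inputs":    [[4.0], [4.0]],   # all-gather twice
    "reshard one input":        [[4.0], [0.0]],   # all-gather once
}

totals = {
    name: sum(chain.from_iterable(per_input_costs))
    for name, per_input_costs in candidate_costs.items()
}
print(min(totals, key=totals.get))   # "keep inputs as Shard(0)"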
venv/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py ADDED
@@ -0,0 +1,277 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ # implement matrix related ops for distributed tensor
3
+ from typing import cast, Dict, List, Tuple
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ import torch.distributed._tensor.api as dtensor
8
+
9
+ aten = torch.ops.aten
10
+
11
+
12
+ def _requires_data_exchange(padding):
13
+ # TODO: whether there requires data exchange is currently determined by padding
14
+ return padding[1] != 0
15
+
16
+
17
+ def _is_supported(input_size, kernel_size, stride, padding, dilation):
18
+ if dilation[1] != 1:
19
+ raise RuntimeError("Dilation must be 1 for tensor parallel convolution.")
20
+ if padding[1] != 0:
21
+ if stride[1] != 1:
22
+ raise RuntimeError(
23
+ "Stride must be 1 when there is padding for tensor parallel convolution."
24
+ )
25
+ if kernel_size[3] // 2 > input_size[3]:
26
+ raise RuntimeError(
27
+ "kernel_size[3] // 2 should be less than or equal to input_size[3] for tensor parallel convolution."
28
+ )
29
+ else:
30
+ if not (input_size[3] % stride[1] == 0 and stride[1] == kernel_size[3]):
31
+ raise RuntimeError(
32
+ "It requires that input_size[3] is divisible by stride[1] and stride[1] equals kernel_size[3] "
33
+ "when there is padding for tensor parallel convolution."
34
+ )
35
+ return True
36
+
37
+
38
+ def _ring_send_recv_construct(in_tensor, d1, d2, left, right, rank, size):
39
+ # dist comms and reconstruct local input tensor
40
+ send_to_right = in_tensor[:, :, :, -d1:].contiguous()
41
+ send_to_left = in_tensor[:, :, :, :d2].contiguous()
42
+ recv_from_right = torch.zeros_like(send_to_left)
43
+ recv_from_left = torch.zeros_like(send_to_right)
44
+
45
+ send_op_right = dist.P2POp(dist.isend, send_to_right, right)
46
+ send_op_left = dist.P2POp(dist.isend, send_to_left, left)
47
+ recv_op_right = dist.P2POp(dist.irecv, recv_from_right, right)
48
+ recv_op_left = dist.P2POp(dist.irecv, recv_from_left, left)
49
+
50
+ reqs = dist.batch_isend_irecv(
51
+ [send_op_right, send_op_left, recv_op_left, recv_op_right]
52
+ )
53
+ for req in reqs:
54
+ req.wait()
55
+
56
+ if rank == 0:
57
+ in_tensor = torch.cat([in_tensor, recv_from_right], dim=-1)
58
+ elif rank == size - 1:
59
+ in_tensor = torch.cat([recv_from_left, in_tensor], dim=-1)
60
+ else:
61
+ in_tensor = torch.cat([recv_from_left, in_tensor, recv_from_right], dim=-1)
62
+
63
+ return in_tensor
64
+
65
+
66
+ def _ring_send_recv_aggregate(grad_in_tensor, d1, d2, left, right, rank, size):
67
+ # dist comms and aggregate gradients for edge pixels
68
+ send_to_right = grad_in_tensor[:, :, :, -d2:].contiguous()
69
+ send_to_left = grad_in_tensor[:, :, :, :d1].contiguous()
70
+ recv_from_right = torch.zeros_like(send_to_left)
71
+ recv_from_left = torch.zeros_like(send_to_right)
72
+
73
+ send_op_right = dist.P2POp(dist.isend, send_to_right, right)
74
+ send_op_left = dist.P2POp(dist.isend, send_to_left, left)
75
+ recv_op_right = dist.P2POp(dist.irecv, recv_from_right, right)
76
+ recv_op_left = dist.P2POp(dist.irecv, recv_from_left, left)
77
+
78
+ reqs = dist.batch_isend_irecv(
79
+ [send_op_right, send_op_left, recv_op_left, recv_op_right]
80
+ )
81
+ for req in reqs:
82
+ req.wait()
83
+
84
+ if rank == 0:
85
+ grad_in_tensor = grad_in_tensor[:, :, :, :-d2]
86
+ grad_in_tensor[:, :, :, -d1:] = torch.add(
87
+ grad_in_tensor[:, :, :, -d1:], recv_from_right
88
+ )
89
+ elif rank == size - 1:
90
+ grad_in_tensor = grad_in_tensor[:, :, :, d1:]
91
+ grad_in_tensor[:, :, :, :d2] = torch.add(
92
+ grad_in_tensor[:, :, :, :d2], recv_from_left
93
+ )
94
+ else:
95
+ grad_in_tensor = grad_in_tensor[:, :, :, d1:-d2]
96
+ grad_in_tensor[:, :, :, -d1:] = torch.add(
97
+ grad_in_tensor[:, :, :, -d1:], recv_from_right
98
+ )
99
+ grad_in_tensor[:, :, :, :d2] = torch.add(
100
+ grad_in_tensor[:, :, :, :d2], recv_from_left
101
+ )
102
+
103
+
104
+ def tp_convolution(
105
+ op_call: torch._ops.OpOverload,
106
+ local_tensor_args: Tuple[object, ...],
107
+ local_tensor_kwargs: Dict[str, object],
108
+ ) -> object:
109
+ assert op_call == aten.convolution.default
110
+ assert len(local_tensor_args) == 9
111
+
112
+ rank = dist.get_rank()
113
+ size = dist.get_world_size()
114
+ in_tensor = cast(torch.Tensor, local_tensor_args[0])
115
+ weight = cast(torch.Tensor, local_tensor_args[1])
116
+ stride, padding, dilation = local_tensor_args[3:6]
117
+
118
+ assert _is_supported(in_tensor.shape, weight.shape, stride, padding, dilation)
119
+ assert isinstance(padding, List)
120
+
121
+ if not _requires_data_exchange(padding):
122
+ local_results = op_call(*local_tensor_args, **local_tensor_kwargs)
123
+ return local_results
124
+ else:
125
+ # step 0 compute the overlap pixels of the input tensor
126
+ d = weight.shape[3] - 1
127
+ d1 = d // 2
128
+ d2 = d - d1
129
+ assert d1 + d2 == d
130
+ right = (rank + 1) % size
131
+ left = (rank - 1 + size) % size
132
+
133
+ # step1 reconstruct local input tensor
134
+ in_tensor = _ring_send_recv_construct(
135
+ in_tensor, d1, d2, left, right, rank, size
136
+ )
137
+
138
+ # step2 feed local input tensor to op_call
139
+ local_tensor_args_list = list(local_tensor_args)
140
+ local_tensor_args_list[0] = in_tensor
141
+ local_tensor_args = cast(Tuple[object, ...], local_tensor_args_list)
142
+ local_results = op_call(*local_tensor_args, **local_tensor_kwargs)
143
+
144
+ # step3 remove extra outputs from the results
145
+ padding_w = padding[1]
146
+ w = local_results.size(3)
147
+ if rank == 0:
148
+ local_results = local_results[:, :, :, : w - padding_w]
149
+ elif rank == size - 1:
150
+ local_results = local_results[:, :, :, padding_w:]
151
+ else:
152
+ local_results = local_results[:, :, :, padding_w : w - padding_w]
153
+
154
+ return local_results
155
+
156
+
157
+ def tp_convolution_backward(
158
+ op_call: torch._ops.OpOverload,
159
+ local_tensor_args: Tuple[object, ...],
160
+ local_tensor_kwargs: Dict[str, object],
161
+ ) -> object:
162
+ assert op_call == aten.convolution_backward.default
163
+ assert len(local_tensor_args) == 11
164
+
165
+ rank = dist.get_rank()
166
+ size = dist.get_world_size()
167
+ grad_out_tensor = cast(torch.Tensor, local_tensor_args[0])
168
+ in_tensor = cast(torch.Tensor, local_tensor_args[1])
169
+ weight = cast(torch.Tensor, local_tensor_args[2])
170
+ stride, padding, dilation = local_tensor_args[4:7]
171
+
172
+ assert _is_supported(in_tensor.shape, weight.shape, stride, padding, dilation)
173
+ assert isinstance(padding, List)
174
+
175
+ if not _requires_data_exchange(padding):
176
+ local_results = op_call(*local_tensor_args, **local_tensor_kwargs)
177
+ return local_results
178
+ else:
179
+ # step 0 compute the overlap pixels of the input tensor
180
+ d = weight.shape[3] - 1
181
+ d1 = d // 2
182
+ d2 = d - d1
183
+ assert d1 + d2 == d
184
+ right = (rank + 1) % size
185
+ left = (rank - 1 + size) % size
186
+
187
+ # step1 reconstruct local input tensor
188
+ in_tensor = _ring_send_recv_construct(
189
+ in_tensor, d1, d2, left, right, rank, size
190
+ )
191
+
192
+ # step2 reconstruct local gradient output tensor
193
+ N, C_out, H_out, _ = grad_out_tensor.shape
194
+ padding_w = padding[1]
195
+ if rank == 0:
196
+ grad_out_tensor = torch.nn.functional.pad(
197
+ grad_out_tensor, (0, padding_w), "constant", 0
198
+ )
199
+ elif rank == size - 1:
200
+ grad_out_tensor = torch.nn.functional.pad(
201
+ grad_out_tensor, (padding_w, 0), "constant", 0
202
+ )
203
+ else:
204
+ grad_out_tensor = torch.nn.functional.pad(
205
+ grad_out_tensor, (padding_w, padding_w), "constant", 0
206
+ )
207
+
208
+ # step3 feed local input tensor to op_call
209
+ local_tensor_args_list = list(local_tensor_args)
210
+ local_tensor_args_list[0] = grad_out_tensor
211
+ local_tensor_args_list[1] = in_tensor
212
+ local_tensor_args = cast(Tuple[object, ...], local_tensor_args_list)
213
+ local_results = op_call(*local_tensor_args, **local_tensor_kwargs)
214
+
215
+ # step4 aggregate gradients for edge pixels
216
+ grad_in_tensor = local_results[0]
217
+ grad_in_tensor = _ring_send_recv_aggregate(
218
+ grad_in_tensor, d1, d2, left, right, rank, size
219
+ )
220
+
221
+ local_results = list(local_results)
222
+ local_results[0] = grad_in_tensor
223
+ local_results = cast(Tuple[object, ...], local_results)
224
+
225
+ return local_results
226
+
227
+
228
+ def convolution_handler(
229
+ op_call: torch._ops.OpOverload,
230
+ args: Tuple[object, ...],
231
+ kwargs: Dict[str, object],
232
+ ) -> object:
233
+ # extract local tensor and sharding infos to a OpInfo
234
+ op_info = dtensor.DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs)
235
+
236
+ # sharding propagation
237
+ dtensor.DTensor._op_dispatcher.sharding_propagator.propagate(op_info)
238
+ output_sharding = op_info.output_sharding
239
+ assert output_sharding is not None, "output sharding should not be None"
240
+
241
+ # local propagation
242
+ local_results = tp_convolution(
243
+ op_call, tuple(op_info.local_args), op_info.local_kwargs
244
+ )
245
+
246
+ return dtensor.DTensor._op_dispatcher.wrap(
247
+ local_results, output_sharding.output_spec
248
+ )
249
+
250
+
251
+ def convolution_backward_handler(
252
+ op_call: torch._ops.OpOverload,
253
+ args: Tuple[object, ...],
254
+ kwargs: Dict[str, object],
255
+ ) -> object:
256
+ # Redistribute grad_output tensor to the same placement as input tensor
257
+ args = list(args)
258
+ assert isinstance(args[0], dtensor.DTensor) and isinstance(args[1], dtensor.DTensor)
259
+ args[0] = args[0].redistribute(args[1].device_mesh, args[1].placements)
260
+ args = tuple(args)
261
+
262
+ # extract local tensor and sharding infos to a OpInfo
263
+ op_info = dtensor.DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs)
264
+
265
+ # sharding propagation
266
+ dtensor.DTensor._op_dispatcher.sharding_propagator.propagate(op_info)
267
+ output_sharding = op_info.output_sharding
268
+ assert output_sharding is not None, "output sharding should not be None"
269
+
270
+ # local propagation
271
+ local_results = tp_convolution_backward(
272
+ op_call, tuple(op_info.local_args), op_info.local_kwargs
273
+ )
274
+
275
+ return dtensor.DTensor._op_dispatcher.wrap(
276
+ local_results, output_sharding.output_spec
277
+ )
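The halo exchange that `_ring_send_recv_construct` performs with P2P ops can be mimicked in a single process to see what each rank ends up convolving over. The sketch below is illustrative only: there is no process group, boundary ranks simply skip the missing side, and the kernel width of 3 is a hypothetical choice.

# Single-process sketch of the halo widening done before the width-sharded convolution.
import torch

k = 3                    # kernel width (hypothetical)
d = k - 1
d1 = d // 2              # columns taken from the left neighbour
d2 = d - d1              # columns taken from the right neighbour

full = torch.arange(16.0).reshape(1, 1, 1, 16)   # N, C, H, W
shards = list(full.chunk(4, dim=-1))             # what 4 ranks would hold (sharded on W)

def with_halo(rank):
    left = shards[rank - 1][..., -d1:] if rank > 0 else None
    right = shards[rank + 1][..., :d2] if rank < len(shards) - 1 else None
    pieces = [p for p in (left, shards[rank], right) if p is not None]
    return torch.cat(pieces, dim=-1)

print(with_halo(1).flatten())   # tensor([3., 4., 5., 6., 7., 8.])

After the local convolution, the extra output columns produced by the halo are trimmed away (the `padding_w` slicing in `tp_convolution`), which is the forward counterpart of the gradient aggregation done in `_ring_send_recv_aggregate`.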
venv/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (462 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ # Copyright 2019 Kakao Brain
2
+ #
3
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+ """A Pipe implementation in PyTorch."""
8
+ from .checkpoint import is_checkpointing, is_recomputing
9
+ from .pipe import Pipe, WithDevice
10
+ from .microbatch import NoChunk
11
+
12
+ __all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (457 Bytes).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc ADDED
Binary file (4.25 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc ADDED
Binary file (10.8 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc ADDED
Binary file (3.16 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc ADDED
Binary file (2.07 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc ADDED
Binary file (7.76 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc ADDED
Binary file (1.6 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc ADDED
Binary file (16.9 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (6.46 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc ADDED
Binary file (3.36 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.37 kB).

venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc ADDED
Binary file (4.14 kB).