applied-ai-018 committed
Commit 5b794ca · verified · 1 Parent(s): 0a828ae

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py +6 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py +28 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py +290 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py +61 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py +61 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py +35 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py +54 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py +97 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py +469 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py +143 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py +1253 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py +37 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py +16 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py +82 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py +248 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py +58 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py +211 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py +27 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py +4 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py +86 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py +342 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py +313 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/api.py +760 -0
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from .api import (
+     _shard_tensor,
+     load_with_process_group,
+     shard_module,
+     shard_parameter,
+ )
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc ADDED
Binary file (9.82 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc ADDED
Binary file (2.3 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (2.34 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from torch.distributed._shard.metadata import ShardMetadata
+ from typing import Sequence
+
+ DEPRECATE_MSG = "Please use DTensor instead and we are deprecating ShardedTensor."
+
+ def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor:
+     """
+     Narrow the tensor according to ``offsets`` and ``sizes``.
+     """
+     narrowed_tensor = tensor
+     for idx, (offset, size) in enumerate(zip(offsets, sizes)):
+         if size < tensor.size(idx):
+             # Reshape to get shard for this rank and we don't want autograd
+             # recording here for the narrow op and 'local_shard' should be a
+             # leaf variable in the autograd graph.
+             narrowed_tensor = narrowed_tensor.narrow(
+                 idx,
+                 offset,
+                 size
+             )
+     return narrowed_tensor
+
+ def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor:
+     """
+     Narrow the tensor according to the metadata
+     """
+     return narrow_tensor_by_index(tensor, metadata.shard_offsets, metadata.shard_sizes)
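The `narrow_tensor_by_index` helper added above slices a shard's view out of a full tensor from per-dimension offsets and sizes. A minimal single-process sketch (shapes are illustrative; it assumes the module lands at `torch.distributed._shard._utils` as in this diff):

import torch
from torch.distributed._shard._utils import narrow_tensor_by_index

# Take the lower (2, 6) block of a 4x6 tensor: offset 2 along dim 0, 0 along dim 1.
full = torch.arange(24).reshape(4, 6)
shard_view = narrow_tensor_by_index(full, [2, 0], [2, 6])
assert torch.equal(shard_view, full[2:4, :])  # narrow returns a view, not a copy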
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py ADDED
@@ -0,0 +1,290 @@
+ from contextlib import contextmanager
+ from typing import Optional
+ import torch
+ import torch.distributed as dist
+ import torch.nn as nn
+ from torch.distributed import distributed_c10d
+ from torch.distributed._shard.sharded_tensor import (
+     ShardedTensor,
+ )
+ from .sharding_spec import (
+     ShardingSpec,
+     ChunkShardingSpec
+ )
+ from .sharding_plan import (
+     ShardingPlan
+ )
+ from .sharder import Sharder
+
+ def _shard_tensor(
+     tensor: torch.Tensor, sharding_spec: ShardingSpec, src_rank=0, process_group=None
+ ) -> ShardedTensor:
+     """
+     Given a :class:`torch.Tensor`, it shards that tensor according to the provided
+     ``sharding_spec``. ``src_rank`` denotes the source rank which would be
+     used as the ground truth of the data which would be scattered as shards
+     across the rest of the ranks.
+
+     Args:
+         tensor (:class:`torch.Tensor`): Tensor needs to be sharded.
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+
+     Keyword args:
+         src_rank (int, optional): The source rank which is used as the ground truth of
+             the data for the parameter that would be sharded and scattered
+             across the rest of the ranks.
+             Default: 0.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+
+     Returns:
+         A :class:`ShardedTensor` sharded from the given tensor.
+
+     .. warning::
+         Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is
+         currently supported as the ``sharding_spec``.
+     """
+     if not tensor.is_contiguous():
+         raise ValueError('input tensor is not a contiguous Tensor')
+
+     pg = process_group if process_group is not None else distributed_c10d._get_default_group()
+     world_size = dist.get_world_size(pg)
+     current_rank = dist.get_rank(pg)
+
+     # Validate src_rank and sharding_spec are same across all ranks.
+     gathered_list = [None] * world_size
+     dist.all_gather_object(gathered_list, (src_rank, sharding_spec), group=pg)
+
+     for idx, entry in enumerate(gathered_list):
+         if src_rank != entry[0]:  # type: ignore[index]
+             raise ValueError(
+                 f'src_rank={src_rank} on rank: {current_rank} does not '  # type: ignore[index]
+                 f'match with src_rank={entry[0]} on rank: {idx}')
+         if sharding_spec != entry[1]:  # type: ignore[index]
+             raise ValueError(
+                 f'sharding_spec={sharding_spec} on rank: {current_rank} does not '  # type: ignore[index]
+                 f'match with sharding_spec={entry[1]} on rank: {idx}')
+
+     st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group)
+
+     return st
+
+ def shard_parameter(
+     module: torch.nn.Module,
+     param_name: str,
+     sharding_spec: ShardingSpec,
+     src_rank=0,
+     process_group=None):
+     """
+     Given a :class:`torch.nn.Module`, a ``param_name`` for a parameter in that
+     module, it shards that parameter according to the provided
+     ``sharding_spec``. ``src_rank`` denotes the source rank which would be
+     used as the ground truth of the data which would be scattered as shards
+     across the rest of the ranks.
+
+     This method replaces ``module.param_name`` with a
+     :class:`torch.distributed._sharded_tensor.ShardedTensor`
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose parameter needs to be sharded.
+         param_name (str): Name of the parameter of ``module`` that needs to be sharded.
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+
+     Keyword args:
+         src_rank (int, optional): The source rank which is used as the ground truth of
+             the data for the parameter that would be sharded and scattered
+             across the rest of the ranks.
+             Default: 0.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+
+     .. warning::
+         Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is
+         currently supported as the ``sharding_spec``.
+     """
+     # Perform some validation first.
+     if not hasattr(module, param_name):
+         raise AttributeError(f'{module._get_name()} has no attribute `{param_name}`')
+
+     tensor = getattr(module, param_name)
+     if not isinstance(tensor, torch.Tensor):
+         raise ValueError(f'Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}')
+
+     if not tensor.is_contiguous():
+         raise ValueError(f'param: {param_name} is not a contiguous Tensor')
+
+     st = _shard_tensor(tensor, sharding_spec, src_rank, process_group)
+
+     # Replace param with ShardedTensor.
+     module.register_parameter(param_name, nn.Parameter(st))
+
+ # Tracks the current process group in the load context manager.
+ _CURRENT_PROCESS_GROUP: Optional[dist.ProcessGroup] = None
+
+ @contextmanager
+ def load_with_process_group(process_group):
+     """
+     Context manager to set the process group with which to load a ShardedTensor.
+     """
+     global _CURRENT_PROCESS_GROUP
+     if _CURRENT_PROCESS_GROUP is not None:
+         raise RuntimeError(
+             'ProcessGroup already set by previous "load_with_process_group" '
+             'context manager')
+     _CURRENT_PROCESS_GROUP = process_group
+     try:
+         yield process_group
+     finally:
+         _CURRENT_PROCESS_GROUP = None
+
+ def _get_current_process_group():
+     """
+     Retrieves the current process group set by ``load_with_process_group``.
+     If not set, it just returns the default group.
+     """
+     global _CURRENT_PROCESS_GROUP
+     if _CURRENT_PROCESS_GROUP is None:
+         return distributed_c10d._get_default_group()
+     else:
+         return _CURRENT_PROCESS_GROUP
+
+ def _reshard_output(
+     module: torch.nn.Module,
+     resharding_spec: ShardingSpec) -> torch.nn.Module:
+     """
+     Hook a module with output resharding in the forward pass according
+     to the given ``resharding_spec``.
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose output needs to be resharded.
+         resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
+             The specification describing how the output of the module will be resharded.
+
+     Returns:
+         A :class:`torch.nn.Module` object with reshard API hooked.
+     """
+     def hook_func(_module, _input, output):
+         if isinstance(output, ShardedTensor):
+             return output.reshard(resharding_spec)
+         return output
+     module.register_forward_hook(hook_func)
+     return module
+
+ def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:
+     """
+     Hook a module with local shards collection in the forward pass.
+
+     This API is typically used to convert a sharded representation back to data parallel
+     representation. In particular, it returns the local tensor for this Shard. If the
+     size along the sharding dimension for the local tensor is 1, this dimension is removed
+     from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically
+     a local Tensor of size [16] across each rank and not [1, 16] across each rank.
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose output is ShardedTensor and the
+             local tensor value needs to be returned.
+
+     Returns:
+         A :class:`torch.nn.Module` object with collection API hooked.
+     """
+
+     def hook_func(_module, _input, output):
+         if isinstance(output, ShardedTensor):
+             local_tensor = output.local_tensor()
+             # Squeeze the # of dimensions manually, only applicable to ChunkShardingSpec
+             sharding_spec = output._sharding_spec
+             if isinstance(sharding_spec, ChunkShardingSpec) \
+                     and local_tensor.size(sharding_spec.dim) == 1:  # type: ignore[attr-defined, arg-type]
+                 local_tensor = local_tensor.squeeze(
+                     output._sharding_spec.dim  # type: ignore[attr-defined]
+                 )
+             return local_tensor
+     module.register_forward_hook(hook_func)
+     return module
+
+ def shard_module(
+     module: nn.Module,
+     plan: ShardingPlan,
+     src_rank=0,
+     process_group=None
+ ):
+     """
+     Shards a given module according to the provided sharding `plan`. This method
+     first shards all the parameters according to the given sharding `plan`. Then if
+     `output_plan` and `return_local_tensor` are specified in the sharding `plan`, it
+     will tag the output of modules according `output_plan`, convert the module's
+     output back to data parallel according to `return_local_tensor`.
+
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         module (:class:`torch.nn.Module`): The module to apply sharding to
+         plan (:class:`torch.distributed._shard.sharding_plan.ShardingPlan`):
+             The ShardingPlan which specified param name to ShardingSpec to apply to
+             each parameter.
+
+     Keyword args:
+         src_rank (int, optional): The source rank which is used as the ground truth of
+             the data for the module that would be sharded and scattered across the rest
+             of the ranks.
+             Default: 0.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+     """
+     # record Sharder paths for sanity check on the plan to ensure items in the plan
+     # does not conflict with the submodule tree that the Sharder is working with
+     sharder_paths = []
+     for name, spec in plan.plan.items():
+         if isinstance(spec, Sharder):
+             sharder_paths.append(name)
+
+     # shard the parameter according to the ShardingPlan
+     for name, spec in plan.plan.items():
+         if isinstance(spec, ShardingSpec):
+             # if found a sharding spec, try to shard the parameter
+             module_path, _, param_name = name.rpartition(".")
+
+             for sharder_path in sharder_paths:
+                 if module_path.startswith(sharder_path):
+                     raise RuntimeError(f"ShardingPlan is in-valid, trying to shard a parameter: {name},"
+                                        f" but there's already a Sharder entry for module {sharder_path},"
+                                        f" parameter sharding should not conflict with the submodule tree"
+                                        f" that a Sharder is working with!")
+
+             mod = module.get_submodule(module_path)
+             shard_parameter(
+                 mod,
+                 param_name,
+                 spec,
+                 src_rank=src_rank,
+                 process_group=process_group
+             )
+         elif isinstance(spec, Sharder):
+             parent_mod_path, _, mod_name = name.rpartition(".")
+             if name == "":
+                 raise KeyError("Module path must not be empty for custom sharder!")
+             mod = module.get_submodule(name)
+             parent_mod = module.get_submodule(parent_mod_path)
+             sharded_mod = spec.shard(mod)
+             # swap this submodule with the sharded module
+             parent_mod.mod_name = sharded_mod
+         else:
+             raise TypeError(f"Only `ShardingSpec` and `Sharder` are supported to shard '{name}'")
+
+     # reshard output if there's an entry in `reshard_output` for this module
+     if plan.output_plan is not None:
+         for module_path, output_spec in plan.output_plan.items():
+             if isinstance(output_spec, ShardingSpec):
+                 mod = module.get_submodule(module_path)
+                 _reshard_output(mod, output_spec)
+             else:
+                 raise TypeError(f"Only `ShardingSpec` is supported as output_plan for '{module_path}'")
+     # convert the output back to data parallel for the modules appears in
+     # `return_local_tensor` of the plan, we will call `_collect_local_shard`
+     # to collect the local tensor for output of modules
+     if plan.return_local_tensor is not None:
+         for module_path in plan.return_local_tensor:
+             mod = module.get_submodule(module_path)
+             _collect_local_shard(mod)
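The `shard_parameter` API added here is the entry point most of the later files build on. A minimal sketch of sharding a Linear layer's weight row-wise across two ranks (assumes `torch.distributed` is already initialized with two CUDA ranks and the snippet runs on every rank in SPMD fashion; the placements and sizes are illustrative):

import torch.nn as nn
from torch.distributed._shard import shard_parameter
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(
    dim=0,  # chunk the weight along its first dimension
    placements=["rank:0/cuda:0", "rank:1/cuda:1"],
)
model = nn.Linear(16, 32).cuda()
# Rank 0's weight is taken as ground truth and scattered as two row-shards.
shard_parameter(model, "weight", spec, src_rank=0)
# model.weight is now an nn.Parameter wrapping a ShardedTensor.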
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py ADDED
@@ -0,0 +1,61 @@
+ import torch
+ from torch.utils import _pytree as pytree
+ from typing import Optional
+
+ def _basic_validation(op, args=(), kwargs=None):
+     """
+     Common validation across all ops go in here.
+     """
+     from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+     if len(args) == 0 and (kwargs is None or len(kwargs) == 0):
+         raise ValueError(f" No input for '{op.__name__}'!")
+
+     # Validate types
+     has_distributed_tensor = False
+
+     def is_distributed_tensor(e):
+         nonlocal has_distributed_tensor
+         if isinstance(e, ShardedTensor):
+             has_distributed_tensor = True
+
+     pytree.tree_map_(is_distributed_tensor, args)
+     pytree.tree_map_(is_distributed_tensor, kwargs)
+
+     if not has_distributed_tensor:
+         raise TypeError(
+             f"torch function '{op.__name__}', with args: {args} and "
+             f"kwargs: {kwargs} are called without any distributed tensor!"
+         )
+
+     # Validate all distributed tensors use the same PG.
+     cur_pg: Optional[torch.distributed.ProcessGroup] = None
+
+     def validate_pg(e):
+         nonlocal cur_pg
+         if isinstance(e, ShardedTensor):
+             if cur_pg is not None and e._process_group is not cur_pg:
+                 raise RuntimeError(
+                     'All distributed tensors should use the '
+                     'same ProcessGroup if used together in an op.'
+                 )
+             cur_pg = e._process_group
+
+     pytree.tree_map_(validate_pg, args)
+     pytree.tree_map_(validate_pg, kwargs)
+
+ def _register_default_op(op, decorator):
+     @decorator(op)
+     def tensor_default_op(types, args=(), kwargs=None, pg=None):
+         """
+         Handles ``__torch_function__`` dispatch for the default tensor ops that
+         behave the same as ``torch.Tensor`` such as ``torch.Tensor.shape`` or
+         ``torch.Tensor.dtype``. We simply lower to the real op call with
+         DisableTorchFunctionSubclass context like ``torch.Tensor.__torch_function__``
+         to avoid recursions.
+         """
+         if kwargs is None:
+             kwargs = {}
+
+         with torch._C.DisableTorchFunctionSubclass():
+             return op(*args, **kwargs)
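`_register_default_op` expects a decorator factory (such as the `_sharded_op_impl` registered later in this commit) and simply re-dispatches the call under `DisableTorchFunctionSubclass`. A toy sketch of that wiring with a stand-in registry (the `_TOY_OP_TABLE` and `_toy_op_impl` names are illustrative, not part of the commit):

import torch
from torch.distributed._shard.common_op_utils import _register_default_op

_TOY_OP_TABLE = {}

def _toy_op_impl(op):
    # Decorator factory: stores the handler for ``op`` in the toy table.
    def decorator(func):
        _TOY_OP_TABLE[op] = func
        return func
    return decorator

_register_default_op(torch.Tensor.size, _toy_op_impl)
handler = _TOY_OP_TABLE[torch.Tensor.size]
print(handler(types=(torch.Tensor,), args=(torch.zeros(2, 3),)))  # torch.Size([2, 3])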
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py ADDED
@@ -0,0 +1,61 @@
+ from dataclasses import dataclass
+ from typing import List, Union, Optional
+ from functools import reduce
+
+ from torch.distributed.remote_device import _remote_device
+
+ @dataclass
+ class ShardMetadata:
+     """
+     Represents a shard of the overall Tensor including its
+     offsets, lengths and device placement.
+
+     Args:
+         shard_offsets(List[int]): Offsets in the original tensor indicating
+             the start offsets for this shard. Should have the same rank as
+             the original tensor.
+         shard_sizes(List[int]): Integers indicating the size of each
+             dimension for this shard. Should have the same rank as the
+             original tensor.
+         placement(:class:`torch.distributed._remote_device`):
+             Specifies the placement of this shard.
+     """
+
+     __slots__ = ['shard_offsets', 'shard_sizes', 'placement']
+
+     shard_offsets: List[int]
+     shard_sizes: List[int]
+     placement: Optional[_remote_device]
+
+     def __init__(
+         self,
+         shard_offsets: List[int],
+         shard_sizes: List[int],
+         placement: Optional[Union[str, _remote_device]] = None
+     ):
+         self.shard_offsets = shard_offsets
+         self.shard_sizes = shard_sizes
+         if isinstance(placement, str):
+             self.placement = _remote_device(placement)
+         else:
+             self.placement = placement
+         if len(self.shard_offsets) != len(self.shard_sizes):
+             raise ValueError(
+                 f'shard_offsets and shard_sizes should have '
+                 f'the same number of elements, found {len(self.shard_offsets)} '
+                 f'and {self.shard_sizes} respectively')
+
+         for i in range(len(self.shard_offsets)):
+             if self.shard_offsets[i] < 0:
+                 raise ValueError('shard_offsets should be >=0')
+             if self.shard_sizes[i] < 0:
+                 raise ValueError('shard_sizes should be >= 0')
+
+     def __hash__(self):
+         def _hash_reduce(a, b):
+             return (a << 8) + hash(b)
+
+         res = reduce(_hash_reduce, self.shard_offsets, 37)
+         res = reduce(_hash_reduce, self.shard_sizes, res)
+         res = _hash_reduce(res, self.placement)
+         return res
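`ShardMetadata` is the plain-data description each shard carries. A small sketch describing the two halves of a global (10, 5) tensor (the placement strings are illustrative and are parsed into `torch.distributed._remote_device` instances):

from torch.distributed._shard.metadata import ShardMetadata

top = ShardMetadata(shard_offsets=[0, 0], shard_sizes=[5, 5], placement="rank:0/cuda:0")
bottom = ShardMetadata(shard_offsets=[5, 0], shard_sizes=[5, 5], placement="rank:1/cuda:1")
# Offsets and sizes must have one entry per tensor dimension and be non-negative.
assert top.placement.rank() == 0 and bottom.placement.rank() == 1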
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py ADDED
@@ -0,0 +1,35 @@
+ import functools
+ from inspect import signature
+ from .common_op_utils import _basic_validation
+
+ """
+ Common utilities to register ops on ShardedTensor
+ and PartialTensor.
+ """
+
+ def _register_op(op, func, op_table):
+     """
+     Performs basic validation and registers the provided op in the given
+     op_table.
+     """
+     if len(signature(func).parameters) != 4:
+         raise TypeError(
+             f'Custom sharded op function expects signature: '
+             f'(types, args, kwargs, process_group), but received '
+             f'signature: {signature(func)}')
+
+     op_table[op] = func
+
+ def _decorator_func(wrapped_func, op, op_table):
+     """
+     Decorator function to register the given ``op`` in the provided
+     ``op_table``
+     """
+
+     @functools.wraps(wrapped_func)
+     def wrapper(types, args, kwargs, process_group):
+         _basic_validation(op, args, kwargs)
+         return wrapped_func(types, args, kwargs, process_group)
+
+     _register_op(op, wrapper, op_table)
+     return wrapper
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py ADDED
@@ -0,0 +1,54 @@
+ from typing import Iterator, Tuple, Union
+ from .api import ShardedOptimizer
+
+ import torch.nn as nn
+
+ from torch.distributed._shard.sharded_tensor import (
+     ShardedTensor
+ )
+
+ def named_params_with_sharded_tensor(
+     module: nn.Module,
+     prefix: str = '',
+     recurse: bool = True,
+ ) -> Iterator[Tuple[str, Union[nn.Parameter, ShardedTensor]]]:
+
+     r"""Returns an iterator over module parameters (together with the
+     ShardedTensor parameters), yielding both the name of the parameter
+     as well as the parameter itself. This is typically passed to a
+     :class:torch.distributed._shard.sharded_optim.ShardedOptimizer
+
+     Args:
+         prefix (str): prefix to prepend to all parameter names.
+         recurse (bool): if True, then yields parameters of this module
+             and all submodules. Otherwise, yields only parameters that
+             are direct members of this module.
+
+     Yields:
+         (str, Union[Tensor, ShardedTensor]): Tuple containing
+             the name and parameter (or ShardedTensor parameter)
+
+     Example::
+
+         >>> # xdoctest: +SKIP
+         >>> model = torch.nn.Linear(*linear_size)
+         >>> shard_parameter(model, "weight", spec)
+         >>> for name, param in named_params_with_sharded_tensor(model):
+         >>>     if name in ['weight']:
+         >>>         print(param.size())
+
+     """
+     modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
+
+     memo = set()
+     for mod_prefix, mod in modules:
+         # find all sharded tensor params
+         for name, val in vars(mod).items():
+             if isinstance(val, ShardedTensor) and val not in memo:
+                 memo.add(val)
+                 name = mod_prefix + ('.' if mod_prefix else '') + name
+                 yield name, val
+
+     # find all nn.Parameters
+     for name, val in module.named_parameters():
+         yield name, val
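Putting the two pieces of this package together: `named_params_with_sharded_tensor` feeds `ShardedOptimizer`, which unwraps each ShardedTensor into its local shards before handing them to a regular optimizer. A hedged sketch (assumes a process group is initialized and `model.weight` was already sharded with `shard_parameter`, as in the doctest above):

import torch
from torch.distributed._shard.sharded_optim import (
    ShardedOptimizer,
    named_params_with_sharded_tensor,
)

sharded_optim = ShardedOptimizer(
    dict(named_params_with_sharded_tensor(model)),  # ``model`` from the doctest above
    torch.optim.SGD,   # local optimizer class applied to each rank's shards
    lr=0.01,
)
sharded_optim.zero_grad()
# ... forward and backward on the sharded model ...
sharded_optim.step()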
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc ADDED
Binary file (4.58 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py ADDED
@@ -0,0 +1,97 @@
+ from typing import List, Union, Mapping, Dict, Any
+
+ import torch.optim as optim
+ from torch import Tensor
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+
+ class ShardedOptimizer(optim.Optimizer):
+     def __init__(
+         self,
+         named_params: Mapping[str, Union[Tensor, ShardedTensor]],
+         optimizer_class,
+         *optimizer_args,
+         **optimizer_kwargs
+     ):
+         """
+         ShardedOptimizer collects all tensors and local shard tensors of
+         ShardedTensor, then use these tensors as ``params`` for optimizers
+
+         Args:
+             named_params (Dict[str, Union[Tensor, ShardedTensor]]) : a Dict
+                 of parameters, where key is the parameter key, value is either
+                 Tensor or ShardedTensor parameter.
+             optimizer_class (torch.optim.Optimizer): the Optimizer to use
+                 locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc.
+             *optimizer_args: the arguments to initialize the optimizer.
+             **optimizer_kwargs: the key-word arguments to initialize the optimizer.
+
+         """
+         tensors: List[Tensor] = []
+         for value in named_params.values():
+             if isinstance(value, ShardedTensor):
+                 for local_shard in value.local_shards():
+                     tensors.append(local_shard.tensor)
+             else:
+                 tensors.append(value)
+
+         self.named_params = named_params
+         self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs)
+         self.param_groups = self._optim.param_groups
+         self.state = self._optim.state
+
+     def zero_grad(self, set_to_none: bool = True):  # type: ignore[override]
+         r"""Resets the gradients of all optimized :class:`torch.Tensor` s.
+
+         Args:
+             set_to_none (bool): instead of setting to zero, set the grads to None.
+                 This will in general have lower memory footprint, and can modestly improve performance.
+                 However, it changes certain behaviors. For example:
+                 1. When the user tries to access a gradient and perform manual ops on it,
+                 a None attribute or a Tensor full of 0s will behave differently.
+                 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
+                 are guaranteed to be None for params that did not receive a gradient.
+                 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
+                 (in one case it does the step with a gradient of 0 and in the other it skips
+                 the step altogether).
+         """
+         self._optim.zero_grad(set_to_none)
+
+     def step(self, closure=None):
+         r"""Performs a single optimization step (parameter update).
+
+         Args:
+             closure (Callable): A closure that reevaluates the model and
+                 returns the loss. Optional for most optimizers.
+
+         .. note::
+             Unless otherwise specified, this function should not modify the
+             ``.grad`` field of the parameters.
+         """
+         self._optim.step(closure)
+
+     def state_dict(self) -> Dict[str, Any]:
+         """
+         Returned state and param_groups will contain parameter keys
+         instead of parameter indices like torch.optim.Optimizer.
+         This allows for advanced functionality like optimizer re-sharding to be implemented.
+         """
+         # TODO: implement state_dict
+         raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!")
+
+
+     def load_state_dict(self, state_dict: Mapping[str, Any]):
+         r"""Loads the ShardedOptimizer state.
+
+         Args:
+             state_dict (dict): ShardedOptimizer state. Should be an object returned
+                 from a call to :meth:`state_dict`.
+         """
+         # TODO: implement load_state_dict
+         raise NotImplementedError("ShardedOptimizer load_state_dict not implemented yet!")
+
+     def add_param_group(self, param_group: Any):
+         r"""Add a new param group
+         """
+         # TODO: implement add_param_group
+         raise NotImplementedError("ShardedOptimizer add_param_group not implemented yet!")
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py ADDED
@@ -0,0 +1,469 @@
+ import functools
+ from typing import List, TYPE_CHECKING
+
+ import torch
+
+ if TYPE_CHECKING:
+     from torch.distributed._shard.sharding_spec import ShardingSpec
+ else:
+     ShardingSpec = "ShardingSpec"
+
+ from .api import (
+     _CUSTOM_SHARDED_OPS,
+     _SHARDED_OPS,
+     Shard,
+     ShardedTensorBase,
+     ShardedTensor,
+     ShardedTensorMetadata,
+     TensorProperties,
+ )
+ from .metadata import ShardMetadata  # noqa: F401
+ from torch.distributed._shard.op_registry_utils import _decorator_func
+
+
+ def empty(sharding_spec: ShardingSpec,
+           *size,
+           dtype=None,
+           layout=torch.strided,
+           requires_grad=False,
+           pin_memory=False,
+           memory_format=torch.contiguous_format,
+           process_group=None,
+           init_rrefs=False) -> ShardedTensor:
+     """
+     Returns a :class:`ShardedTensor` filled with uninitialized data.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a sequence of integers defining the shape of the output
+             tensor. Can be a variable number of arguments or a collection like a list or tuple.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         memory_format (:class:`torch.memory_format`, optional): the desired memory format of
+             returned Tensor. Default: ``torch.contiguous_format``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     return ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+
+ def ones(sharding_spec: ShardingSpec,
+          *size,
+          dtype=None,
+          layout=torch.strided,
+          requires_grad=False,
+          pin_memory=False,
+          memory_format=torch.contiguous_format,
+          process_group=None,
+          init_rrefs=False) -> ShardedTensor:
+     """
+     Returns a :class:`ShardedTensor` with the scalar value 1.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a sequence of integers defining the shape of the output
+             tensor. Can be a variable number of arguments or a collection like a list or tuple.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     return full(
+         sharding_spec,
+         size,
+         fill_value=1,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs
+     )
+
+ def zeros(sharding_spec: ShardingSpec,
+           *size,
+           dtype=None,
+           layout=torch.strided,
+           requires_grad=False,
+           pin_memory=False,
+           memory_format=torch.contiguous_format,
+           process_group=None,
+           init_rrefs=False) -> ShardedTensor:
+     """
+     Returns a :class:`ShardedTensor` filled with the scalar value 0.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a sequence of integers defining the shape of the output
+             tensor. Can be a variable number of arguments or a collection like a list or tuple.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     return full(
+         sharding_spec,
+         size,
+         fill_value=0,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs
+     )
+
+ def full(sharding_spec: ShardingSpec,
+          size,
+          fill_value,
+          *,
+          dtype=None,
+          layout=torch.strided,
+          requires_grad=False,
+          pin_memory=False,
+          memory_format=torch.contiguous_format,
+          process_group=None,
+          init_rrefs=False) -> ShardedTensor:
+     """
+     Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype
+     is inferred from fill_value. If dtype is specified, it will override the
+     inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.
+     Args:
+         sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
+             output tensor.
+         fill_value (Scalar) – the value to fill the output tensor with.
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     sharded_tensor = ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+     torch.nn.init.constant_(sharded_tensor, fill_value)  # type: ignore[arg-type]
+     return sharded_tensor
+
+ def rand(sharding_spec: ShardingSpec,
+          *size,
+          dtype=None,
+          layout=torch.strided,
+          requires_grad=False,
+          pin_memory=False,
+          memory_format=torch.contiguous_format,
+          process_group=None,
+          init_rrefs=False) -> ShardedTensor:
+     """
+     Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution
+     on the interval :math:`[0, 1)`. The shape of the tensor is defined by the
+     variable argument `size`. Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
+             output tensor.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     sharded_tensor = ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+     torch.nn.init.uniform_(sharded_tensor, 0, 1)  # type: ignore[arg-type]
+     return sharded_tensor
+
+ def randn(sharding_spec: ShardingSpec,
+           *size,
+           dtype=None,
+           layout=torch.strided,
+           requires_grad=False,
+           pin_memory=False,
+           memory_format=torch.contiguous_format,
+           process_group=None,
+           init_rrefs=False) -> ShardedTensor:
+     """
+     Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution
+     with mean `0` and variance `1` (also called standard normal distribution). The shape
+     of the tensor is defined by the variable argument `size`. Needs to be called on all ranks
+     in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
+             output tensor.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     sharded_tensor = ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+     torch.nn.init.normal_(sharded_tensor, 0, 1)  # type: ignore[arg-type]
+     return sharded_tensor
+
+ def init_from_local_shards(
+     local_shards: List[Shard],
+     *global_size,
+     process_group=None,
+     init_rrefs=False) -> ShardedTensor:
+     """
+     Creates an :class:`ShardedTensor` from local shards and the global metadata.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list
+             of shards that represent the local shards on this rank.
+         global_size (int...): a list, tuple, or `torch.Size` of integers defining the
+             shape of the overall sharded tensor.
+
+     Keyword args:
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object handle on this rank
+
+
+     Examples:
+         Suppose we want construct a sharded tensor on two ranks, global size = (10, 5),
+         each shard have a (5, 5) local tensor, we can do it like below:
+
+         on rank 0:
+         >>> # xdoctest: +SKIP("not distributed")
+         >>> local_shard_metadata = ShardMetadata(
+         >>>     shard_offsets=[0, 0],
+         >>>     shard_lengths=[5, 5],
+         >>>     placement="rank:0/cuda:0"
+         >>> )
+         >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
+         >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
+
+         on rank 1:
+         >>> # xdoctest: +SKIP("not distributed")
+         >>> local_shard_metadata = ShardMetadata(
+         >>>     shard_offsets=[5, 0],
+         >>>     shard_lengths=[5, 5],
+         >>>     placement="rank:1/cuda:1"
+         >>> )
+         >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
+         >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
+     """
+     return ShardedTensor._init_from_local_shards(
+         local_shards,
+         *global_size,
+         process_group=process_group,
+         init_rrefs=init_rrefs
+     )
+
+ def state_dict_hook(module, destination, prefix, local_metadata):
+     """
+     Hook to add ShardedTensor to Module's ``state_dict``. Needs to be
+     registered to the Module using
+     :meth:`torch.nn.Module._register_state_dict_hook`.
+     """
+     for submodule_name, submodule in module.named_modules():
+         for attr_name, attr in submodule.__dict__.items():
+             if isinstance(attr, ShardedTensor):
+                 mod_prefix = prefix + submodule_name
+                 key = mod_prefix + ('.' if mod_prefix else '') + attr_name
+                 destination[key] = attr
+
+ def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+     """
+     Pre-load state dict hook to add ShardedTensor to the module.
+     """
+     for submodule_name, submodule in module.named_modules():
+         for attr_name in submodule.__dict__.keys():
+             mod_prefix = prefix + submodule_name
+             key = mod_prefix + ('.' if mod_prefix else '') + attr_name
+             if key in state_dict:
+                 if isinstance(state_dict[key], ShardedTensor):
+                     setattr(submodule, attr_name, state_dict[key])
+
+ def custom_sharded_op_impl(func):
+     """
+     Provides a way for users to write their own custom sharded operator. This
+     can be used to override existing ShardedTensor operators or write a new
+     one not supported by ShardedTensor. If the operator in question is covered
+     by ``__torch_function__`` dispatch and has a ShardedTensor as any of its
+     parameters, the function provided will be invoked for that operator.
+
+     Example::
+         >>> # xdoctest: +SKIP
+         >>> @custom_sharded_op_impl(torch.nn.functional.linear)
+         >>> def my_custom_sharded_linear(types, args, kwargs, process_group):
+         >>>     ...
+         >>> # xdoctest: +SKIP("Undefined variables")
+         >>> input = torch.rand(10, 32)
+         >>> weight = sharded_tensor.rand(32, 16)
+         >>> bias = torch.rand(16)
+         >>> # This will call 'my_custom_sharded_linear'
+         >>> torch.nn.functional.linear(input, weight, bias)
+
+     The types, args and kwargs parameters are the same parameters that are
+     passed to ``__torch_function__`` dispatch API
+     (https://pytorch.org/docs/stable/notes/extending.html#extending-torch).
+     There is an additional ``process_group`` parameter which is the
+     process_group used for the ShardedTensor and can be used by
+     implementations for communications within a sharded implementation.
+
+     Args:
+         func(Callable): Torch function for which we want to provide a sharded
+             implementation (ex: torch.nn.functional.linear)
+     """
+     return functools.partial(
+         _decorator_func,
+         op=func,
+         op_table=_CUSTOM_SHARDED_OPS
+     )
+
+ def _sharded_op_impl(func):
+     """
+     Decorator to register a default sharded op.
+     """
+     return functools.partial(
+         _decorator_func,
+         op=func,
+         op_table=_SHARDED_OPS
+     )
+
+ # Import all builtin sharded ops
+ from ._ops import *  # noqa: F403
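The creation ops above mirror `torch.empty/ones/zeros/full/rand/randn`, but each rank only materializes its own local shards. A minimal sketch (assumes `torch.distributed` is initialized with two CUDA ranks and runs identically on every rank; placements and sizes are illustrative):

import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(
    dim=0,
    placements=["rank:0/cuda:0", "rank:1/cuda:1"],
)
# Global shape (10, 5); each rank holds one (5, 5) local shard filled with ones.
st = sharded_tensor.ones(spec, 10, 5)
print(st.local_shards()[0].tensor.shape)  # torch.Size([5, 5]) on both ranks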
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (17.2 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc ADDED
Binary file (34.6 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc ADDED
Binary file (1.16 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc ADDED
Binary file (430 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (2.69 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc ADDED
Binary file (8.15 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc ADDED
Binary file (2.46 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc ADDED
Binary file (5.54 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc ADDED
Binary file (4.76 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.distributed._shard.sharded_tensor as sharded_tensor
3
+ from torch.distributed._shard.sharded_tensor import (
4
+ _sharded_op_impl,
5
+ )
6
+
7
+ def validate_param(param, param_name):
8
+ if param is None:
9
+ raise ValueError(f"param: {param_name} shouldn't be None!")
10
+
11
+ @_sharded_op_impl(torch.nn.init.uniform_)
12
+ def uniform_(types, args=(), kwargs=None, pg=None):
13
+ r"""
14
+ Fills the Tensor in tensor.local_shards with values drawn from the uniform
15
+ distribution :math:`\mathcal{U}(a, b)`.
16
+ Args:
17
+ tensor: tensor sharded across devices
18
+ a: the lower bound of the uniform distribution
19
+ b: the upper bound of the uniform distribution
20
+ """
21
+ validate_param(kwargs, "kwargs")
22
+ sharded_tensor = kwargs["tensor"]
23
+ validate_param(sharded_tensor, "tensor")
24
+ a = kwargs['a']
25
+ validate_param(a, "a")
26
+ b = kwargs['b']
27
+ validate_param(b, "b")
28
+
29
+ for shard in sharded_tensor.local_shards():
30
+ torch.nn.init.uniform_(shard.tensor, a=a, b=b)
31
+ return sharded_tensor
32
+
33
+ @_sharded_op_impl(torch.nn.init.normal_)
34
+ def normal_(types, args=(), kwargs=None, pg=None):
35
+ r"""
36
+ Fills the Tensors in tensor.local_shards with values drawn from the normal
37
+ distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
38
+ Args:
39
+ tensor: tensor sharded across devices
40
+ mean: the mean of the normal distribution
41
+ std: the standard deviation of the normal distribution
42
+ """
43
+ validate_param(kwargs, "kwargs")
44
+ sharded_tensor = kwargs["tensor"]
45
+ validate_param(sharded_tensor, "tensor")
46
+ mean = kwargs['mean']
47
+ validate_param(mean, "mean")
48
+ std = kwargs['std']
49
+ validate_param(std, "std")
50
+
51
+ for shard in sharded_tensor.local_shards():
52
+ torch.nn.init.normal_(shard.tensor, mean=mean, std=std)
53
+ return sharded_tensor
54
+
55
+ @_sharded_op_impl(torch.nn.init.kaiming_uniform_)
56
+ def kaiming_uniform_(types, args=(), kwargs=None, pg=None):
57
+ r"""
58
+ Fills the Tensors in tensor.local_shards with values according to the method
59
+ described in `Delving deep into rectifiers: Surpassing human-level
60
+ performance on ImageNet classification` - He, K. et al. (2015), using a
61
+ uniform distribution. The resulting tensor will have values sampled from
62
+ :math:`\mathcal{U}(-\text{bound}, \text{bound})` where
63
+ .. math::
64
+ \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
65
+ Also known as He initialization.
66
+ Args:
67
+ tensor: tensor sharded across devices
68
+ a: the negative slope of the rectifier used after this layer (only
69
+ used with ``'leaky_relu'``)
70
+ mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
71
+ preserves the magnitude of the variance of the weights in the
72
+ forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
73
+ backwards pass.
74
+ nonlinearity: the non-linear function (`nn.functional` name),
75
+ recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
76
+ """
77
+ validate_param(kwargs, "kwargs")
78
+ sharded_tensor = kwargs["tensor"]
79
+ validate_param(sharded_tensor, "tensor")
80
+ a = kwargs['a']
81
+ validate_param(a, "a")
82
+ mode = kwargs['mode']
83
+ validate_param(mode, "mode")
84
+ nonlinearity = kwargs['nonlinearity']
85
+ validate_param(nonlinearity, "nonlinearity")
86
+
87
+ for shard in sharded_tensor.local_shards():
88
+ torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity)
89
+ return sharded_tensor
90
+
91
+ @_sharded_op_impl(torch.nn.init.constant_)
92
+ def constant_(types, args=(), kwargs=None, pg=None):
93
+ r"""
94
+ Fills the input ShardedTensor with the value :math:`\text{val}`.
95
+ Args:
96
+ tensor: tensor sharded across devices
97
+ val: the value to fill the tensor with
98
+ """
99
+ validate_param(kwargs, "kwargs")
100
+ sharded_tensor = kwargs["tensor"]
101
+ validate_param(sharded_tensor, "tensor")
102
+ val = kwargs['val']
103
+ validate_param(val, "val")
104
+ for shard in sharded_tensor.local_shards():
105
+ torch.nn.init.constant_(shard.tensor, val=val)
106
+ return sharded_tensor
107
+
108
+ tensor_like_creation_op_map = {
109
+ torch.full_like: sharded_tensor.full,
110
+ torch.empty_like: sharded_tensor.empty,
111
+ torch.zeros_like: sharded_tensor.zeros,
112
+ torch.ones_like: sharded_tensor.ones,
113
+ torch.rand_like: sharded_tensor.rand,
114
+ torch.randn_like: sharded_tensor.randn,
115
+ }
116
+
117
+ # tensor ops that behave the same as the default tensor
118
+ def register_tensor_creation_op(op):
119
+ @_sharded_op_impl(op)
120
+ def tensor_creation_op(types, args=(), kwargs=None, pg=None):
121
+ """
122
+ Handles ``__torch_function__`` dispatch for tensor creation ops that
123
+ takes a ShardedTensor as argument, such as ``torch.zeros_like`` or
124
+ ``torch.full_like``.
125
+ """
126
+ creation_op = tensor_like_creation_op_map.get(op, None)
127
+ if creation_op is None:
128
+ raise RuntimeError(f"Tensor creation {op} not supported!")
129
+ if kwargs is None:
130
+ kwargs = {}
131
+
132
+ st = args[0]
133
+
134
+ new_st = creation_op(st.sharding_spec(), st.size(), *args[1:], **kwargs) # type: ignore[operator]
135
+ return new_st
136
+
137
+
138
+ register_tensor_creation_op(torch.full_like)
139
+ register_tensor_creation_op(torch.empty_like)
140
+ register_tensor_creation_op(torch.zeros_like)
141
+ register_tensor_creation_op(torch.ones_like)
142
+ register_tensor_creation_op(torch.rand_like)
143
+ register_tensor_creation_op(torch.randn_like)
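
The registrations above mean that the standard torch.nn.init functions and the *_like creation ops, when handed a ShardedTensor, are re-dispatched through ``__torch_function__`` and applied shard by shard. A minimal usage sketch follows (illustration only, not part of the added file); it assumes a 2-rank job with one GPU per rank, an already-initialized default process group, and the ``sharded_tensor.rand`` factory exposed by this package:

import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

# Hypothetical 2-rank setup; the placements below are an assumption of this sketch.
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])
st = sharded_tensor.rand(spec, 8, 4)

# These calls hit the @_sharded_op_impl handlers above, which loop over
# st.local_shards() and initialize each local tensor in place.
torch.nn.init.uniform_(st, a=0.0, b=1.0)
torch.nn.init.constant_(st, val=0.5)

# *_like ops are rerouted through tensor_like_creation_op_map and return a new
# ShardedTensor with the same sharding spec and global size.
zeros_st = torch.zeros_like(st)
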
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py ADDED
@@ -0,0 +1,1253 @@
1
+ from __future__ import annotations # type: ignore[attr-defined]
2
+ from dataclasses import dataclass
3
+ from typing import (
4
+ Callable,
5
+ Dict,
6
+ List,
7
+ Optional,
8
+ Sequence,
9
+ Tuple,
10
+ cast,
11
+ )
12
+ import copy
13
+ import warnings
14
+ from functools import reduce
15
+ import weakref
16
+
17
+ import threading
18
+ import torch
19
+ import torch.distributed as dist
20
+ from torch.distributed import rpc
21
+ from torch.distributed import distributed_c10d
22
+ from torch.distributed._shard.metadata import ShardMetadata
23
+ import torch.distributed._shard.sharding_spec as shard_spec
24
+ from torch.distributed._shard.sharding_spec.api import (
25
+ _dispatch_custom_op,
26
+ _has_custom_op,
27
+ )
28
+ from torch.distributed._shard.sharding_spec._internals import (
29
+ check_tensor,
30
+ validate_non_overlapping_shards_metadata,
31
+ )
32
+ from torch.distributed._shard._utils import (
33
+ DEPRECATE_MSG,
34
+ )
35
+
36
+ from .metadata import TensorProperties, ShardedTensorMetadata
37
+ from .shard import Shard
38
+ from .reshard import reshuffle_local_shard, reshard_local_shard
39
+ from .utils import (
40
+ _flatten_tensor_size,
41
+ _parse_and_validate_remote_device,
42
+ _validate_output_tensor_for_gather,
43
+ build_metadata_from_local_shards,
44
+ build_global_metadata
45
+ )
46
+ from torch.distributed.remote_device import _remote_device
47
+ from torch.utils import _pytree as pytree
48
+ import operator
49
+
50
+ # Tracking for sharded tensor objects.
51
+ _sharded_tensor_lock = threading.Lock()
52
+ _sharded_tensor_current_id = 0
53
+ _sharded_tensor_map: Dict[int, weakref.ReferenceType[ShardedTensor]] = {}
54
+
55
+ # Default sharded ops
56
+ _SHARDED_OPS: Dict[Callable, Callable] = {}
57
+
58
+ # Customized user ops
59
+ _CUSTOM_SHARDED_OPS: Dict[Callable, Callable] = {}
60
+
61
+ def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
62
+ with _sharded_tensor_lock:
63
+ if sharded_tensor_id not in _sharded_tensor_map:
64
+ raise RuntimeError(
65
+ f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
66
+
67
+ sharded_tensor = _sharded_tensor_map[sharded_tensor_id]()
68
+ if sharded_tensor is None:
69
+ raise RuntimeError('ShardedTensor weakref has been deallocated')
70
+ else:
71
+ sharded_tensor._register_remote_shards(rrefs, rpc_rank)
72
+
73
+ class ShardedTensorBase(torch.Tensor):
74
+ _sharding_spec: shard_spec.ShardingSpec
75
+ _metadata: ShardedTensorMetadata
76
+ _local_shards: List[Shard]
77
+
78
+ def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs):
79
+ # Use __new__ to construct a wrapper tensor, for recording tensor
80
+ # properties and logging purposes.
81
+ torch._C._log_api_usage_once("torch.distributed._shard.sharded_tensor")
82
+
83
+ # check sharding spec and build sharded tensor metadata
84
+ if not isinstance(sharding_spec, shard_spec.ShardingSpec):
85
+ raise ValueError(f"Expecting ShardingSpec but got: {type(sharding_spec)}")
86
+
87
+ sizes = _flatten_tensor_size(size)
88
+ dtype = kwargs["dtype"]
89
+ layout = kwargs["layout"]
90
+ pin_memory = kwargs["pin_memory"]
91
+ requires_grad = kwargs["requires_grad"]
92
+
93
+ if dtype is None:
94
+ dtype = torch.get_default_dtype()
95
+
96
+ tensor_properties = TensorProperties(
97
+ dtype, layout, requires_grad, pin_memory=pin_memory
98
+ )
99
+ sharded_tensor_metadata = sharding_spec.build_metadata(
100
+ sizes, tensor_properties=tensor_properties
101
+ )
102
+
103
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
104
+ cls,
105
+ sizes,
106
+ dtype=dtype,
107
+ layout=layout,
108
+ pin_memory=pin_memory,
109
+ requires_grad=requires_grad,
110
+ )
111
+ # set sharding spec
112
+ r._sharding_spec = sharding_spec
113
+ # set metadata
114
+ r._metadata = sharded_tensor_metadata
115
+ # set local shards
116
+ r._local_shards = []
117
+ return r
118
+
119
+ def metadata(self) -> ShardedTensorMetadata:
120
+ """
121
+ Returns a :class:`ShardedTensorMetadata` object corresponding to the
122
+ metadata for the entire tensor.
123
+ """
124
+ return self._metadata
125
+
126
+ def local_shards(self) -> List[Shard]:
127
+ """
128
+ Returns a list of :class:`Shard` objects corresponding to the
129
+ local shards for this rank. Returns an empty list if the current rank
130
+ does not host any shards for this Tensor.
131
+ """
132
+ return self._local_shards
133
+
134
+ @classmethod
135
+ def _init_from_local_shards_and_global_metadata(
136
+ cls,
137
+ local_shards: List[Shard],
138
+ sharded_tensor_metadata: ShardedTensorMetadata,
139
+ sharding_spec=None,
140
+ ) -> ShardedTensorBase:
141
+ """
142
+ Initialize a ShardedTensorBase with local shards and a global
143
+ ShardedTensorMetadata built on each rank.
144
+ Warning: This API is experimental and subject to change. It does
145
+ not perform cross-rank validation, and fully relies on the user
146
+ for the correctness of sharded_tensor_metadata on each rank
147
+ """
148
+ shards_metadata = sharded_tensor_metadata.shards_metadata
149
+ tensor_properties = sharded_tensor_metadata.tensor_properties
150
+
151
+ if len(shards_metadata) == 0:
152
+ raise ValueError("shards_metadata must not be empty!")
153
+
154
+ if tensor_properties.layout != torch.strided:
155
+ raise ValueError("Only torch.strided layout is currently supported")
156
+
157
+ if sharding_spec is None:
158
+ spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata)
159
+ else:
160
+ spec = sharding_spec
161
+
162
+ sharded_tensor_base = ShardedTensorBase.__new__(
163
+ ShardedTensor,
164
+ spec,
165
+ sharded_tensor_metadata.size,
166
+ dtype=tensor_properties.dtype,
167
+ layout=tensor_properties.layout,
168
+ pin_memory=tensor_properties.pin_memory,
169
+ requires_grad=tensor_properties.requires_grad,
170
+ )
171
+
172
+ # check if shards_metadata have overlap shards
173
+ validate_non_overlapping_shards_metadata(shards_metadata)
174
+
175
+ # check if the shards_metadata is compatible with overall size of the sharded tensor.
176
+ check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
177
+
178
+ # done validation, add local_shards
179
+ sharded_tensor_base._local_shards = local_shards
180
+ return sharded_tensor_base
181
+
182
+ @classmethod
183
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
184
+ raise RuntimeError(
185
+ f"A {cls.__name__} object is being used from c++ while calling {func.__module__}.{func.__name__} "
186
+ "but the there is no custom __torch_dispatch__ implementation for it."
187
+ )
188
+
189
+ class ShardedTensor(ShardedTensorBase):
190
+ """
191
+ ShardedTensor is a torch.Tensor subclass to represent Tensors that are sharded
192
+ across multiple devices and multiple processes.
193
+
194
+ ShardedTensor is initialized in an SPMD like fashion where each rank
195
+ initializes the ShardedTensor. The ShardedTensor object on each rank
196
+ then only stores the local shard for the Tensor and provides global
197
+ metadata for all the shards.
198
+
199
+ ShardedTensor doesn't provide any Tensor-like operations but is a wrapper
200
+ providing the Tensor representing the local shard and the global metadata.
201
+ Using these, users can build their custom distributed._sharded computations
202
+ on top of this primitive. The local shards are all initialized using the
203
+ create_op specified by tensor_init_params.create_op, e.g., torch.ones, or
204
+ torch.empty
205
+
206
+ Args:
207
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
208
+ describing how to shard the Tensor.
209
+ size (int...): a sequence of integers defining the shape of the output
210
+ tensor. Can be a variable number of arguments or a collection like a list or tuple.
211
+
212
+ Keyword args:
213
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
214
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
215
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
216
+ Default: ``torch.strided``.
217
+ requires_grad (bool, optional): If autograd should record operations on the
218
+ returned tensor. Default: ``False``.
219
+ pin_memory (bool, optional): If set, returned tensor would be allocated in
220
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
221
+ memory_format (:class:`torch.memory_format`, optional): the desired memory format of
222
+ returned Tensor. Default: ``torch.contiguous_format``.
223
+ init_rrefs (bool, optional): Whether or not to initialize
224
+ :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
225
+ Need to initialize the RPC Framework if specified as ``True``.
226
+ Default: ``False``.
227
+
228
+ .. note:: ShardedTensor uses collectives to do various operations, i.e. it
229
+ uses all_gather to do cross rank validations. For NCCL-based process
230
+ groups, internal tensor representations of objects must be moved to the
231
+ GPU device before communication takes place. In this case, the device
232
+ used is given by ``torch.cuda.current_device()`` and it is the user's
233
+ responsibility to ensure that this is set so that each rank has an
234
+ individual GPU, via ``torch.cuda.set_device()``
235
+
236
+ """
237
+ def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs):
238
+ self = super().__new__(cls, sharding_spec, *size, **kwargs)
239
+ return self
240
+
241
+ def __init__(
242
+ self,
243
+ sharding_spec: shard_spec.ShardingSpec,
244
+ *size,
245
+ dtype=None,
246
+ layout=torch.strided,
247
+ requires_grad=False,
248
+ pin_memory=False,
249
+ memory_format=torch.contiguous_format,
250
+ process_group=None,
251
+ init_rrefs=False,
252
+ ):
253
+ # prepare initialization, initialize fields like
254
+ # _process_group, _local_shards, etc.
255
+ self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
256
+
257
+ if layout != torch.strided:
258
+ raise ValueError('Only torch.strided layout is currently supported')
259
+
260
+ if memory_format != torch.contiguous_format:
261
+ raise ValueError('Only torch.contiguous_format memory_format is currently supported')
262
+
263
+ self._metadata.tensor_properties.memory_format = memory_format
264
+
265
+ current_rank = dist.get_rank(self._process_group)
266
+
267
+ for shard_metadata in self._metadata.shards_metadata:
268
+ rank, device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
269
+ if rank == current_rank:
270
+ local_tensor = _create_tensor_from_params(
271
+ shard_metadata.shard_sizes,
272
+ local_device=device,
273
+ tensor_properties=self._metadata.tensor_properties
274
+ )
275
+ self._local_shards.append(Shard(local_tensor, shard_metadata))
276
+
277
+ # do post initialization (i.e. register sharded_tensor_id, initialize_rpc)
278
+ self._post_init()
279
+
280
+ def _prepare_init(self, process_group=None, init_rrefs=False):
281
+ self._init_rrefs = init_rrefs
282
+ self._sharded_tensor_id = None
283
+
284
+ self._process_group = (
285
+ process_group
286
+ if process_group is not None
287
+ else distributed_c10d._get_default_group()
288
+ )
289
+
290
+ self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}
291
+
292
+ def _post_init(self):
293
+ # Initialize RPC if available.
294
+ if self._init_rrefs:
295
+ with _sharded_tensor_lock:
296
+ global _sharded_tensor_current_id, _sharded_tensor_map
297
+ self._sharded_tensor_id = _sharded_tensor_current_id
298
+ _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
299
+ _sharded_tensor_current_id += 1
300
+
301
+ if not rpc._is_current_rpc_agent_set():
302
+ raise RuntimeError(
303
+ 'RPC Framework needs to be initialized using'
304
+ ' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
305
+ self._init_rpc()
306
+
307
+ def __del__(self):
308
+ # Clean up the global map.
309
+ with _sharded_tensor_lock:
310
+ global _sharded_tensor_current_id, _sharded_tensor_map
311
+ if (
312
+ hasattr(self, "_sharded_tensor_id")
313
+ and self._sharded_tensor_id in _sharded_tensor_map
314
+ ):
315
+ _sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload]
316
+
317
+ def _init_rpc(self):
318
+ # Validate PG and RPC ranks match.
319
+ pg_rank = dist.get_rank()
320
+ rpc_rank = rpc.get_worker_info().id
321
+ if pg_rank != rpc_rank:
322
+ raise ValueError(
323
+ f'Default ProcessGroup and RPC ranks must be '
324
+ f'the same for ShardedTensor, found process group rank: '
325
+ f'{pg_rank} and RPC rank: {rpc_rank}'
326
+ )
327
+
328
+ self._remote_shards = {}
329
+
330
+ # Gather all the sharded tensor ids.
331
+ worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
332
+ rank_to_name = {}
333
+ name_to_rank = {}
334
+
335
+ for worker_info in worker_infos:
336
+ rank_to_name[worker_info.id] = worker_info.name
337
+ name_to_rank[worker_info.name] = worker_info.id
338
+
339
+ all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)
340
+
341
+ # Share the local shards to the entire world.
342
+ futs = []
343
+ rpc_rank = rpc.get_worker_info().id
344
+ for rank in range(dist.get_world_size()):
345
+ # Skip self.
346
+ if rank == dist.get_rank():
347
+ continue
348
+
349
+ if len(self.local_shards()) != 0:
350
+ rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
351
+ fut = rpc.rpc_async(
352
+ rank,
353
+ _register_remote_shards,
354
+ args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
355
+ futs.append(fut)
356
+
357
+ torch.futures.wait_all(futs)
358
+
359
+ # Barrier for all RPCs to finish on all ranks.
360
+ rpc.api._all_gather(None)
361
+
362
+ def _get_preferred_device(self) -> torch.device:
363
+ """
364
+ Return the preferred device to be used when creating tensors for collectives.
365
+ This method takes into account the associated process group
366
+ """
367
+ if dist.get_backend(self._process_group) == dist.Backend.NCCL:
368
+ return torch.device(torch.cuda.current_device())
369
+ return torch.device("cpu")
370
+
371
+ def gather( # type: ignore[override]
372
+ self,
373
+ dst: int = 0,
374
+ out: Optional[torch.Tensor] = None,
375
+ enforce_dtype: bool = False,
376
+ dtype: Optional[torch.dtype] = None,
377
+ ) -> None:
378
+ """
379
+ Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
380
+ sharded tensor.
381
+
382
+ The API needs to be called on all ranks in SPMD fashion. All ranks should have
383
+ the same ``dst``. ``out`` should be a tensor of the same size as the overall
384
+ size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
385
+
386
+ Args:
387
+ dst(int): The rank where full tensor is constructed.
388
+ Default: 0
389
+ out (:class `torch.Tensor`, optional): The output full tensor.
390
+ Must be provided ONLY on the ``dst`` rank.
391
+ Default: ``None``
392
+ enforce_dtype (bool): Deprecated, please use dtype instead. Force the
393
+ gathered tensors to be the same type as input and output.
394
+ dtype (torch.dtype): Force the gathered tensors to be this dtype.
395
+ Default: ``None``
396
+ """
397
+ def shard_size(shard_md):
398
+ return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined]
399
+
400
+ if enforce_dtype:
401
+ warnings.warn("enforce_dtype is deprecated. Please use dtype instead.")
402
+
403
+ rank = dist.get_rank(self._process_group)
404
+ full_size = self.metadata().size
405
+ _validate_output_tensor_for_gather(rank, dst, full_size, out)
406
+
407
+ local_shards = self.local_shards()
408
+ world_size = dist.get_world_size(self._process_group)
409
+ rank_sizes = [0 for _ in range(world_size)]
410
+ max_rank_size = 0
411
+ shard_placement: Dict[ShardMetadata, Tuple[int, int]] = {}
412
+ # collect sizes
413
+ for shard_md in self.metadata().shards_metadata:
414
+ shard_rank = cast(_remote_device, shard_md.placement).rank()
415
+ assert shard_rank is not None
416
+
417
+ shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank])
418
+ rank_sizes[shard_rank] += shard_size(shard_md)
419
+ max_rank_size = max(max_rank_size, rank_sizes[shard_rank])
420
+
421
+ gather_list: Optional[List[torch.Tensor]]
422
+ if rank == dst:
423
+ assert out is not None
424
+ if enforce_dtype:
425
+ # enforce_dtype is deprecated. Do it for backward compatibility.
426
+ dtype = out.dtype
427
+ # TODO make it as a view of out tensor
428
+ gather_list = [torch.empty((max_rank_size,), device=out.device, dtype=dtype) for _ in range(world_size)]
429
+ else:
430
+ gather_list = None
431
+
432
+ with torch.no_grad():
433
+ if enforce_dtype and len(local_shards) > 0:
434
+ # enforce_dtype is deprecated. Do it for backward compatibility.
435
+ dtype = local_shards[0].tensor.dtype
436
+ data = torch.empty(max_rank_size, device=self._get_preferred_device(), dtype=dtype)
437
+
438
+ for shard in local_shards:
439
+ src = shard.tensor.flatten()
440
+ if src.nelement() == 0:
441
+ warnings.warn("Gathering a tensor with zero elements on rank " + str(rank))
442
+ return
443
+ shard_offset = shard_placement[shard.metadata][1]
444
+ data[shard_offset: shard_offset + src.numel()].copy_(src)
445
+
446
+ dist.gather(
447
+ tensor=data,
448
+ gather_list=gather_list,
449
+ dst=dst,
450
+ group=self._process_group,
451
+ )
452
+ if rank != dst:
453
+ return
454
+ # In _validate_output_tensor_for_gather, we raise if out == None and rank == dst
455
+ out = cast(torch.Tensor, out)
456
+ assert gather_list is not None
457
+
458
+ full_size = self.metadata().size
459
+ dims = len(full_size)
460
+ for shard_md in self.metadata().shards_metadata:
461
+ rank, rank_offset = shard_placement[shard_md]
462
+ tensor = gather_list[rank]
463
+ tensor = tensor[rank_offset : rank_offset + shard_size(shard_md)]
464
+ tensor = tensor.view(shard_md.shard_sizes)
465
+
466
+ out_narrow_view = out
467
+ for dim in range(dims):
468
+ out_narrow_view = out_narrow_view.narrow(
469
+ dim,
470
+ shard_md.shard_offsets[dim],
471
+ shard_md.shard_sizes[dim],
472
+ )
473
+
474
+ out_narrow_view.copy_(tensor)
475
+
476
+ def cpu(
477
+ self,
478
+ memory_format=torch.preserve_format,
479
+ process_group=None
480
+ ) -> ShardedTensor:
481
+ """
482
+ Returns a copy of this object in CPU memory.
483
+
484
+ If this ShardedTensor is already on CPU memory, then no copy is
485
+ performed and original object is returned.
486
+
487
+ .. note:: When moving a ShardedTensor from GPU to CPU, the ShardedTensor might
488
+ need to be managed by a different type of ProcessGroup (e.g. ProcessGroupGloo);
489
+ it is the user's responsibility to explicitly pass in a new process_group that
490
+ is compatible with CPU.
491
+ """
492
+ # TODO: make this a __torch_function__ op once ShardedTensor becomes a
493
+ # torch.Tensor subclass, see https://github.com/pytorch/pytorch/issues/75402
494
+ if memory_format != torch.preserve_format and \
495
+ memory_format != torch.contiguous_format:
496
+ raise RuntimeError("Only `torch.contiguous_format` or "
497
+ "`torch.preserve_format` is supported!")
498
+ all_on_cpu = True
499
+ for meta in self.metadata().shards_metadata:
500
+ all_on_cpu &= (meta.placement.device().type == "cpu") # type: ignore[union-attr]
501
+
502
+ # if every shard is already on CPU, return the original object
503
+ if all_on_cpu:
504
+ return self
505
+
506
+ # if not, returns a copy of this object on CPU
507
+ list_shards: List[Shard] = []
508
+ # move all local shards to cpu, and change metadata
509
+ for shard in self._local_shards:
510
+ cpu_tensor = shard.tensor.cpu(memory_format=memory_format) # type: ignore[call-arg]
511
+ metadata = copy.deepcopy(shard.metadata)
512
+ metadata.placement._device = torch.device("cpu") # type: ignore[union-attr]
513
+ list_shards.append(
514
+ Shard(cpu_tensor, metadata)
515
+ )
516
+
517
+ st_meta = copy.deepcopy(self.metadata())
518
+ for meta in st_meta.shards_metadata:
519
+ if meta.placement.device().type != "cpu": # type: ignore[union-attr]
520
+ meta.placement._device = torch.device("cpu") # type: ignore[union-attr]
521
+
522
+ pg = self._process_group if process_group is None else process_group
523
+ st_cpu = ShardedTensor._init_from_local_shards_and_global_metadata(
524
+ list_shards,
525
+ sharded_tensor_metadata=st_meta,
526
+ process_group=pg,
527
+ init_rrefs=self._init_rrefs
528
+ )
529
+ return st_cpu
530
+
531
+ def cuda(
532
+ self,
533
+ device=None,
534
+ non_blocking=False,
535
+ memory_format=torch.preserve_format,
536
+ process_group=None
537
+ ) -> ShardedTensor:
538
+ """
539
+ Returns a copy of this object in CUDA memory. If the original ShardedTensor
540
+ is on CPU, we will move the local shard to the current GPU device of each
541
+ process in a SPMD fashion.
542
+ If this ShardedTensor is already on CUDA memory and local shards on each rank are
543
+ already on the current device, we still return a new ShardedTensor object with new
544
+ metadata, but no underlying data movements are performed.
545
+ .. note:: When moving a ShardedTensor from CPU to GPU, the ShardedTensor might
546
+ need to be managed by a different type of ProcessGroup (e.g. ProcessGroupNCCL);
547
+ it is the user's responsibility to explicitly pass in a new process_group that
548
+ is compatible with GPU.
549
+ """
550
+ if memory_format != torch.preserve_format and \
551
+ memory_format != torch.contiguous_format:
552
+ raise RuntimeError("Only `torch.contiguous_format` or "
553
+ "`torch.preserve_format` is supported!")
554
+
555
+ if device is not None:
556
+ device = torch.device(device) if isinstance(device, str) else device
557
+ assert isinstance(device, torch.device) and device.index == torch.cuda.current_device(), \
558
+ '''Only device without device id (e.g. "cpu" or "cuda") is expected for ShardedTensor!'''
559
+
560
+ current_device = torch.device(torch.cuda.current_device())
561
+ # returns a copy of ShardedTensor on CUDA current device
562
+ list_shards: List[Shard] = []
563
+ # move all local shards to current device, and change metadata
564
+ # if local shards already on the current device, there's no
565
+ # real data movement, only the metadata are copied.
566
+ for shard in self._local_shards:
567
+ cuda_tensor = shard.tensor.cuda(
568
+ device=current_device,
569
+ non_blocking=non_blocking,
570
+ memory_format=memory_format
571
+ ) # type: ignore[call-arg]
572
+ metadata = copy.deepcopy(shard.metadata)
573
+ metadata.placement._device = current_device # type: ignore[union-attr]
574
+
575
+ list_shards.append(
576
+ Shard(cuda_tensor, metadata)
577
+ )
578
+
579
+ st_meta = copy.deepcopy(self.metadata())
580
+ for meta in st_meta.shards_metadata:
581
+ if meta.placement.device().type != "cuda": # type: ignore[union-attr]
582
+ meta.placement._device = current_device # type: ignore[union-attr]
583
+
584
+ pg = self._process_group if process_group is None else process_group
585
+ # we need to use `init_from_local_shards` to communicate between ranks
586
+ # and update the sharding spec/shards metadata.
587
+ st_cuda = ShardedTensor._init_from_local_shards_and_global_metadata(
588
+ list_shards,
589
+ sharded_tensor_metadata=st_meta,
590
+ process_group=pg,
591
+ init_rrefs=self._init_rrefs
592
+ )
593
+ return st_cuda
594
+
595
+ def to(self, *args, **kwargs) -> ShardedTensor:
596
+ current_device: torch.device
597
+ if self._local_shards:
598
+ current_device = self._local_shards[0].tensor.device
599
+ elif self._process_group._get_backend_name() == "gloo":
600
+ current_device = torch.device("cpu")
601
+ else:
602
+ current_device = torch.device(torch.cuda.current_device())
603
+ current_dtype = self.dtype
604
+ device_to = current_device
605
+ dtype_to = current_dtype
606
+ if len(args) == 1:
607
+ if isinstance(args[0], torch.dtype):
608
+ dtype_to = args[0]
609
+ elif isinstance(args[0], torch.device):
610
+ device_to = args[0]
611
+ elif isinstance(args[0], (str, int)):
612
+ device_to = torch.device(args[0])
613
+ elif isinstance(args[0], torch.Tensor):
614
+ dtype_to = args[0].dtype
615
+ device_to = args[0].device
616
+ else:
617
+ raise RuntimeError(f"ShardedTensor.to() have wrong arguments: {args}")
618
+ elif len(args) == 2:
619
+ device_to, dtype_to = args
620
+ else:
621
+ dtype_to = kwargs.get("dtype", current_dtype)
622
+ device_to = kwargs.get("device", current_device)
623
+
624
+ device_to = torch.device(device_to) if isinstance(device_to, (str, int)) else device_to
625
+
626
+ if device_to.type == "cuda":
627
+ # if device_to set to cuda, set to current device even
628
+ # if user specify the device index.
629
+ current_idx = torch.cuda.current_device()
630
+ if device_to.index != current_idx:
631
+ warnings.warn("ShardedTensor.to only move tensor to its current device"
632
+ "If you want to put to different device, use `reshard` instead.")
633
+ device_to = torch.device(current_idx)
634
+
635
+ copy_tensor = kwargs.get("copy", False)
636
+ non_blocking = kwargs.get("non_blocking", False)
637
+ memory_format = kwargs.get("memory_format", torch.preserve_format)
638
+ process_group = kwargs.get("process_group", None)
639
+
640
+ if not copy_tensor and dtype_to == current_dtype and device_to == current_device:
641
+ # already have correct dtype and device, return itself
642
+ return self
643
+
644
+ # returns a copy of ShardedTensor on CUDA current device
645
+ list_shards: List[Shard] = []
646
+
647
+ for shard in self._local_shards:
648
+ new_tensor = shard.tensor.to( # type: ignore[call-overload]
649
+ device=device_to,
650
+ dtype=dtype_to,
651
+ non_blocking=non_blocking,
652
+ copy=copy_tensor,
653
+ memory_format=memory_format
654
+ )
655
+ metadata = copy.deepcopy(shard.metadata)
656
+ if metadata.placement is not None:
657
+ metadata.placement._device = device_to
658
+ list_shards.append(Shard(new_tensor, metadata))
659
+
660
+ # update metadata
661
+ st_meta = copy.deepcopy(self.metadata())
662
+ st_meta.tensor_properties.dtype = dtype_to
663
+ for meta in st_meta.shards_metadata:
664
+ meta.placement._device = device_to # type: ignore[union-attr]
665
+
666
+ pg = self._process_group if process_group is None else process_group
667
+ # we need to use `init_from_local_shards` to communicate between ranks
668
+ # and update the sharding spec/shards metadata.
669
+ st_to = ShardedTensor._init_from_local_shards_and_global_metadata(
670
+ list_shards,
671
+ sharded_tensor_metadata=st_meta,
672
+ process_group=pg,
673
+ init_rrefs=self._init_rrefs
674
+ )
675
+ return st_to
676
+
677
+
678
+ @classmethod
679
+ def _init_from_local_shards(
680
+ cls,
681
+ local_shards: List[Shard],
682
+ *global_size,
683
+ process_group=None,
684
+ init_rrefs=False,
685
+ ):
686
+ # STEP 1: Validate the ShardMetadatas locally
687
+ process_group = (
688
+ process_group
689
+ if process_group is not None
690
+ else distributed_c10d._get_default_group()
691
+ )
692
+ current_rank = dist.get_rank(process_group)
693
+ world_size = dist.get_world_size(process_group)
694
+
695
+ local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
696
+ global_tensor_size = _flatten_tensor_size(global_size)
697
+
698
+ if len(local_shards) > 0:
699
+ local_sharded_tensor_metadata = \
700
+ build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)
701
+
702
+ # STEP 2. Validate metadata across ranks, and build a global sharded tensor
703
+ # metadata by gathering local ShardedTensorMetadata
704
+ gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
705
+ if world_size > 1:
706
+ gathered_metadatas = [None for _ in range(world_size)]
707
+
708
+ dist.all_gather_object(
709
+ gathered_metadatas,
710
+ local_sharded_tensor_metadata,
711
+ group=process_group
712
+ )
713
+ else:
714
+ gathered_metadatas = [local_sharded_tensor_metadata]
715
+
716
+ global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)
717
+ tensor_properties = global_sharded_tensor_metadata.tensor_properties
718
+
719
+ # STEP 3: Validation done, create the actual ShardedTensor and populate fields
720
+ # prepare initialization
721
+ spec = shard_spec._infer_sharding_spec_from_shards_metadata(
722
+ global_sharded_tensor_metadata.shards_metadata
723
+ )
724
+ sharded_tensor = cls.__new__(cls,
725
+ spec,
726
+ global_sharded_tensor_metadata.size,
727
+ dtype=tensor_properties.dtype,
728
+ layout=tensor_properties.layout,
729
+ pin_memory=tensor_properties.pin_memory,
730
+ requires_grad=tensor_properties.requires_grad)
731
+ sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
732
+
733
+ # attach local_shards to the ShardedTensor created
734
+ sharded_tensor._local_shards = local_shards
735
+
736
+ # run post initialization, i.e. map registration, rpc initialization
737
+ sharded_tensor._post_init()
738
+ return sharded_tensor
739
+
740
+ @classmethod
741
+ def _init_from_local_tensor(
742
+ cls,
743
+ local_tensor: torch.Tensor,
744
+ sharding_spec: shard_spec.ShardingSpec,
745
+ *global_size: Sequence[int],
746
+ process_group: Optional[dist.ProcessGroup] = None,
747
+ init_rrefs=False,
748
+ ) -> ShardedTensor:
749
+ """
750
+ Initialize a ShardedTensor given only one local tensor, global sharded tensor
751
+ size and sharding spec on each rank.
752
+
753
+ Args:
754
+ local_tensor (Tensor): Single tensor of local shard stored in each rank.
755
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
756
+ The specification describing how to shard the Tensor.
757
+ global_size (Sequence[int]): Size of the sharded tensor.
758
+ process_group (ProcessGroup, optional): The process group to aggregate on.
759
+ Default: None
760
+ init_rrefs (bool, optional): Whether or not to initialize
761
+ :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
762
+ Need to initialize the RPC Framework if specified as ``True``.
763
+ Default: ``False``.
764
+
765
+ Returns:
766
+ A :class:`ShardedTensor` sharded based on the given sharding_spec with local
767
+ tensor stored in the current rank.
768
+
769
+ Examples:
770
+ >>> # xdoctest: +SKIP
771
+ >>> # All tensors below are of torch.int64 type.
772
+ >>> # We have 2 process groups, 2 ranks.
773
+ >>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
774
+ >>> local_tensor = torch.unsqueeze(torch.cat([tensor, tensor + 2]))
775
+ >>> local_tensor
776
+ tensor([[1, 2, 3, 4]]) # Rank 0
777
+ tensor([[3, 4, 5, 6]]) # Rank 1
778
+ >>> sharding_dim = 0
779
+ >>> sharding_spec = ChunkShardingSpec(
780
+ dim=sharding_dim,
781
+ placements=[
782
+ "rank:0/cuda:0",
783
+ "rank:1/cuda:1",
784
+ ],
785
+ )
786
+ >>> st = ShardedTensor._init_from_local_tensor(local_tensor, sharding_spec, [2, 4])
787
+ >>> st
788
+ ShardedTensor(
789
+ ShardedTensorMetadata(
790
+ shards_metadata=[
791
+ ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1, 4], placement=rank:0/cuda:0),
792
+ ShardMetadata(shard_offsets=[1, 0], shard_sizes=[1, 4], placement=rank:1/cuda:1),
793
+ ],
794
+ size=torch.Size([2, 4])
795
+ )
796
+ >>> st.local_tensor()
797
+ tensor([1, 2, 3, 4]) # Rank 0
798
+ tensor([3, 4, 5, 6]) # Rank 1
799
+
800
+ Warning: This API is experimental and subject to change. It does not perform full
801
+ cross-rank validation, and we only validate the local shard on the current rank.
802
+ We fully rely on the user to ensure the local tensor is sharded based on the
803
+ sharding spec.
804
+ """
805
+ warnings.warn(DEPRECATE_MSG)
806
+
807
+ if not local_tensor.is_contiguous():
808
+ raise ValueError('local_tensor is not a contiguous Tensor.')
809
+
810
+ global_tensor_size = _flatten_tensor_size(global_size)
811
+ tensor_properties = TensorProperties(
812
+ dtype=local_tensor.dtype,
813
+ layout=local_tensor.layout,
814
+ requires_grad=local_tensor.requires_grad,
815
+ memory_format=torch.contiguous_format,
816
+ pin_memory=local_tensor.is_pinned())
817
+ sharded_tensor_metadata = sharding_spec.build_metadata(
818
+ global_tensor_size,
819
+ tensor_properties
820
+ )
821
+
822
+ process_group = (
823
+ process_group
824
+ if process_group is not None
825
+ else distributed_c10d._get_default_group()
826
+ )
827
+ current_rank = dist.get_rank(process_group)
828
+
829
+ local_shards: List[Shard] = []
830
+ for shard_metadata in sharded_tensor_metadata.shards_metadata:
831
+ rank, device = _parse_and_validate_remote_device(process_group, shard_metadata.placement)
832
+ if rank == current_rank:
833
+ local_shards.append(Shard(local_tensor, shard_metadata))
834
+
835
+ # TODO: figure out how the API should behave when some ranks have no shards
836
+ # see https://github.com/pytorch/pytorch/issues/7313
837
+ return ShardedTensor._init_from_local_shards_and_global_metadata(
838
+ local_shards,
839
+ sharded_tensor_metadata,
840
+ process_group=process_group,
841
+ init_rrefs=init_rrefs,
842
+ sharding_spec=sharding_spec,
843
+ )
844
+
845
+ @classmethod
846
+ def _init_from_local_shards_and_global_metadata( # type: ignore[override]
847
+ cls,
848
+ local_shards: List[Shard],
849
+ sharded_tensor_metadata: ShardedTensorMetadata,
850
+ process_group=None,
851
+ init_rrefs=False,
852
+ sharding_spec=None,
853
+ ) -> ShardedTensor:
854
+ """
855
+ Initialize a ShardedTensor with local shards and a global
856
+ ShardedTensorMetadata built on each rank.
857
+
858
+ Warning: This API is experimental and subject to change. It does
859
+ not perform cross-rank validation, and fully relies on the user
860
+ for the correctness of sharded_tensor_metadata on each rank
861
+ """
862
+ process_group = (
863
+ process_group
864
+ if process_group is not None
865
+ else distributed_c10d._get_default_group()
866
+ )
867
+ current_rank = dist.get_rank(process_group)
868
+
869
+ shards_metadata = sharded_tensor_metadata.shards_metadata
870
+
871
+ local_shard_metadatas = []
872
+
873
+ # collect local shard metadatas from the global sharded_tensor_metadata
874
+ for shard_metadata in shards_metadata: # type: ignore[attr-defined]
875
+ rank, local_device = _parse_and_validate_remote_device(process_group, shard_metadata.placement)
876
+
877
+ if current_rank == rank:
878
+ local_shard_metadatas.append(shard_metadata)
879
+
880
+ if len(local_shards) != len(local_shard_metadatas):
881
+ raise RuntimeError(
882
+ f'Number of local shards ({len(local_shards)}) does not match number of local '
883
+ f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
884
+ f'on rank ({current_rank}) '
885
+ )
886
+
887
+ shards_metadata = sharded_tensor_metadata.shards_metadata
888
+ tensor_properties = sharded_tensor_metadata.tensor_properties
889
+
890
+ if len(shards_metadata) == 0:
891
+ raise ValueError("shards_metadata must not be empty!")
892
+
893
+ if tensor_properties.layout != torch.strided:
894
+ raise ValueError("Only torch.strided layout is currently supported")
895
+
896
+ if sharding_spec is None:
897
+ spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata)
898
+ else:
899
+ spec = sharding_spec
900
+
901
+ sharded_tensor = ShardedTensor.__new__(
902
+ ShardedTensor,
903
+ spec,
904
+ sharded_tensor_metadata.size,
905
+ dtype=tensor_properties.dtype,
906
+ layout=tensor_properties.layout,
907
+ pin_memory=tensor_properties.pin_memory,
908
+ requires_grad=tensor_properties.requires_grad,
909
+ )
910
+
911
+ def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
912
+ tensor_property_or_metadata = (
913
+ "tensor property" if is_property else "local ShardMetadata"
914
+ )
915
+ if expected != actual:
916
+ raise ValueError(
917
+ f"Local shards' tensor {prop_name} property is incompatible with "
918
+ f"{tensor_property_or_metadata} on rank {rank}: "
919
+ f"{tensor_property_or_metadata} {prop_name}={expected}, "
920
+ f"local shard tensor {prop_name}={actual}."
921
+ )
922
+
923
+ for shard in local_shards:
924
+ shard_meta = shard.metadata
925
+ local_shard_tensor = shard.tensor
926
+ placement = shard_meta.placement
927
+ assert placement is not None, "Must specify placement for `Shard`!"
928
+ rank = placement.rank()
929
+ local_device = placement.device()
930
+
931
+ _raise_if_mismatch(
932
+ tensor_properties.layout,
933
+ local_shard_tensor.layout,
934
+ "layout",
935
+ rank,
936
+ True,
937
+ )
938
+ if not local_shard_tensor.is_contiguous():
939
+ raise ValueError(
940
+ "Only torch.contiguous_format memory_format is currently supported"
941
+ )
942
+
943
+ _raise_if_mismatch(
944
+ shard_meta.shard_sizes,
945
+ list(local_shard_tensor.size()),
946
+ "size",
947
+ rank,
948
+ )
949
+ _raise_if_mismatch(
950
+ tensor_properties.pin_memory,
951
+ local_shard_tensor.is_pinned(),
952
+ "pin_memory",
953
+ rank,
954
+ True,
955
+ )
956
+ _raise_if_mismatch(local_device, local_shard_tensor.device, "device", rank)
957
+ _raise_if_mismatch(
958
+ tensor_properties.dtype,
959
+ local_shard_tensor.dtype,
960
+ "dtype",
961
+ rank,
962
+ True,
963
+ )
964
+ _raise_if_mismatch(
965
+ tensor_properties.requires_grad,
966
+ local_shard_tensor.requires_grad,
967
+ "requires_grad",
968
+ rank,
969
+ True,
970
+ )
971
+
972
+ # check if shards_metadata have overlap shards
973
+ validate_non_overlapping_shards_metadata(shards_metadata)
974
+
975
+ # check if the shards_metadata is compatible with overall size of the sharded tensor.
976
+ check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
977
+
978
+ # done validation, add local_shards
979
+ sharded_tensor._local_shards = local_shards
980
+ sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
981
+
982
+ # run post initialization, i.e. map registration, rpc initialization
983
+ sharded_tensor._post_init()
984
+ return sharded_tensor
985
+
986
+ def sharding_spec(self) -> shard_spec.ShardingSpec:
987
+ """
988
+ Returns the ShardingSpec for the tensor.
989
+ """
990
+ return self._sharding_spec
991
+
992
+ def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor:
993
+ """
994
+ Reshard a sharded tensor given the ``resharding_spec``. For now, we only support
995
+ single local shard.
996
+
997
+ If ``resharding_spec`` is the same as the original one, this becomes a no-op.
998
+ If ``resharding_spec`` only shares the same sharding dim with the original one,
999
+ we swap local shards directly.
1000
+ For more generic cases, we merge different shards across different ranks and split
1001
+ the local shards based on the ``resharding_spec`` via `all_to_all` collective API.
1002
+
1003
+ Args:
1004
+ resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
1005
+ specification describing how the tensor is sharded.
1006
+
1007
+ Returns:
1008
+ A :class:`ShardedTensor` object whose local shards are resharded.
1009
+
1010
+ Examples:
1011
+ >>> # xdoctest: +SKIP
1012
+ >>> # We have 2 process groups, 2 ranks.
1013
+ >>> tensor = torch.arange(4, dtype=torch.int64) + 1 + 2 * rank
1014
+ >>> tensor = torch.stack([tensor, tensor])
1015
+ >>> tensor
1016
+ tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) # Rank 0
1017
+ tensor([[3, 4, 5, 6], [3, 4, 5, 6]]) # Rank 1
1018
+ tensor([[5, 6, 7, 8], [5, 6, 7, 8]]) # Rank 2
1019
+ tensor([[7, 8, 9, 10], [7, 8, 9, 10]]) # Rank 3
1020
+ >>> sharding_dim = 0
1021
+ >>> spec = ChunkShardingSpec(
1022
+ dim=sharding_dim,
1023
+ placements=[
1024
+ "rank:0/cuda:0",
1025
+ "rank:1/cuda:1",
1026
+ "rank:2/cuda:2",
1027
+ "rank:3/cuda:3",
1028
+ ],
1029
+ )
1030
+ >>> current_offsets = [0] * 2
1031
+ >>> current_offsets[0] = rank * 2
1032
+ >>> shard_metadata = ShardMetadata(
1033
+ shard_offsets=copy.deepcopy(current_offsets),
1034
+ shard_sizes=tensor.size(),
1035
+ placement=spec.placements[rank],
1036
+ )
1037
+ >>> local_shards = [
1038
+ Shard(
1039
+ tensor=tensor,
1040
+ metadata=shard_metadata,
1041
+ )
1042
+ ]
1043
+ >>> st = ShardedTensor._init_from_local_shards(local_shards, tensor.size())
1044
+ >>> sharding_dim = 1
1045
+ >>> resharding_spec = ChunkShardingSpec(
1046
+ dim=sharding_dim,
1047
+ placements=[
1048
+ "rank:0/cuda:0",
1049
+ "rank:1/cuda:1",
1050
+ "rank:2/cuda:2",
1051
+ "rank:3/cuda:3",
1052
+ ],
1053
+ )
1054
+ >>> st.reshard(resharding_spec)
1055
+ >>> tensor = st.local_shards()[0].tensor
1056
+ >>> tensor
1057
+ tensor([[1], [1], [3], [3], [5], [5], [7], [7]]) # Rank 0
1058
+ tensor([[2], [2], [4], [4], [6], [6], [8], [8]]) # Rank 1
1059
+ tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2
1060
+ tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3
1061
+ """
1062
+ warnings.warn(DEPRECATE_MSG)
1063
+
1064
+ if (
1065
+ not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or
1066
+ not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec)
1067
+ ):
1068
+ raise NotImplementedError("Only ChunkShardingSpec supported for reshard.")
1069
+ if (len(self.local_shards()) != 1):
1070
+ raise NotImplementedError("Only single local shard supported for reshard.")
1071
+
1072
+ if self._sharding_spec.dim == resharding_spec.dim: # type: ignore[attr-defined]
1073
+ if self._sharding_spec.placements == resharding_spec.placements: # type: ignore[attr-defined]
1074
+ return self
1075
+ else:
1076
+ local_shards, shards_metadata = reshuffle_local_shard(
1077
+ self.local_tensor(),
1078
+ self.size(), # type: ignore[arg-type]
1079
+ self._sharding_spec,
1080
+ resharding_spec,
1081
+ self._process_group,
1082
+ )
1083
+ else:
1084
+ local_shards, shards_metadata = reshard_local_shard(
1085
+ self.local_tensor(),
1086
+ self.size(), # type: ignore[arg-type]
1087
+ self._sharding_spec,
1088
+ resharding_spec,
1089
+ self._process_group,
1090
+ )
1091
+ self._local_shards = local_shards
1092
+ self._metadata.shards_metadata = shards_metadata
1093
+ self._sharding_spec = resharding_spec
1094
+ return self
1095
+
1096
+ def local_tensor(self) -> torch.Tensor:
1097
+ """
1098
+ Return local tensor for a sharded_tensor. For now we only support single local shard.
1099
+
1100
+ Returns:
1101
+ A :class:`torch.Tensor` of the local shard.
1102
+ """
1103
+ if len(self.local_shards()) != 1:
1104
+ raise NotImplementedError("Only single local shard is supported.")
1105
+ return self.local_shards()[0].tensor
1106
+
1107
+ @classmethod
1108
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
1109
+ def dispatch(st: ShardedTensor, func: Callable):
1110
+ # Dispatch to custom user provided op first if it exists.
1111
+ if func in _CUSTOM_SHARDED_OPS:
1112
+ return _CUSTOM_SHARDED_OPS[func](types, args, kwargs, st._process_group)
1113
+
1114
+ # Dispatch to custom sharding spec op if it has one.
1115
+ if _has_custom_op(st._sharding_spec, func):
1116
+ return _dispatch_custom_op(
1117
+ st._sharding_spec,
1118
+ func,
1119
+ types,
1120
+ args,
1121
+ kwargs,
1122
+ st._process_group
1123
+ )
1124
+
1125
+ if func in _SHARDED_OPS:
1126
+ return _SHARDED_OPS[func](types, args, kwargs, st._process_group)
1127
+
1128
+ raise RuntimeError(
1129
+ f"torch function '{func.__name__}', with args: {args} and "
1130
+ f"kwargs: {kwargs} not supported for ShardedTensor!")
1131
+
1132
+ warnings.warn(DEPRECATE_MSG)
1133
+ # Find ShardedTensor instance to get process_group and sharding_spec.
1134
+ st_instance = None
1135
+
1136
+ def find_sharded_tensor(e):
1137
+ nonlocal st_instance
1138
+ if st_instance is None and isinstance(e, ShardedTensor):
1139
+ st_instance = e
1140
+
1141
+ pytree.tree_map_(find_sharded_tensor, args)
1142
+ pytree.tree_map_(find_sharded_tensor, kwargs)
1143
+
1144
+ if st_instance is not None:
1145
+ return dispatch(st_instance, func)
1146
+
1147
+ raise RuntimeError(
1148
+ f"torch function '{func.__name__}', with args: {args} and "
1149
+ f"kwargs: {kwargs} not supported for ShardedTensor!")
1150
+
1151
+ def is_pinned(self) -> bool: # type: ignore[override]
1152
+ """
1153
+ Returns True if the sharded tensor (each local shard) resides in pinned memory.
1154
+ """
1155
+ return self._metadata.tensor_properties.pin_memory
1156
+
1157
+ def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
1158
+ self._remote_shards[rpc_rank] = remote_shards
1159
+
1160
+ def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
1161
+ """
1162
+ Returns a Dict[int, RRef] with keys being the RPC rank and values
1163
+ being RRefs to shards on that rank. Need to initialize the
1164
+ RPC framework for this functionality.
1165
+
1166
+ Raises an exception if ShardedTensor was created with ``init_rrefs=False``
1167
+ """
1168
+ if not self._init_rrefs:
1169
+ raise RuntimeError(
1170
+ 'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
1171
+ )
1172
+ return self._remote_shards
1173
+
1174
+ def __hash__(self):
1175
+ return id(self)
1176
+
1177
+ def __repr__(self):
1178
+ return f'ShardedTensor({self._metadata})'
1179
+
1180
+ @dataclass
1181
+ class ProcessGroupState:
1182
+ """
1183
+ State for ser-de of process group
1184
+ """
1185
+ local_rank: int
1186
+ global_rank: int
1187
+ local_world_size: int
1188
+ global_world_size: int
1189
+
1190
+ def __getstate__(self):
1191
+ pg_state = ShardedTensor.ProcessGroupState(
1192
+ distributed_c10d.get_rank(self._process_group),
1193
+ distributed_c10d.get_rank(),
1194
+ distributed_c10d.get_world_size(self._process_group),
1195
+ distributed_c10d.get_world_size(),
1196
+ )
1197
+
1198
+ return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs
1199
+
1200
+ def __setstate__(self, state):
1201
+ self._sharded_tensor_id = None
1202
+ if not distributed_c10d.is_initialized():
1203
+ raise RuntimeError(
1204
+ 'Need to initialize default process group using '
1205
+ '"init_process_group" before loading ShardedTensor')
1206
+
1207
+ self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state
1208
+
1209
+ # Setup process group
1210
+ from torch.distributed._shard.api import _get_current_process_group
1211
+ self._process_group = _get_current_process_group()
1212
+
1213
+ # Validate process group.
1214
+ local_rank = distributed_c10d.get_rank(self._process_group)
1215
+ if pg_state.local_rank != local_rank:
1216
+ raise RuntimeError(
1217
+ f'Local rank at save time was {pg_state.local_rank}, but at '
1218
+ f'load time was {local_rank}')
1219
+
1220
+ global_rank = distributed_c10d.get_rank()
1221
+ if pg_state.global_rank != global_rank:
1222
+ raise RuntimeError(
1223
+ f'Global rank at save time was {pg_state.global_rank}, but at '
1224
+ f'load time was {global_rank}')
1225
+
1226
+ local_world_size = distributed_c10d.get_world_size(self._process_group)
1227
+ if pg_state.local_world_size != local_world_size:
1228
+ raise RuntimeError(
1229
+ f'Local world size at save time was {pg_state.local_world_size}, '
1230
+ f'but at load time was {local_world_size}')
1231
+
1232
+ global_world_size = distributed_c10d.get_world_size()
1233
+ if pg_state.global_world_size != global_world_size:
1234
+ raise RuntimeError(
1235
+ f'Global world size at save time was {pg_state.global_world_size}, '
1236
+ f'but at load time was {global_world_size}')
1237
+
1238
+ self._post_init()
1239
+
1240
+
1241
+ def _create_tensor_from_params(*size, local_device, tensor_properties: TensorProperties):
1242
+ """ Helper to construct tensor from size, device and common params. """
1243
+ dtype = tensor_properties.dtype
1244
+ layout = tensor_properties.layout
1245
+ requires_grad = tensor_properties.requires_grad
1246
+ memory_format = tensor_properties.memory_format
1247
+ pin_memory = tensor_properties.pin_memory
1248
+
1249
+ return torch.empty(
1250
+ *size, dtype=dtype, layout=layout,
1251
+ device=local_device, requires_grad=requires_grad,
1252
+ memory_format=memory_format, pin_memory=pin_memory
1253
+ )
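
Since ``gather()`` above is an SPMD collective (every rank must call it, and only ``dst`` supplies ``out``), a short sketch of the calling convention may help. It is an illustration only, assuming a 2-rank Gloo (CPU) process group has already been initialized via ``dist.init_process_group``:

import torch
import torch.distributed as dist
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

# CPU placements keep the sketch backend-agnostic; real jobs often use cuda placements.
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cpu", "rank:1/cpu"])
st = sharded_tensor.ones(spec, 4, 4)

rank = dist.get_rank()
# Only the destination rank allocates the full-size output; every other rank passes None.
out = torch.empty(st.size(), dtype=st.dtype) if rank == 0 else None
st.gather(dst=0, out=out)  # all ranks participate in this call
if rank == 0:
    print(out)  # the reassembled 4x4 global tensor
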
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py ADDED
@@ -0,0 +1,37 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import logging
10
+ from typing import List, Tuple
11
+
12
+ from torch.distributed._shard.sharded_tensor.logging_handlers import (
13
+ _log_handlers,
14
+ )
15
+
16
+ __all__: List[str] = []
17
+
18
+
19
+ def _get_or_create_logger() -> logging.Logger:
20
+ logging_handler, log_handler_name = _get_logging_handler()
21
+ logger = logging.getLogger(f"sharding-spec-{log_handler_name}")
22
+ logger.setLevel(logging.DEBUG)
23
+ formatter = logging.Formatter(
24
+ "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
25
+ )
26
+ logging_handler.setFormatter(formatter)
27
+ logger.propagate = False
28
+ logger.addHandler(logging_handler)
29
+ return logger
30
+
31
+
32
+ def _get_logging_handler(
33
+ destination: str = "default",
34
+ ) -> Tuple[logging.Handler, str]:
35
+ log_handler = _log_handlers[destination]
36
+ log_handler_name = type(log_handler).__name__
37
+ return (log_handler, log_handler_name)
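
For reference, a short sketch of how the helper above resolves its handler (illustration only; ``_get_or_create_logger`` is private, and the default destination is the NullHandler registered in logging_handlers.py):

from torch.distributed._shard.sharded_tensor.logger import _get_or_create_logger

logger = _get_or_create_logger()        # named "sharding-spec-NullHandler" with the default handler
logger.debug("inferred sharding spec")  # a no-op unless a real handler replaces the default
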
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py ADDED
@@ -0,0 +1,16 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import logging
10
+ from typing import Dict, List
11
+
12
+ __all__: List[str] = []
13
+
14
+ _log_handlers: Dict[str, logging.Handler] = {
15
+ "default": logging.NullHandler(),
16
+ }
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py ADDED
@@ -0,0 +1,82 @@
1
+ from dataclasses import dataclass, field
2
+ from enum import Enum
3
+ from typing import List
4
+
5
+ import torch
6
+ from torch.distributed._shard.metadata import ShardMetadata
7
+
8
+ class MEM_FORMAT_ENCODING(Enum):
9
+ TORCH_CONTIGUOUS_FORMAT = 0
10
+ TORCH_CHANNELS_LAST = 1
11
+ TORCH_PRESERVE_FORMAT = 2
12
+
13
+ @dataclass
14
+ class TensorProperties:
15
+ """ Properties used to create :class:`Tensor` """
16
+
17
+ # Regular tensor fields
18
+ dtype: torch.dtype = field(default=torch.get_default_dtype())
19
+ layout: torch.layout = field(default=torch.strided)
20
+ requires_grad: bool = False
21
+ memory_format: torch.memory_format = field(default=torch.contiguous_format)
22
+ pin_memory: bool = False
23
+
24
+ def __getstate__(self):
25
+ # torch.memory_format cannot be pickled directly, so encode it as an enum.
26
+ memory_format = self.memory_format
27
+ if memory_format == torch.contiguous_format:
28
+ mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT
29
+ elif memory_format == torch.channels_last:
30
+ mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST
31
+ elif memory_format == torch.preserve_format:
32
+ mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT
33
+ else:
34
+ raise RuntimeError(f'Invalid torch.memory_format: {memory_format}')
35
+
36
+ return (
37
+ self.dtype,
38
+ self.layout,
39
+ self.requires_grad,
40
+ mem_format_encoding,
41
+ self.pin_memory,
42
+ )
43
+
44
+ def __setstate__(
45
+ self,
46
+ state,
47
+ ):
48
+ (self.dtype, self.layout, self.requires_grad, mem_format_encoding, self.pin_memory) = state
49
+
50
+ if mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT:
51
+ memory_format = torch.contiguous_format
52
+ elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST:
53
+ memory_format = torch.channels_last
54
+ elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT:
55
+ memory_format = torch.preserve_format
56
+ else:
57
+ raise RuntimeError(f'Invalid torch.memory_format encoding: {mem_format_encoding}')
58
+
59
+ self.memory_format = memory_format
60
+
61
+ @staticmethod
62
+ def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties":
63
+ return TensorProperties(
64
+ dtype=tensor.dtype,
65
+ layout=tensor.layout,
66
+ requires_grad=tensor.requires_grad,
67
+ memory_format=torch.contiguous_format,
68
+ pin_memory=tensor.is_pinned()
69
+ )
70
+ @dataclass
71
+ class ShardedTensorMetadata:
72
+ """
73
+ Represents metadata for :class:`ShardedTensor`
74
+ """
75
+
76
+ # Metadata about each shard of the Tensor
77
+ shards_metadata: List[ShardMetadata] = field(default_factory=list)
78
+
79
+ # Size of each dim of the overall Tensor.
80
+ size: torch.Size = field(default=torch.Size([]))
81
+
82
+ tensor_properties: TensorProperties = field(default_factory=TensorProperties)
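
A short round-trip sketch of the pickling hooks above: `torch.memory_format` values are not picklable, so `__getstate__` encodes them via `MEM_FORMAT_ENCODING` and `__setstate__` restores them. The example below is illustrative only.

import pickle

import torch
from torch.distributed._shard.sharded_tensor.metadata import (
    ShardedTensorMetadata,
    TensorProperties,
)

props = TensorProperties.create_from_tensor(torch.randn(4, 8))
restored = pickle.loads(pickle.dumps(props))
# The memory_format singleton survives the enum round trip.
assert restored.memory_format is torch.contiguous_format
assert restored.dtype == props.dtype and restored.pin_memory == props.pin_memory

meta = ShardedTensorMetadata(size=torch.Size([4, 8]), tensor_properties=props)
print(len(meta.shards_metadata))  # 0 until per-shard metadata is appended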
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py ADDED
@@ -0,0 +1,248 @@
1
+ import copy
2
+ from typing import List, Tuple
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ from torch._C._distributed_c10d import (
7
+ ProcessGroup,
8
+ )
9
+ import torch.distributed._shard.sharding_spec as shard_spec
10
+ from torch.distributed._shard.sharding_spec._internals import (
11
+ get_split_size,
12
+ get_chunked_dim_size,
13
+ )
14
+ from torch.distributed.nn.functional import (
15
+ all_to_all,
16
+ all_to_all_single,
17
+ )
18
+ from torch.distributed._shard.metadata import ShardMetadata
19
+
20
+ from .shard import Shard
21
+
22
+
23
+ def get_idx_from_placements(placements, current_rank) -> int:
24
+ """
25
+ Return the position of the current rank in the given placements.
26
+
27
+ Args:
28
+ placements(List[Union[_remote_device, str]]):
29
+ Specifies the placement of each shard of the Tensor. The size of
30
+ the list represents the number of shards to be created. This could
31
+ be a list of
32
+ :class:`torch.distributed._remote_device`'s. This list
33
+ could also contain a string which represents remote
34
+ device as accepted by
35
+ :class:`torch.distributed._remote_device`
36
+ current_rank (int): number of current device.
37
+
38
+ Returns:
39
+ A int which contains the position of current device in the placement list.
40
+ """
41
+ for idx, placement in enumerate(placements): # type: ignore[attr-defined]
42
+ if current_rank == placement.rank(): # type: ignore[union-attr]
43
+ return idx
44
+ raise RuntimeError('current_rank not in the placement.')
45
+
46
+
47
+ def build_reshard_metadata(
48
+ st_size: torch.Size,
49
+ sharding_spec: shard_spec.ShardingSpec,
50
+ world_size: int,
51
+ ) -> Tuple[List[ShardMetadata], List[int]]:
52
+ """
53
+ Based on the given sharding spec, we calculate the offset and local shard size.
54
+ We then build a ShardMetadata on top of the calculation result.
55
+
56
+ Args:
57
+ st_size (torch.Size): The size of the sharded tensor.
58
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
59
+ specification describing how the tensor is sharded.
60
+ world_size (int): number of ranks.
61
+
62
+ Returns:
63
+ A Tuple of the followings:
64
+ A List[`ShardMetadata`] which contains the metadata for the shard, including
65
+ offsets, lengths and device placement.
66
+ A List[int] which contains the ranks in the order of placement.
67
+ """
68
+ shard_dim = int(sharding_spec.dim) # type: ignore[attr-defined]
69
+ shards_metadata = [None] * world_size
70
+ ranks = []
71
+ offsets = [0] * len(st_size)
72
+ split_size = get_split_size(st_size[shard_dim], world_size)
73
+ for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined]
74
+ ranks.append(placement.rank())
75
+ sharded_dim_size = get_chunked_dim_size(st_size[shard_dim], split_size, idx)
76
+ local_tensor_size = list(st_size)
77
+ local_tensor_size[shard_dim] = sharded_dim_size
78
+ shards_metadata[placement.rank()] = ShardMetadata( # type: ignore[call-overload]
79
+ shard_offsets=copy.deepcopy(offsets),
80
+ shard_sizes=local_tensor_size,
81
+ placement=placement,
82
+ )
83
+ offsets[shard_dim] += sharded_dim_size
84
+ return shards_metadata, ranks # type: ignore[return-value]
85
+
86
+
87
+ def reshuffle_local_shard(
88
+ local_shard: torch.Tensor,
89
+ st_size: torch.Size,
90
+ sharding_spec: shard_spec.ShardingSpec,
91
+ resharding_spec: shard_spec.ShardingSpec,
92
+ pg: ProcessGroup,
93
+ ) -> Tuple[List[Shard], List[ShardMetadata]]:
94
+ """
95
+ Reshuffle the local shard directly when the reshard dim is same as the original
96
+ sharding dim. Logically we do this in two step:
97
+ 1. To collect all shards based on original sharding spec.
98
+ 2. Reshard the tensor based on the given resharding spec.
99
+
100
+ In reality, we consolidate the two steps into one by sending the local tensor to
101
+ the new shard directly based on the resharding spec.
102
+
103
+ Args:
104
+ local_shard (Tensor): Local tensor stored in the current rank.
105
+ st_size (torch.Size): The size of the sharded tensor.
106
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
107
+ specification describing how the tensor is sharded originally.
108
+ resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
109
+ specification describing how the tensor will be resharded.
110
+ pg (ProcessGroup): The process group to aggregate on.
111
+
112
+ Returns:
113
+ A Tuple of the followings:
114
+ A List[`Shard`] which contains the local tensor and its metadata.
115
+ A List[`ShardMetadata`] which contains the metadata for the shard, including
116
+ offsets, lengths and device placement.
117
+ """
118
+ current_rank = dist.get_rank(pg)
119
+ world_size = dist.get_world_size(pg)
120
+ # Build shards_metadata first.
121
+ shards_metadata, ranks = build_reshard_metadata(
122
+ st_size, resharding_spec, world_size
123
+ )
124
+ # Get input split size for all2all.
125
+ reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
126
+ split_size = get_split_size(st_size[reshard_dim], world_size)
127
+ input_split_sizes = [0] * world_size
128
+ idx = get_idx_from_placements(sharding_spec.placements, current_rank) # type: ignore[attr-defined]
129
+ new_rank = resharding_spec.placements[idx].rank() # type: ignore[union-attr, attr-defined]
130
+ input_split_sizes[new_rank] = local_shard.size(reshard_dim)
131
+ # Get output split size for all2all.
132
+ output_split_sizes = [0] * world_size
133
+ new_idx = ranks.index(current_rank)
134
+ sharded_dim_size = get_chunked_dim_size(st_size[reshard_dim], split_size, new_idx)
135
+ output_split_sizes[new_rank] = sharded_dim_size
136
+ # Get gathered_input for all2all.
137
+ local_shard = local_shard.transpose(0, reshard_dim).contiguous()
138
+ gathered_input_size = list(local_shard.size())
139
+ gathered_input_size[0] = sharded_dim_size
140
+ gathered_input = torch.empty(gathered_input_size, device=local_shard.device, dtype=local_shard.dtype)
141
+ # all2all.
142
+ local_shard = all_to_all_single(
143
+ gathered_input,
144
+ local_shard,
145
+ input_split_sizes=input_split_sizes,
146
+ output_split_sizes=output_split_sizes,
147
+ group=pg,
148
+ )
149
+ local_tensor = local_shard.transpose(0, reshard_dim).contiguous()
150
+ local_shards = [Shard(local_tensor, shards_metadata[current_rank])]
151
+ return local_shards, shards_metadata
152
+
153
+
154
+ def reshard_local_shard(
155
+ local_tensor: torch.Tensor,
156
+ st_size: torch.Size,
157
+ sharding_spec: shard_spec.ShardingSpec,
158
+ resharding_spec: shard_spec.ShardingSpec,
159
+ pg: ProcessGroup,
160
+ ) -> Tuple[List[Shard], List[ShardMetadata]]:
161
+ """
162
+ Reshard a sharded tensor given the ``resharding_spec``. When the reshard dim is
163
+ different from the original sharding dim, we need to do two steps logically:
164
+ 1. To collect all shards based on original sharding spec.
165
+ 2. Reshard the tensor based on the given resharding spec.
166
+
167
+ In reality, we consolidate the two steps into one by sending each rank the new
168
+ shard based on the resharding spec.
169
+
170
+ Args:
171
+ local_tensor (Tensor): Local tensor stored in the current rank.
172
+ st_size (torch.Size): The size of the sharded tensor.
173
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
174
+ specification describing how the tensor is sharded originally.
175
+ resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
176
+ specification describing how the tensor will be resharded.
177
+ pg (ProcessGroup): The process group to aggregate on.
178
+
179
+ Returns:
180
+ A Tuple of the followings:
181
+ A List[`Shard`] which contains the local tensor and its metadata.
182
+ A List[`ShardMetadata`] which contains the metadata for the shard, including
183
+ offsets, lengths and device placement.
184
+ """
185
+ current_rank = dist.get_rank(pg)
186
+ world_size = dist.get_world_size(pg)
187
+ current_sharding_dim = int(sharding_spec.dim) # type: ignore[attr-defined]
188
+ reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
189
+
190
+ # Build shards_metadata first.
191
+ shards_metadata, ranks = build_reshard_metadata(
192
+ st_size, resharding_spec, world_size
193
+ )
194
+
195
+ # Compute expected size
196
+ input_split_sizes = []
197
+ for metadata in shards_metadata:
198
+ input_split_sizes.append(metadata.shard_sizes[reshard_dim])
199
+ rearrange_input = any(ranks[i] > ranks[i + 1] for i in range(len(ranks) - 1))
200
+
201
+ if rearrange_input:
202
+ # Need to re-arrange reshard_dim of local_tensor before all2all.
203
+ indices: List[int] = []
204
+ for metadata in shards_metadata:
205
+ offset_start_idx = metadata.shard_offsets[reshard_dim]
206
+ split_size = metadata.shard_sizes[reshard_dim]
207
+ indices += range(offset_start_idx, offset_start_idx + split_size)
208
+ local_tensor = local_tensor.index_select(
209
+ reshard_dim, torch.tensor(indices, device=local_tensor.device)
210
+ )
211
+
212
+ # Because reshard_dim != original shard_dim. We need to compute the
213
+ # size of tensor from each rank.
214
+ output_tensor_list = [torch.tensor(1)] * world_size
215
+ split_size = get_split_size(st_size[current_sharding_dim], world_size)
216
+ rearrange_output_list = False
217
+ indices = []
218
+ for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined]
219
+ sharded_dim_size = get_chunked_dim_size(
220
+ st_size[current_sharding_dim], split_size, idx
221
+ )
222
+ output_tensor_size = list(st_size)
223
+ output_tensor_size[current_sharding_dim] = sharded_dim_size
224
+ output_tensor_size[reshard_dim] = input_split_sizes[current_rank]
225
+ output_tensor_list[
226
+ placement.rank()
227
+ ] = torch.empty( # type: ignore[union-attr, index]
228
+ output_tensor_size, device=local_tensor.device, dtype=local_tensor.dtype
229
+ )
230
+ indices.append(placement.rank()) # type: ignore[union-attr, index, arg-type]
231
+ if idx != placement.rank(): # type: ignore[union-attr]
232
+ rearrange_output_list = True
233
+
234
+ # Perform autograd enabled all2all.
235
+ input_tensor_tuple = torch.split(local_tensor, input_split_sizes, dim=reshard_dim)
236
+ input_tensor_list = [tensor.contiguous() for tensor in input_tensor_tuple]
237
+ output_tensor_list = all_to_all(
238
+ output_tensor_list,
239
+ input_tensor_list,
240
+ group=pg,
241
+ )
242
+
243
+ if rearrange_output_list:
244
+ # Need to re-arrange original shard_dim of output_tensor_list.
245
+ output_tensor_list = [output_tensor_list[idx] for idx in indices] # type: ignore[call-overload]
246
+ local_tensor = torch.cat(output_tensor_list, dim=current_sharding_dim)
247
+ local_shards = [Shard(local_tensor, shards_metadata[current_rank])]
248
+ return local_shards, shards_metadata
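
The all-to-all paths above need an initialized process group, but the metadata construction can be exercised on its own, which helps show what `build_reshard_metadata` produces. A standalone sketch with an illustrative 4-rank `ChunkShardingSpec`:

import torch
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed._shard.sharded_tensor.reshard import build_reshard_metadata

# Hypothetical layout: chunk dim 0 of a 16 x 4 tensor across four CPU ranks.
spec = shard_spec.ChunkShardingSpec(
    dim=0,
    placements=[f"rank:{r}/cpu" for r in range(4)],
)
shards_metadata, ranks = build_reshard_metadata(torch.Size([16, 4]), spec, world_size=4)
print(ranks)  # [0, 1, 2, 3]
for md in shards_metadata:
    print(md.shard_offsets, md.shard_sizes)  # [0, 0] [4, 4], [4, 0] [4, 4], ...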
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py ADDED
@@ -0,0 +1,58 @@
1
+ from dataclasses import dataclass
2
+ from typing import List
3
+
4
+ import torch
5
+ from torch.distributed._shard.metadata import ShardMetadata
6
+ from torch.distributed.remote_device import _remote_device
7
+
8
+
9
+ @dataclass
10
+ class Shard:
11
+ """
12
+ Container which holds the data for a shard as a Tensor and also
13
+ the associated metadata for that shard.
14
+
15
+ Args:
16
+ tensor(torch.Tensor): Local tensor for the shard.
17
+ metadata(:class `torch.distributed._shard.sharded_tensor.ShardMetadata`):
18
+ The metadata for the shard, including offsets, lengths and device placement.
19
+ """
20
+ __slots__ = ['tensor', 'metadata']
21
+ tensor: torch.Tensor
22
+ metadata: ShardMetadata
23
+
24
+ def __post_init__(self):
25
+ # verification between local tensor and metadata
26
+ if list(self.tensor.size()) != self.metadata.shard_sizes:
27
+ raise ValueError(
28
+ "Shard tensor size does not match with metadata.shard_lengths! "
29
+ f"Found shard tensor size: {list(self.tensor.size())}, "
30
+ f"metadata.shard_lengths: {self.metadata.shard_sizes}, "
31
+ )
32
+ placement_device = self.metadata.placement
33
+ if placement_device is not None and placement_device.device() != self.tensor.device:
34
+ raise ValueError(
35
+ f"Local shard tensor device does not match with local Shard's placement! "
36
+ f"Found local shard tensor device: {self.tensor.device}, "
37
+ f"local shard metadata placement device: {placement_device.device()}"
38
+ )
39
+
40
+ @classmethod
41
+ def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: List[int], rank: int):
42
+ """
43
+ Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank.
44
+
45
+ Args:
46
+ tensor(torch.Tensor): Local tensor for the shard.
47
+ shard_offsets(List[int]): List of integers specify the offset
48
+ of the shard on each dimension.
49
+ rank(int): Specify the rank for the shard.
50
+ """
51
+ shard_sizes = list(tensor.size())
52
+ placement = _remote_device(f"rank:{rank}/{str(tensor.device)}")
53
+ shard_meta = ShardMetadata(
54
+ shard_offsets=shard_offsets,
55
+ shard_sizes=shard_sizes,
56
+ placement=placement
57
+ )
58
+ return Shard(tensor, shard_meta)
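
A small usage sketch of the helper above; `__post_init__` rejects a tensor whose size or device disagrees with the metadata, so the offsets and rank below are the only extra inputs needed:

import torch
from torch.distributed._shard.sharded_tensor.shard import Shard

# A local 4 x 8 piece that starts at row 4 of the global tensor, owned by rank 1.
local = torch.zeros(4, 8)
shard = Shard.from_tensor_and_offsets(local, shard_offsets=[4, 0], rank=1)
print(shard.metadata.shard_offsets, shard.metadata.shard_sizes)  # [4, 0] [4, 8]
print(shard.metadata.placement.rank())  # 1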
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py ADDED
@@ -0,0 +1,211 @@
1
+ import collections.abc
2
+ import copy
3
+ from typing import Optional, List, Sequence
4
+
5
+ import torch
6
+ from torch.distributed import distributed_c10d
7
+ from torch.distributed import rpc
8
+ from torch.distributed._shard.sharding_spec._internals import (
9
+ check_tensor,
10
+ validate_non_overlapping_shards_metadata,
11
+ )
12
+
13
+ from torch.distributed._shard.metadata import ShardMetadata
14
+ from .metadata import TensorProperties, ShardedTensorMetadata
15
+ from .shard import Shard
16
+
17
+ def _parse_and_validate_remote_device(pg, remote_device):
18
+ if remote_device is None:
19
+ raise ValueError("remote device is None")
20
+
21
+ worker_name = remote_device.worker_name()
22
+ rank = remote_device.rank()
23
+ device = remote_device.device()
24
+
25
+ # Validate rank, skip validation if rank is not part of process group.
26
+ if not distributed_c10d._rank_not_in_group(pg):
27
+ if rank is not None and (rank < 0 or rank >= distributed_c10d.get_world_size(pg)):
28
+ raise ValueError(f'Invalid rank: {rank}')
29
+
30
+ if worker_name is not None:
31
+ if not rpc._is_current_rpc_agent_set():
32
+ raise RuntimeError(f'RPC framework needs to be initialized for using worker names: {worker_name}')
33
+
34
+ workers = rpc._get_current_rpc_agent().get_worker_infos()
35
+ for worker in workers:
36
+ if worker.name == worker_name:
37
+ return worker.id, device
38
+
39
+ raise ValueError(f'Invalid worker name: {worker_name}')
40
+
41
+ return rank, device
42
+
43
+ def _validate_output_tensor_for_gather(
44
+ my_rank: int,
45
+ dst_rank: int,
46
+ size: torch.Size,
47
+ dst_tensor: Optional[torch.Tensor],
48
+ ) -> None:
49
+ if dst_rank == my_rank:
50
+ if dst_tensor is None:
51
+ raise ValueError(
52
+ f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}"
53
+ )
54
+ if tuple(size) != (dst_tensor.size()):
55
+ raise ValueError(
56
+ f"Argument ``dst_tensor`` have size {tuple(dst_tensor.size())},"
57
+ f"but should be {tuple(size)}"
58
+ )
59
+ elif dst_tensor:
60
+ raise ValueError(
61
+ "Argument ``dst_tensor`` must NOT be specified "
62
+ "on non-destination ranks."
63
+ )
64
+
65
+ def _flatten_tensor_size(size) -> torch.Size:
66
+ """
67
+ Checks if tensor size is valid, then flatten/return a torch.Size object.
68
+ """
69
+ if len(size) == 1 and isinstance(size[0], collections.abc.Sequence):
70
+ dims = list(*size)
71
+ else:
72
+ dims = list(size)
73
+
74
+ for dim in dims:
75
+ if not isinstance(dim, int):
76
+ raise TypeError(f'size has to be a sequence of ints, found: {dims}')
77
+
78
+ return torch.Size(dims)
79
+
80
+ def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True):
81
+ if is_local:
82
+ assert isinstance(ranks, int)
83
+ if expected != actual:
84
+ raise ValueError(f"Local shards' tensor {prop_name} property need to be the same on rank:{ranks}! "
85
+ f"Found one local shard tensor {prop_name}={expected}, "
86
+ f"the other local shard tensor {prop_name}={actual}.")
87
+ else:
88
+ # compare failure check across ranks, ranks list should have two rank
89
+ assert len(ranks) == 2
90
+ if expected != actual:
91
+ raise ValueError(f"ShardedTensor {prop_name} property does not match from different ranks! "
92
+ f"Found {prop_name}={expected} on rank:{ranks[0]}, "
93
+ f"and {prop_name}={actual} on rank:{ranks[1]}.")
94
+
95
+
96
+ def build_metadata_from_local_shards(
97
+ local_shards: List[Shard],
98
+ global_size: torch.Size,
99
+ current_rank: int,
100
+ pg: distributed_c10d.ProcessGroup
101
+ ) -> ShardedTensorMetadata:
102
+
103
+ assert len(local_shards) > 0, "must have local shards!"
104
+ local_shard_metadatas: List[ShardMetadata] = []
105
+
106
+ first_shard_dtype = local_shards[0].tensor.dtype
107
+ first_shard_layout = local_shards[0].tensor.layout
108
+ first_shard_requires_grad = local_shards[0].tensor.requires_grad
109
+ first_shard_is_pinned = local_shards[0].tensor.is_pinned()
110
+
111
+ # 1). Validate local tensors and associated metadatas
112
+ for local_shard in local_shards:
113
+ local_shard_tensor = local_shard.tensor
114
+ local_shard_meta = local_shard.metadata
115
+ local_shard_metadatas.append(local_shard_meta)
116
+ rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement)
117
+
118
+ if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout:
119
+ raise ValueError(
120
+ f'Only torch.strided layout is currently supported, but found '
121
+ f'{local_shard_tensor.layout} on rank:{current_rank}!'
122
+ )
123
+
124
+ if not local_shard_tensor.is_contiguous():
125
+ raise ValueError('Only torch.contiguous_format memory_format is currently supported!')
126
+
127
+ if rank != current_rank:
128
+ raise ValueError(
129
+ f"Local shard metadata's rank does not match with the rank in its process group! "
130
+ f'Found current rank in the process group: {current_rank}, '
131
+ f"local ShardMetadata placement's rank: {rank}"
132
+ )
133
+ if local_shard_tensor.device != local_device:
134
+ raise ValueError(
135
+ f"Local shard tensor device does not match with local Shard's placement! "
136
+ f"Found local shard tensor device: {local_shard_tensor.device}, "
137
+ f"local shard metadata placement device: {local_device}"
138
+ )
139
+
140
+ _raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
141
+ _raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank)
142
+ _raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank)
143
+ _raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank)
144
+
145
+ # 2). Build a "local" ShardedTensorMetadata with all local shards on this rank, then
146
+ # do all_gather to collect local_sharded_tensor_metadata from all ranks
147
+ local_tensor_properties = TensorProperties(
148
+ dtype=first_shard_dtype,
149
+ layout=first_shard_layout,
150
+ requires_grad=first_shard_requires_grad,
151
+ memory_format=torch.contiguous_format,
152
+ pin_memory=first_shard_is_pinned
153
+ )
154
+
155
+ local_sharded_tensor_metadata = ShardedTensorMetadata(
156
+ shards_metadata=local_shard_metadatas,
157
+ size=global_size,
158
+ tensor_properties=local_tensor_properties)
159
+
160
+ return local_sharded_tensor_metadata
161
+
162
+
163
+ def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]):
164
+ global_sharded_tensor_metadata = None
165
+ global_metadata_rank = 0
166
+
167
+ for rank, rank_metadata in enumerate(gathered_metadatas):
168
+ if rank_metadata is None:
169
+ continue
170
+
171
+ if global_sharded_tensor_metadata is None:
172
+ global_sharded_tensor_metadata = copy.deepcopy(rank_metadata)
173
+ global_metadata_rank = rank
174
+ else:
175
+ _raise_if_mismatch(global_sharded_tensor_metadata.size,
176
+ rank_metadata.size,
177
+ "global_size",
178
+ [global_metadata_rank, rank],
179
+ is_local=False)
180
+
181
+ # don't need to check layout and memory format as we already checked in local shards validation stage
182
+ _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype,
183
+ rank_metadata.tensor_properties.dtype,
184
+ "dtype",
185
+ [global_metadata_rank, rank],
186
+ is_local=False)
187
+
188
+ _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad,
189
+ rank_metadata.tensor_properties.requires_grad,
190
+ "requires_grad",
191
+ [global_metadata_rank, rank],
192
+ is_local=False)
193
+
194
+ _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory,
195
+ rank_metadata.tensor_properties.pin_memory,
196
+ "pin_memory",
197
+ [global_metadata_rank, rank],
198
+ is_local=False)
199
+ # pass all validations, extend shards metadata
200
+ global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata)
201
+
202
+ if global_sharded_tensor_metadata is not None:
203
+ # check if shards_metadata have overlap shards
204
+ validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata)
205
+
206
+ # check if the shards_metadata is compatible with global size of the sharded tensor.
207
+ check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size)
208
+ else:
209
+ raise ValueError("ShardedTensor have no local shards on all ranks!")
210
+
211
+ return global_sharded_tensor_metadata
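
For illustration, `build_global_metadata` can be driven with hand-built per-rank metadata (standing in for the gathered local metadata described above); mismatched tensor properties or overlapping shards would raise at this point:

import torch
from torch.distributed._shard.metadata import ShardMetadata
from torch.distributed._shard.sharded_tensor.metadata import (
    ShardedTensorMetadata,
    TensorProperties,
)
from torch.distributed._shard.sharded_tensor.utils import build_global_metadata

props = TensorProperties(dtype=torch.float32)
size = torch.Size([8, 4])

# One entry per rank, each describing its own shard of the 8 x 4 tensor.
gathered = [
    ShardedTensorMetadata(
        shards_metadata=[ShardMetadata([0, 0], [4, 4], placement="rank:0/cpu")],
        size=size,
        tensor_properties=props,
    ),
    ShardedTensorMetadata(
        shards_metadata=[ShardMetadata([4, 0], [4, 4], placement="rank:1/cpu")],
        size=size,
        tensor_properties=props,
    ),
]
global_meta = build_global_metadata(gathered)
print(len(global_meta.shards_metadata))  # 2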
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py ADDED
@@ -0,0 +1,27 @@
1
+ import abc
2
+ import torch.nn as nn
3
+
4
+ class Sharder(abc.ABC):
5
+ """
6
+ This is an interface which allows user to create more advanced
7
+ sharding strategies that are not easily be composed by the
8
+ `ShardingSpec`.
9
+
10
+ :class:`torch.distributed._shard.sharding_plan.ShardingPlan` could
11
+ take an object of the `Sharder` and call `shard` to shard the module,
12
+ then replace the original module with sharded module returned.
13
+ """
14
+ @abc.abstractmethod
15
+ def shard(self, module: nn.Module) -> nn.Module:
16
+ """
17
+ Shard a module base on the implementation of this method, and
18
+ return the sharded version of the module.
19
+
20
+ Args:
21
+ module (:class:`torch.nn.Module`):
22
+ The module to apply sharding to.
23
+ Returns:
24
+ A :class:`torch.nn.Module` object that represents a module
25
+ that's already been sharded.
26
+ """
27
+ pass
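
A bare-bones subclass just to make the contract concrete; a real `Sharder` would replace parameters or submodules with sharded equivalents before handing the module back (the class below is purely illustrative):

import torch.nn as nn
from torch.distributed._shard.sharder import Sharder

class NoOpSharder(Sharder):
    """Illustrative sharder that leaves the module untouched."""

    def shard(self, module: nn.Module) -> nn.Module:
        # A real implementation would swap nn.Parameters for sharded ones here.
        return module

module = NoOpSharder().shard(nn.Linear(16, 16))
print(type(module).__name__)  # Linear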
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .api import (
2
+ ShardingPlan,
3
+ ShardingPlanner
4
+ )
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (277 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc ADDED
Binary file (4.33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py ADDED
@@ -0,0 +1,86 @@
1
+ import abc
2
+ import torch.nn as nn
3
+
4
+ from dataclasses import dataclass
5
+ from typing import Dict, List, Optional, Union
6
+
7
+ from torch.distributed._shard.sharder import Sharder
8
+ from torch.distributed._shard.sharding_spec import ShardingSpec
9
+
10
+ @dataclass
11
+ class ShardingPlan:
12
+ """
13
+ Representation of a sharding plan, describes how to shard a module
14
+ across hosts. `plan` is used to shard module parameters according to the spec provided,
15
+ `output_plan` and `return_local_tensor` are optional, they are used to specify the output
16
+ layout of a module with a spec, and when to convert back to data parallel fashion.
17
+
18
+ Args:
19
+ plan (Dict[str, Union[:class:`torch.distributed._shard.sharding_spec.ShardingSpec`,
20
+ :class:`torch.distributed._shard.sharder.Sharder`]):
21
+ a dict describes how to shard a module, there're currently two ways to shard a module:
22
+ 1. directly shard a module parameter by a `ShardingSpec`, keyed by the name of
23
+ a parameter to a `ShardingSpec`.
24
+ 2. shard a submodule by applying a `Sharder` on it, keyed by the name of a module
25
+ to a `Sharder` object.
26
+ output_plan (Dict[str, :class:`torch.distributed._shard.sharding_spec.ShardingSpec`), optional):
27
+ a dict specifies the layout of a module's output which produces a ShardedTensor,
28
+ keyed by the name of module to ShardingSpec("" in key means the root module).
29
+ Default: `None`
30
+ return_local_tensor (List[str], optional): a list of string, each element enables
31
+ a module's sharded output to be returned as a Tensor from its local shards to
32
+ ensure further processing in a data parallel fashion. ("" in list means the
33
+ root module).
34
+ Default: None
35
+ Example:
36
+ Suppose we want to shard a module with two linear layers and then run it with DDP, we also
37
+ want to convert the output of the second linear layer back to DDP, we can do it as follows:
38
+
39
+ >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d)
40
+ >>> class MyModule(nn.Module):
41
+ >>> def __init__(self):
42
+ >>> super().__init__()
43
+ >>> self.fc1 = nn.Linear()
44
+ >>> self.gelu = nn.GELU()
45
+ >>> self.fc2 = nn.Linear()
46
+ >>> self.relu = nn.Linear()
47
+ >>>
48
+ >>> def forward(self, input):
49
+ >>> return self.relu(self.fc2(self.gelu(self.fc1(input))))
50
+
51
+
52
+ >>> # xdoctest: +SKIP("Undefined spec1, spec2)
53
+ >>> sharding_plan = ShardingPlan(
54
+ >>> plan={
55
+ >>> "fc1.weight": spec1,
56
+ >>> "fc2.weight": spec2
57
+ >>> },
58
+ >>> output_plan={
59
+ >>> "fc2": output_spec
60
+ >>> },
61
+ >>> return_local_tensor=["fc2"]
62
+ >>> )
63
+ """
64
+ plan: Dict[str, Union[ShardingSpec, Sharder]]
65
+ output_plan: Optional[Dict[str, ShardingSpec]] = None
66
+ return_local_tensor: Optional[List[str]] = None
67
+
68
+
69
+ class ShardingPlanner(abc.ABC):
70
+ """
71
+ Default ShardingPlanner interface, can be extended and
72
+ implement advanced sharding strategies.
73
+ """
74
+ @abc.abstractmethod
75
+ def build_plan(self, module: nn.Module) -> ShardingPlan:
76
+ """
77
+ Given a nn.Module, define how to shard the module across
78
+ ranks, return a ShardingPlan
79
+ Args:
80
+ module (:class:`torch.nn.Module`):
81
+ The module to apply sharding to.
82
+ Returns:
83
+ A :class:`torch.distributed._shard.sharding_plan.ShardingPlan` object that
84
+ represents how to shard the module.
85
+ """
86
+ pass
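
As a sketch of how the planner interface composes with the plan dataclass, the hypothetical planner below chunks every `nn.Linear` weight along dim 0 with a `ChunkShardingSpec`; the placements are illustrative, and in a multi-rank job the resulting plan would normally be handed to a sharding entry point such as `torch.distributed._shard.shard_module`:

import torch.nn as nn
from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

class ColwiseLinearPlanner(ShardingPlanner):
    """Illustrative planner: shard each Linear weight across the given ranks."""

    def __init__(self, placements):
        self.placements = placements

    def build_plan(self, module: nn.Module) -> ShardingPlan:
        plan = {}
        for name, submodule in module.named_modules():
            if isinstance(submodule, nn.Linear):
                key = f"{name}.weight" if name else "weight"
                plan[key] = ChunkShardingSpec(dim=0, placements=self.placements)
        return ShardingPlan(plan=plan)

planner = ColwiseLinearPlanner(["rank:0/cuda:0", "rank:1/cuda:1"])
plan = planner.build_plan(nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4)))
print(sorted(plan.plan))  # ['0.weight', '2.weight']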
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py ADDED
@@ -0,0 +1,342 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import Optional, Sequence
3
+
4
+ # Import all builtin dist tensor ops
5
+ import torch
6
+ import torch.distributed._tensor.ops
7
+ import torch.distributed._tensor.random as random
8
+ from torch.distributed._tensor._utils import compute_local_shape
9
+ from torch.distributed._tensor.api import distribute_module, distribute_tensor, DTensor
10
+ from torch.distributed._tensor.ops.utils import normalize_to_torch_size
11
+ from torch.distributed._tensor.placement_types import Placement, Replicate, Shard
12
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh
13
+
14
+ # All public APIs from dtensor package
15
+ __all__ = [
16
+ "DTensor",
17
+ "DeviceMesh",
18
+ "distribute_tensor",
19
+ "distribute_module",
20
+ "init_device_mesh,",
21
+ "Shard",
22
+ "Replicate",
23
+ ]
24
+
25
+
26
+ def _dtensor_init_helper(
27
+ init_op,
28
+ size: torch.Size,
29
+ device_mesh=None,
30
+ placements=None,
31
+ **kwargs,
32
+ ) -> DTensor:
33
+ # if device_mesh is None, use the one from mesh resources
34
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
35
+ kwargs["device"] = device_mesh.device_type
36
+
37
+ # set default placements to replicated if not specified
38
+ placements = placements or tuple(Replicate() for _ in range(device_mesh.ndim))
39
+
40
+ # check device_mesh against placements
41
+ assert device_mesh.ndim == len(
42
+ placements
43
+ ), "mesh dimension does not match the length of placements"
44
+
45
+ assert kwargs["layout"] == torch.strided, "layout value not supported!"
46
+ torch_stride = torch._prims_common.make_contiguous_strides_for(size)
47
+
48
+ # get local tensor shape
49
+ local_shape = compute_local_shape(size, device_mesh, placements)
50
+ # initialize the local tensor
51
+ if init_op == torch.full:
52
+ fill_value = kwargs.pop("fill_value", 0)
53
+ local_tensor = init_op(local_shape, fill_value, **kwargs)
54
+ elif init_op == torch.rand or init_op == torch.randn:
55
+ # this tensor meta is not used except `shape`
56
+ dtype = kwargs.get("dtype", torch.get_default_dtype())
57
+
58
+ from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta
59
+
60
+ tensor_meta = TensorMeta(size, (0,), dtype)
61
+ spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta)
62
+
63
+ if random.is_rng_supported_mesh(device_mesh) and not random._rng_tracker:
64
+ random._rng_tracker = random.OffsetBasedRNGTracker()
65
+
66
+ assert random._rng_tracker is not None
67
+ with random._rng_tracker._distribute_region(spec):
68
+ local_tensor = init_op(local_shape, **kwargs)
69
+ else:
70
+ local_tensor = init_op(local_shape, **kwargs)
71
+
72
+ return DTensor(
73
+ local_tensor=local_tensor,
74
+ device_mesh=device_mesh,
75
+ placements=tuple(placements),
76
+ shape=size,
77
+ dtype=local_tensor.dtype,
78
+ stride=torch_stride,
79
+ requires_grad=kwargs["requires_grad"],
80
+ )
81
+
82
+
83
+ def ones(
84
+ *size,
85
+ dtype: Optional[torch.dtype] = None,
86
+ layout: torch.layout = torch.strided,
87
+ requires_grad: bool = False,
88
+ device_mesh: Optional[DeviceMesh] = None,
89
+ placements: Optional[Sequence[Placement]] = None,
90
+ ) -> DTensor:
91
+ """
92
+ Returns a :class:`DTensor` filled with the scalar value 1, with the shape defined
93
+ by the variable argument ``size``.
94
+
95
+ Args:
96
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
97
+ Can be a variable number of arguments or a collection like a list or tuple.
98
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
99
+
100
+ Keyword args:
101
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
102
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
103
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
104
+ Default: ``torch.strided``.
105
+ requires_grad (bool, optional): If autograd should record operations on the
106
+ returned :class:`DTensor`. Default: ``False``.
107
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks
108
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
109
+
110
+ Returns:
111
+ A :class:`DTensor` object on each rank
112
+ """
113
+ torch_size = normalize_to_torch_size(size)
114
+
115
+ return _dtensor_init_helper(
116
+ torch.ones,
117
+ torch_size,
118
+ dtype=dtype,
119
+ layout=layout,
120
+ requires_grad=requires_grad,
121
+ device_mesh=device_mesh,
122
+ placements=placements,
123
+ )
124
+
125
+
126
+ def empty(
127
+ *size,
128
+ dtype: Optional[torch.dtype] = None,
129
+ layout: torch.layout = torch.strided,
130
+ requires_grad: bool = False,
131
+ device_mesh: Optional[DeviceMesh] = None,
132
+ placements: Optional[Sequence[Placement]] = None,
133
+ ) -> DTensor:
134
+ """
135
+ Returns a :class:`DTensor` filled with uninitialized data. The shape of the :class:`DTensor`
136
+ is defined by the variable argument ``size``.
137
+
138
+ Args:
139
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
140
+ Can be a variable number of arguments or a collection like a list or tuple.
141
+ E.g.: empty(1,2,3..) or empty([1,2,3..]) or empty((1,2,3..))
142
+
143
+ Keyword args:
144
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
145
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
146
+ layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`.
147
+ Default: ``torch.strided``.
148
+ requires_grad (bool, optional): If autograd should record operations on the
149
+ returned :class:`DTensor`. Default: ``False``.
150
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks
151
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
152
+
153
+ Returns:
154
+ A :class:`DTensor` object on each rank
155
+ """
156
+ torch_size = normalize_to_torch_size(size)
157
+
158
+ return _dtensor_init_helper(
159
+ torch.empty,
160
+ torch_size,
161
+ dtype=dtype,
162
+ layout=layout,
163
+ requires_grad=requires_grad,
164
+ device_mesh=device_mesh,
165
+ placements=placements,
166
+ )
167
+
168
+
169
+ def full(
170
+ size,
171
+ fill_value,
172
+ *,
173
+ dtype: Optional[torch.dtype] = None,
174
+ layout: torch.layout = torch.strided,
175
+ requires_grad: bool = False,
176
+ device_mesh: Optional[DeviceMesh] = None,
177
+ placements: Optional[Sequence[Placement]] = None,
178
+ ) -> DTensor:
179
+ """
180
+ Returns a :class:`DTensor` filled with ``fill_value``. The scalar value type should match
181
+ ``device_mesh.device_type``.
182
+
183
+ Args:
184
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
185
+ Can be a variable number of arguments or a collection like a list or tuple.
186
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
187
+ fill_value(Scalar): the value to fill the output tensor with.
188
+
189
+ Keyword args:
190
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
191
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
192
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
193
+ Default: ``torch.strided``.
194
+ requires_grad (bool, optional): If autograd should record operations on the
195
+ returned :class:`DTensor`. Default: ``False``.
196
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks.
197
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
198
+
199
+ Returns:
200
+ A :class:`DTensor` object on each rank
201
+ """
202
+ torch_size = normalize_to_torch_size(size)
203
+
204
+ return _dtensor_init_helper(
205
+ torch.full,
206
+ torch_size,
207
+ fill_value=fill_value,
208
+ dtype=dtype,
209
+ layout=layout,
210
+ requires_grad=requires_grad,
211
+ device_mesh=device_mesh,
212
+ placements=placements,
213
+ )
214
+
215
+
216
+ def rand(
217
+ *size,
218
+ requires_grad: bool = False,
219
+ dtype: Optional[torch.dtype] = None,
220
+ layout: torch.layout = torch.strided,
221
+ device_mesh: Optional[DeviceMesh] = None,
222
+ placements: Optional[Sequence[Placement]] = None,
223
+ ) -> DTensor:
224
+ """
225
+ Returns a :class:`DTensor` filled with random numbers from a uniform distribution
226
+ on the interval ``[0, 1)``. The shape of the tensor is defined by the variable
227
+ argument ``size``.
228
+
229
+ Args:
230
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
231
+ Can be a variable number of arguments or a collection like a list or tuple.
232
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
233
+
234
+ Keyword args:
235
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
236
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
237
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
238
+ Default: ``torch.strided``.
239
+ requires_grad (bool, optional): If autograd should record operations on the
240
+ returned :class:`DTensor`. Default: ``False``.
241
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks.
242
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
243
+
244
+ Returns:
245
+ A :class:`DTensor` object on each rank
246
+ """
247
+ torch_size = normalize_to_torch_size(size)
248
+
249
+ return _dtensor_init_helper(
250
+ torch.rand,
251
+ torch_size,
252
+ dtype=dtype,
253
+ layout=layout,
254
+ requires_grad=requires_grad,
255
+ device_mesh=device_mesh,
256
+ placements=placements,
257
+ )
258
+
259
+
260
+ def randn(
261
+ *size,
262
+ requires_grad: bool = False,
263
+ dtype: Optional[torch.dtype] = None,
264
+ layout: torch.layout = torch.strided,
265
+ device_mesh: Optional[DeviceMesh] = None,
266
+ placements: Optional[Sequence[Placement]] = None,
267
+ ) -> DTensor:
268
+ """
269
+ Returns a :class:`DTensor` filled with random numbers from a normal distribution
270
+ with mean 0 and variance 1. The shape of the tensor is defined by the variable
271
+ argument ``size``.
272
+
273
+ Args:
274
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
275
+ Can be a variable number of arguments or a collection like a list or tuple.
276
+ E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..))
277
+
278
+ Keyword args:
279
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
280
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
281
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor.
282
+ Default: ``torch.strided``.
283
+ requires_grad (bool, optional): If autograd should record operations on the
284
+ returned :class:`DTensor`. Default: ``False``.
285
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks.
286
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
287
+
288
+ Returns:
289
+ A :class:`DTensor` object on each rank
290
+ """
291
+ torch_size = normalize_to_torch_size(size)
292
+
293
+ return _dtensor_init_helper(
294
+ torch.randn,
295
+ torch_size,
296
+ dtype=dtype,
297
+ layout=layout,
298
+ requires_grad=requires_grad,
299
+ device_mesh=device_mesh,
300
+ placements=placements,
301
+ )
302
+
303
+
304
+ def zeros(
305
+ *size,
306
+ requires_grad: bool = False,
307
+ dtype: Optional[torch.dtype] = None,
308
+ layout: torch.layout = torch.strided,
309
+ device_mesh: Optional[DeviceMesh] = None,
310
+ placements: Optional[Sequence[Placement]] = None,
311
+ ) -> DTensor:
312
+ """
313
+ Returns a :class:`DTensor` filled with the scalar value 0.
314
+
315
+ Args:
316
+ size (int...): a sequence of integers defining the shape of the output :class:`DTensor`.
317
+ Can be a variable number of arguments or a collection like a list or tuple.
318
+ E.g.: zeros(1,2,3..) or zeros([1,2,3..]) or zeros((1,2,3..))
319
+ Keyword args:
320
+ requires_grad (bool, optional): If autograd should record operations on the
321
+ returned :class:`DTensor`. Default: ``False``.
322
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`.
323
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
324
+ layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`.
325
+ Default: ``torch.strided``.
326
+ device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks
327
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate``
328
+
329
+ Returns:
330
+ A :class:`DTensor` object on each rank
331
+ """
332
+ torch_size = normalize_to_torch_size(size)
333
+
334
+ return _dtensor_init_helper(
335
+ torch.zeros,
336
+ torch_size,
337
+ dtype=dtype,
338
+ layout=layout,
339
+ requires_grad=requires_grad,
340
+ device_mesh=device_mesh,
341
+ placements=placements,
342
+ )
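
These factories all funnel through `_dtensor_init_helper`, which needs a `DeviceMesh` and one placement per mesh dimension. A minimal single-process sketch (gloo backend, world size 1) just to show the call shape; real usage would run under torchrun with a multi-rank mesh:

import os

import torch.distributed as dist
from torch.distributed._tensor import Shard, init_device_mesh, ones

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

mesh = init_device_mesh("cpu", (1,))          # 1-D mesh over the single rank
dt = ones(8, 4, device_mesh=mesh, placements=[Shard(0)])
print(dt.to_local().shape)                    # torch.Size([8, 4]) on this rank

dist.destroy_process_group()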
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc ADDED
Binary file (6.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (6.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc ADDED
Binary file (347 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc ADDED
Binary file (8.95 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc ADDED
Binary file (7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc ADDED
Binary file (8.97 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py ADDED
@@ -0,0 +1,313 @@
1
+ import logging
2
+ import math
3
+ from dataclasses import dataclass
4
+ from functools import lru_cache
5
+
6
+ from typing import List, Optional
7
+
8
+ import torch
9
+ import torch.distributed._tensor.placement_types as placement_types
10
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
11
+ from torch.distributed.distributed_c10d import (
12
+ all_to_all,
13
+ broadcast,
14
+ get_global_rank,
15
+ get_rank,
16
+ get_world_size,
17
+ GroupMember,
18
+ ProcessGroup,
19
+ scatter,
20
+ Work,
21
+ )
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ # TODO: we need to migrate these APIs to be functional collectives
27
+
28
+
29
+ def mesh_scatter(
30
+ output: torch.Tensor,
31
+ scatter_list: List[torch.Tensor],
32
+ mesh: DeviceMesh,
33
+ mesh_dim: int = 0,
34
+ async_op: bool = False,
35
+ ) -> Optional[Work]:
36
+ """
37
+ scatter a list of tensors to a device mesh dimension. We by default
38
+ use the first rank of the mesh dimension as the source of truth, i.e
39
+ for a 2d mesh [[0, 1], [2, 3]], if we scatter on mesh_dim = 1, we will
40
+ scatter the tensor list on rank 0 to rank 0/1, and tensor list on rank
41
+ 2 to rank 2/3.
42
+
43
+ Args:
44
+ output (torch.Tensor): the tensor to receive the scattered list.
45
+ scatter_list (List[torch.Tensor]): the tensor list to be scattered.
46
+ mesh_dim (int, optional): indicate which mesh dimension we want
47
+ to scatter on, we by default choose the first rank on the
48
+ mesh dimension as source of truth.
49
+
50
+ Returns:
51
+ A :class:`Work` object
52
+ """
53
+ # TODO: Ideally we should use the meta tensor way
54
+ # (to register a meta kernel for the collective op)
55
+ # so that it would avoid the communication. Need to
56
+ # remove the check below once that is done.
57
+ if output.is_meta:
58
+ return None
59
+ dim_group = mesh.get_group(mesh_dim)
60
+ assert isinstance(dim_group, ProcessGroup)
61
+ # src need to be global rank
62
+ src_for_dim = 0
63
+
64
+ if dim_group is not GroupMember.WORLD:
65
+ src_for_dim = get_global_rank(dim_group, 0)
66
+
67
+ if src_for_dim == get_rank():
68
+ fut = scatter(
69
+ output,
70
+ scatter_list=scatter_list,
71
+ src=src_for_dim,
72
+ group=dim_group,
73
+ async_op=async_op,
74
+ )
75
+ else:
76
+ fut = scatter(
77
+ output,
78
+ scatter_list=None,
79
+ src=src_for_dim,
80
+ group=dim_group,
81
+ async_op=async_op,
82
+ )
83
+
84
+ return fut
85
+
86
+
87
+ def mesh_broadcast(
88
+ tensor: torch.Tensor,
89
+ mesh: DeviceMesh,
90
+ mesh_dim: int = 0,
91
+ async_op: bool = False,
92
+ ) -> Optional[Work]:
93
+ """
94
+ broadcast the tensor to a device mesh dimension. We by default
95
+ use the first rank of the mesh dimension as the source of truth, i.e
96
+ for a 2d mesh [[0, 1], [2, 3]], if we broadcast on mesh_dim = 1, we will
97
+ broadcast the tensor on rank 0 to rank 0/1, and tensor on rank 2
98
+ to rank 2/3.
99
+
100
+ Args:
101
+ tensor (torch.Tensor): tensor to broadcast.
102
+ mesh_dim (int, optional): indicate which mesh dimension we want
103
+ to scatter on, we by default choose the first rank on the
104
+ mesh dimension as source of truth.
105
+
106
+ Returns:
107
+ A :class:`Work` object
108
+ """
109
+ # TODO: Ideally we should use the meta tensor way
110
+ # (to register a meta kernel for the collective op)
111
+ # so that it would avoid the communication. Need to
112
+ # remove the check below once that is done.
113
+ if tensor.is_meta:
114
+ return None
115
+ dim_group = mesh.get_group(mesh_dim)
116
+ assert isinstance(dim_group, ProcessGroup)
117
+ # src need to be global rank
118
+ src_for_dim = 0
119
+ if dim_group is not GroupMember.WORLD:
120
+ src_for_dim = get_global_rank(dim_group, 0)
121
+
122
+ return broadcast(tensor, src=src_for_dim, group=dim_group, async_op=async_op)
123
+
124
+
125
+ # TODO: test uneven split on GLOO and NCCL
126
+ def mesh_all_to_all(
127
+ output_tensor_list: List[torch.Tensor],
128
+ input_tensor_list: List[torch.Tensor],
129
+ mesh: DeviceMesh,
130
+ mesh_dim: int = 0,
131
+ async_op: bool = False,
132
+ ) -> Optional[Work]:
133
+ dim_group = mesh.get_group(mesh_dim)
134
+ assert isinstance(dim_group, ProcessGroup)
135
+
136
+ work = None
137
+ # no direct dist.all_to_all support on 'gloo' so we manually do scatters
138
+ if mesh.device_type == "cpu":
139
+ logger.warning(
140
+ "ProcessGroupGloo does not support all_to_all, falling back with scatters!"
141
+ )
142
+ # TODO: pull the handle of uneven case in #492
143
+ dim_group_size = get_world_size(dim_group)
144
+ for i in range(dim_group_size):
145
+ # src need to be global rank
146
+ src_for_dim = i
147
+ if dim_group is not GroupMember.WORLD:
148
+ src_for_dim = get_global_rank(dim_group, i)
149
+
150
+ work = scatter(
151
+ output_tensor_list[i],
152
+ input_tensor_list if mesh.get_rank() == src_for_dim else [],
153
+ group=dim_group,
154
+ src=src_for_dim,
155
+ async_op=async_op,
156
+ )
157
+ else:
158
+ work = all_to_all(
159
+ output_tensor_list,
160
+ input_tensor_list,
161
+ dim_group,
162
+ async_op=async_op,
163
+ )
164
+ return work
165
+
166
+
167
+ def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int:
168
+ assert spec.tensor_meta is not None, "spec should have tensor meta defined!"
169
+ return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape)
170
+
171
+
172
+ @dataclass
173
+ class MeshTopoInfo:
174
+ """
175
+ Mesh information for collective cost estimation
176
+ """
177
+
178
+ mesh: DeviceMesh
179
+ mesh_dim_devices: List[int]
180
+ mesh_dim_bandwidth: List[float]
181
+ mesh_dim_latency: List[float]
182
+
183
+ @staticmethod
184
+ @lru_cache(None)
185
+ def build_from_mesh(mesh: DeviceMesh) -> "MeshTopoInfo":
186
+ # Generate mesh topology info for intra-host/inter-host communication pattern
187
+ # Note that we made bunch of assumptions for simplicity:
188
+ # 1. we assume the mesh is homogeneous, and it's gpu/nccl model
189
+ # 2. we assume gpu arch is Ampere or Hopper
190
+ # 3. we assume collectives are all ring base algo for now
191
+ num_devices_per_host = _mesh_resources.num_devices_per_host(mesh.device_type)
192
+ # the base bw number (intra-node), GB/s
193
+ base_bw = 87.7
194
+ mesh_dim_bandwidth = [base_bw] * mesh.ndim
195
+ # the latency in terms of us (intra-node, nv-link)
196
+ mesh_dim_latency = [0.6] * mesh.ndim
197
+ mesh_dim_devices = [1] * mesh.ndim
198
+
199
+ total_num_devices = 1
200
+ for mesh_dim in reversed(range(mesh.ndim)):
201
+ num_devices = mesh.size(mesh_dim)
202
+ mesh_dim_devices[mesh_dim] = num_devices
203
+ total_num_devices *= num_devices
204
+ if total_num_devices > num_devices_per_host:
205
+ # magic number for inter-host communication bandwidth/latency factor
206
+ # This number assumes latest GPU arch, i.e. Ampere or Hopper
207
+ # TODO: see if we need to tweak this or offer a way for user
208
+ # to specify the bandwidths/latency
209
+ mesh_dim_bandwidth[mesh_dim] *= 0.22
210
+ # set to ethernet latency for inter-host
211
+ mesh_dim_latency[mesh_dim] = 2.7
212
+
213
+ return MeshTopoInfo(
214
+ mesh, mesh_dim_devices, mesh_dim_bandwidth, mesh_dim_latency
215
+ )
216
+
217
+
218
+ def allgather_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float:
219
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
220
+ mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim]
221
+ num_hops = num_devices_on_mesh_dim - 1
222
+ # base latency + comm latency
223
+ latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] # us
224
+ bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth # s
225
+ return latency + bw * 1e6 # rescale to us
226
+
227
+
228
+ def allreduce_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float:
229
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
230
+ mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim]
231
+ # allreduce have almost 2x comm bytes compare to allgather/reduce_scatter
232
+ num_hops = 2 * num_devices_on_mesh_dim - 1
233
+
234
+ latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
235
+ bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth
236
+ return latency + bw * 1e6
237
+
238
+
239
+ def reduce_scatter_cost(
240
+ bytes_gb: float,
241
+ mesh_topo: MeshTopoInfo,
242
+ mesh_dim: int,
243
+ ) -> float:
244
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim]
245
+ mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim]
246
+ num_hops = num_devices_on_mesh_dim - 1
247
+ # base latency + comm latency
248
+ latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim]
249
+ bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth
250
+ return latency + bw * 1e6
251
+
252
+
253
+ def redistribute_cost(
254
+ current_spec: "placement_types.DTensorSpec",
255
+ target_spec: "placement_types.DTensorSpec",
256
+ ) -> float:
257
+ """
258
+ This function returns the cost of redistribute from current to target DTensorSpec.
259
+
260
+ NOTE:
261
+ 1. Only consider communication cost here, since computation costs for redistribute
262
+ are quite trivial (i.e. we only need to narrow or do a simple division)
263
+ 2. Only consider redistribute cost on same mesh, cross mesh communication cost is
264
+ not quite needed for operator strategy estimation/selection.
265
+ """
266
+ if current_spec.mesh != target_spec.mesh:
267
+ # make infinite cost if meshes are not same
268
+ # TODO: see if we want to support this once there's cross mesh communication
269
+ return float("inf")
270
+
271
+ if current_spec.is_replicated():
272
+ # short-cut:
273
+ # comm cost is 0 if current spec is already full replication
274
+ return 0.0
275
+
276
+ mesh_topo = MeshTopoInfo.build_from_mesh(current_spec.mesh)
277
+ cost = 0.0
278
+ comm_bytes_gb = (
279
+ spec_to_bytes(current_spec) / current_spec.num_shards / 1024 / 1024 / 1024
280
+ )
281
+ # Transformation that considered for redistribute cost:
282
+ # 1. allgather 2. alltoall
283
+ # 3. allreduce 4. reduce_scatter
284
+ for i, (current, target) in enumerate(
285
+ zip(current_spec.placements, target_spec.placements)
286
+ ):
287
+ if current == target:
288
+ continue
289
+
290
+ num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[i]
291
+ if current.is_shard() and target.is_replicate():
292
+ # allgather gives larger comm bytes
293
+ comm_bytes_gb *= num_devices_on_mesh_dim
294
+ # add up allgather comm cost
295
+ cost += allgather_cost(comm_bytes_gb, mesh_topo, i)
296
+ elif current.is_shard() and target.is_shard():
297
+ # should be alltoall comm, since we haven't implement it yet, add penalty
298
+ # to favor allgather instead
299
+ cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + 1.0
300
+ elif current.is_partial() and target.is_replicate():
301
+ # add up allreduce comm cost
302
+ cost += allreduce_cost(comm_bytes_gb, mesh_topo, i)
303
+ elif current.is_partial() and target.is_shard():
304
+ # add up reduce_scatter comm cost
305
+ cost += reduce_scatter_cost(comm_bytes_gb, mesh_topo, i)
306
+ # after reduce_scatter, the comm bytes for further collectives shrink by the mesh dim size.
307
+ comm_bytes_gb /= num_devices_on_mesh_dim
308
+ elif current.is_shard() and target.is_partial():
309
+ # ban shard -> partial as it does not make sense to perform
310
+ # this redistribute
311
+ return float("inf")
312
+
313
+ return cost
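To make the cost model above concrete, here is a small standalone sketch that re-derives the allgather formula with illustrative hardware numbers (8 devices on the mesh dim, 0.5 us per-hop latency, 200 GB/s links). The helper name `allgather_cost_us` and all hardware numbers are assumptions for the example, not part of the library.

```python
# Standalone re-derivation of the allgather cost formula above; the hardware
# numbers are made up for illustration and are not taken from the library.
def allgather_cost_us(bytes_gb: float, num_devices: int,
                      bandwidth_gb_s: float, hop_latency_us: float) -> float:
    num_hops = num_devices - 1
    latency_us = 6.6 + num_hops * hop_latency_us                  # base + per-hop latency (us)
    bw_s = (bytes_gb * num_hops / num_devices) / bandwidth_gb_s   # ring allgather moves (N-1)/N of the data
    return latency_us + bw_s * 1e6                                # rescale seconds -> microseconds

# 1 GB gathered across 8 devices over 200 GB/s links with 0.5 us/hop latency:
# latency term = 6.6 + 7 * 0.5 = 10.1 us, bandwidth term = (1 * 7/8) / 200 s = 4375 us
print(allgather_cost_us(1.0, 8, 200.0, 0.5))  # ~4385.1 us
```

The same latency-plus-bandwidth structure applies to `allreduce_cost` and `reduce_scatter_cost`; only the `num_hops` term changes.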
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/api.py ADDED
@@ -0,0 +1,760 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import inspect
3
+ import warnings
4
+ from typing import Any, Callable, cast, Optional, Sequence, Tuple
5
+
6
+ import torch
7
+
8
+ import torch.distributed._tensor.dispatch as op_dispatch
9
+ import torch.distributed._tensor.random as random
10
+ import torch.nn as nn
11
+ from torch.distributed._tensor._collective_utils import mesh_broadcast
12
+ from torch.distributed._tensor._utils import compute_global_tensor_info
13
+ from torch.distributed._tensor.placement_types import (
14
+ DTensorSpec,
15
+ Placement,
16
+ Replicate,
17
+ Shard,
18
+ TensorMeta,
19
+ )
20
+ from torch.distributed._tensor.random import (
21
+ is_rng_supported_mesh,
22
+ OffsetBasedRNGTracker,
23
+ )
24
+ from torch.distributed._tensor.redistribute import (
25
+ Redistribute,
26
+ redistribute_local_tensor,
27
+ )
28
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
29
+
30
+
31
+ __all__ = ["DTensor", "distribute_tensor", "distribute_module"]
32
+
33
+ aten = torch.ops.aten
34
+
35
+
36
+ # NOTE [Autograd interaction between torch.Tensor]
37
+ #
38
+ # The autograd functions defined below are being used by the public
39
+ # facing APIs (i.e. from_local, to_local) to ensure our DTensor
40
+ # works together with torch.Tensor within autograd engine. This
41
+ # allows DistributedTensor to exist on part of the module hierarchy
42
+ # and still be able to calculate gradients across the torch.Tensor and
43
+ # DistributedTensor boundary.
44
+ # As an example, suppose we have a module that consists of submodules
45
+ # A, B, and C; the execution flow would look like:
46
+ # input(torch.Tensor) -> Module A -> Module B -> Module C -> output (torch.Tensor)
47
+ #
48
+ # Suppose we only want to make Module B a sharded module with
49
+ # DistributedTensor params, we would need to make the following
50
+ # flow to work:
51
+ #
52
+ # input(torch.Tensor) -> Module A
53
+ # -> DTensor input -> Sharded Module B -> DTensor output
54
+ # -> output (torch.Tensor) -> Module C -> output (torch.Tensor)
55
+ #
56
+ # We need the conversion from Module A to DTensor input, which is
57
+ # `from_local`, and conversion from DTensor output to output, which
58
+ # is `to_local`, thus these two functions must be Autograd functions.
59
+ #
60
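As a minimal sketch of the flow described in the note above (not part of this file): `inp`, `module_a`, `sharded_module_b`, and `module_c` are hypothetical placeholders, and the snippet assumes `torch.distributed` has already been initialized (e.g. via `torchrun`) with one CUDA device per rank.

```python
import torch
import torch.distributed as dist
from torch.distributed._tensor import DTensor, Replicate
from torch.distributed.device_mesh import DeviceMesh

mesh = DeviceMesh("cuda", list(range(dist.get_world_size())))

x = module_a(inp)                                   # plain torch.Tensor (hypothetical module)
dt_in = DTensor.from_local(x, mesh, [Replicate()])  # Tensor -> DTensor boundary (_FromTorchTensor)
dt_out = sharded_module_b(dt_in)                    # module with DTensor params (hypothetical)
y = dt_out.to_local()                               # DTensor -> Tensor boundary (_ToTorchTensor)
out = module_c(y)                                   # plain torch.Tensor again (hypothetical)
out.sum().backward()                                # gradients flow across both boundaries
```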
+ class _ToTorchTensor(torch.autograd.Function):
61
+ @staticmethod
62
+ def forward( # type: ignore[override]
63
+ ctx,
64
+ input: "DTensor",
65
+ grad_placements: Optional[Sequence[Placement]],
66
+ ):
67
+ ctx.dtensor_spec = input._spec
68
+ ctx.grad_placements = grad_placements
69
+ local_tensor = input._local_tensor
70
+
71
+ # We need to return a fresh Tensor object here, as autograd metadata
72
+ # will be written into it in place, and we don't want to pollute the Tensor
73
+ # object stored in the _local_tensor of this DTensor.
74
+ return local_tensor.view_as(local_tensor)
75
+
76
+ @staticmethod
77
+ def backward(ctx, grad_output: torch.Tensor): # type: ignore[override]
78
+ dtensor_spec = ctx.dtensor_spec
79
+ mesh = dtensor_spec.mesh
80
+ grad_placements = ctx.grad_placements
81
+ dtensor_meta = dtensor_spec.tensor_meta
82
+
83
+ _, tensor_stride = compute_global_tensor_info(
84
+ grad_output, mesh, dtensor_spec.placements
85
+ )
86
+ tensor_stride = tuple(tensor_stride)
87
+ grad_placements = grad_placements or dtensor_spec.placements
88
+
89
+ return (
90
+ DTensor(
91
+ grad_output,
92
+ mesh,
93
+ grad_placements,
94
+ shape=dtensor_meta.shape,
95
+ dtype=dtensor_meta.dtype,
96
+ requires_grad=grad_output.requires_grad,
97
+ stride=tensor_stride,
98
+ ),
99
+ None,
100
+ )
101
+
102
+
103
+ class _FromTorchTensor(torch.autograd.Function):
104
+ @staticmethod
105
+ def forward( # type: ignore[override]
106
+ ctx, # pyre-ignore[2]: Parameter must be annotated.
107
+ input: torch.Tensor,
108
+ device_mesh: DeviceMesh,
109
+ placements: Tuple[Placement, ...],
110
+ run_check: bool,
111
+ shape: Optional[torch.Size] = None,
112
+ stride: Optional[Tuple[int, ...]] = None,
113
+ ) -> "DTensor":
114
+ ctx.previous_placement = placements
115
+ ctx.previous_device_mesh = device_mesh
116
+
117
+ if shape and stride:
118
+ tensor_shape, tensor_stride = shape, stride
119
+ elif not shape and not stride:
120
+ # if shape/stride are not provided, we assume the user is certain that each
121
+ # rank has the same local tensor shape, and we just use that to calculate the
122
+ # global shape
123
+ global_shape, global_stride = compute_global_tensor_info(
124
+ input, device_mesh, placements
125
+ )
126
+ tensor_shape, tensor_stride = torch.Size(global_shape), tuple(global_stride)
127
+ else:
128
+ raise RuntimeError(
129
+ f"Found shape:{shape}, stride:{stride}.",
130
+ "Please pass both shape and stride at the same time.",
131
+ )
132
+
133
+ if device_mesh.get_coordinate() is None:
134
+ # if the global rank is not participating in the device mesh, we
135
+ # simply set the local tensor to an empty tensor
136
+ input = input.new_empty(0, requires_grad=input.requires_grad)
137
+ elif run_check:
138
+ # TODO: by default check tensor metas across rank
139
+ # TODO: See if we need to make this run_check logic
140
+ # have a corresponding backward.
141
+ for idx, placement in enumerate(placements):
142
+ if placement.is_replicate():
143
+ # broadcast rank 0 tensor to all ranks
144
+ # only broadcast if run_check is True
145
+ input = input.contiguous()
146
+ mesh_broadcast(input, device_mesh, mesh_dim=idx)
147
+
148
+ # We want a fresh Tensor object that shares memory with the input tensor
149
+ dist_tensor = DTensor(
150
+ input.view_as(input),
151
+ device_mesh,
152
+ placements,
153
+ shape=tensor_shape,
154
+ dtype=input.dtype,
155
+ # requires_grad of the dist tensor depends on if input
156
+ # requires_grad or not
157
+ requires_grad=input.requires_grad,
158
+ stride=tensor_stride,
159
+ )
160
+ return dist_tensor
161
+
162
+ @staticmethod
163
+ def backward(ctx, grad_output: "DTensor"): # type: ignore[override]
164
+ previous_placement = ctx.previous_placement
165
+ previous_device_mesh = ctx.previous_device_mesh
166
+
167
+ # reshard to the placement when creating DistributedTensor
168
+ # so that the gradient layout matches, and we could return
169
+ # local gradients directly
170
+ if grad_output.placements != previous_placement:
171
+ current_spec = grad_output._spec
172
+ target_spec = DTensorSpec(
173
+ previous_device_mesh,
174
+ previous_placement,
175
+ tensor_meta=grad_output._spec.tensor_meta,
176
+ )
177
+ local_tensor = grad_output._local_tensor
178
+ output = redistribute_local_tensor(
179
+ local_tensor, current_spec, target_spec, is_backward=True
180
+ )
181
+ # TODO: return the redistributed local tensor directly without
182
+ # differentiable backward. see if this make sense for all cases.
183
+ return output, None, None, None, None, None
184
+
185
+ # TODO: backward is also differentiable now, add a test
186
+ # to test higher level gradients.
187
+ return grad_output.to_local(), None, None, None, None, None
188
+
189
+
190
+ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
191
+ _local_tensor: torch.Tensor
192
+ _spec: DTensorSpec
193
+ __slots__ = ["_local_tensor", "_spec"]
194
+
195
+ # class attribute that handles operator placements propagation
196
+ # rules, keyed by aten op name, value is propagation func
197
+ _op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher()
198
+
199
+ @staticmethod
200
+ def __new__(
201
+ cls,
202
+ local_tensor: torch.Tensor,
203
+ device_mesh: DeviceMesh,
204
+ placements: Tuple[Placement, ...],
205
+ *,
206
+ shape: torch.Size,
207
+ dtype: torch.dtype,
208
+ requires_grad: bool,
209
+ stride: Tuple[int, ...],
210
+ ) -> "DTensor":
211
+ """
212
+ Construct a DTensor from a local tensor, device mesh, and placements, plus
213
+ other tensor properties (i.e. shape, dtype, requires_grad, strides, etc.).
214
+ Note: This is not a public API and it's only supposed to be used by the
215
+ operator implementations and internals. If you want to construct a
216
+ DTensor from a local tensor, consider using `DTensor.from_local`, if
217
+ you want to construct a DTensor from a "global" tensor (where you
218
+ already have tensor initialized and want to shard this tensor),
219
+ consider using `distribute_tensor`.
220
+ """
221
+ if local_tensor.requires_grad and not requires_grad:
222
+ warnings.warn(
223
+ "To construct DTensor from torch.Tensor, it's recommended to "
224
+ "use local_tensor.detach() and make requires_grad consistent."
225
+ )
226
+
227
+ # construct a wrapper tensor subclass from local_tensor and attach the
228
+ # placement spec; it does not do any actual distribution
229
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
230
+ cls,
231
+ shape,
232
+ strides=stride,
233
+ dtype=dtype,
234
+ device=local_tensor.device,
235
+ layout=local_tensor.layout,
236
+ requires_grad=requires_grad,
237
+ )
238
+
239
+ tensor_meta = TensorMeta(shape, stride, dtype)
240
+ # set the sharding spec and keep a reference to the local tensor
241
+ r._spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta)
242
+ r._local_tensor = local_tensor
243
+ return r
244
+
245
+ # pyre-fixme[14]: `__repr__` overrides method defined in `DTensor` inconsistently.
246
+ # pyre-fixme[3]: Return type must be annotated.
247
+ def __repr__(self):
248
+ # TODO: consider all_gather the local tensors for better debugging
249
+ return f"DTensor(local_tensor={self._local_tensor}, device_mesh={self._spec.mesh}, placements={self._spec.placements})"
250
+
251
+ def __tensor_flatten__(self):
252
+ """
253
+ protocol to inform how to flatten a DTensor to local tensor
254
+ for PT2 tracing
255
+ """
256
+ return ["_local_tensor"], (self._spec, self.requires_grad)
257
+
258
+ @staticmethod
259
+ def __tensor_unflatten__(inner_tensors, flatten_spec, outer_size, outer_stride):
260
+ assert (
261
+ flatten_spec is not None
262
+ ), "Expecting spec to be not None from `__tensor_flatten__` return value!"
263
+ local_tensor = inner_tensors["_local_tensor"]
264
+ spec, requires_grad = flatten_spec
265
+ return DTensor(
266
+ local_tensor,
267
+ spec.mesh,
268
+ spec.placements,
269
+ shape=outer_size,
270
+ dtype=spec.tensor_meta.dtype,
271
+ requires_grad=requires_grad,
272
+ stride=outer_stride,
273
+ )
274
+
275
+ @classmethod
276
+ # pyre-fixme[3]: Return type must be annotated.
277
+ # pyre-fixme[2]: Parameter must be annotated.
278
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
279
+ return DTensor._op_dispatcher.dispatch(
280
+ func,
281
+ args,
282
+ kwargs or {},
283
+ )
284
+
285
+ @staticmethod
286
+ def from_local(
287
+ local_tensor: torch.Tensor,
288
+ device_mesh: Optional[DeviceMesh] = None,
289
+ placements: Optional[Sequence[Placement]] = None,
290
+ *,
291
+ run_check: bool = True,
292
+ shape: Optional[torch.Size] = None,
293
+ stride: Optional[Tuple[int, ...]] = None,
294
+ ) -> "DTensor":
295
+ """
296
+ Create a :class:`DTensor` from a local torch.Tensor on each rank
297
+ according to the `device_mesh` and `placements` specified.
298
+
299
+ Args:
300
+ local_tensor (torch.Tensor): local torch.Tensor on each rank.
301
+ device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the
302
+ tensor, if not specified, must be called under a DeviceMesh
303
+ context manager, default: None
304
+ placements (List[:class:`Placement`], optional): the placements that
305
+ describes how to place the local torch.Tensor on DeviceMesh, must
306
+ have the same number of elements as `device_mesh.ndim`. If not
307
+ specified, we will by default replicate the tensor across the
308
+ `device_mesh` from the first rank of each dimension of the `device_mesh`.
309
+
310
+ Keyword args:
311
+ run_check (bool, optional): indicates whether to run checks across ranks
312
+ on meta information and data. If there is a :class:`Replicate` in
313
+ `placements`, the data on the first rank of that device mesh dimension
314
+ will be broadcast to the other ranks.
315
+ shape (torch.Size, optional): a list of ints specifying the size of the
316
+ DTensor built on top of `local_tensor`. Note this needs to be
317
+ provided if the shape of `local_tensor` differs across the ranks.
318
+ If not provided, `shape` will be computed assuming the given distributed
319
+ tensor is evenly sharded across ranks.
320
+ stride (tuple, optional): a list of ints specifying the stride of the DTensor.
321
+ If not provided, `stride` will be computed assuming the given distributed
322
+ tensor is evenly sharded across ranks.
323
+
324
+ Returns:
325
+ A :class:`DTensor` object
326
+
327
+ .. note:: `from_local` is differentiable; the `requires_grad` of the created
328
+ `DTensor` object depends on whether `local_tensor` requires grad or not.
329
+ """
330
+ # if shape/dtype are the same, no need to run_check; if not, we must allgather
331
+ # the metadata to check the size/dtype across ranks.
332
+ # There should be no data communication unless there's a replication
333
+ # placement, in which case we broadcast the data from the first rank
334
+ # in the mesh dimension
335
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
336
+ device_type = device_mesh.device_type
337
+
338
+ # convert the local tensor to desired device base on device mesh's device_type
339
+ if device_type != local_tensor.device.type and not local_tensor.is_meta:
340
+ local_tensor = local_tensor.to(device_type)
341
+
342
+ # set default placements to replicated if not specified
343
+ if placements is None:
344
+ placements = [Replicate() for _ in range(device_mesh.ndim)]
345
+ else:
346
+ placements = list(placements)
347
+ for idx, placement in enumerate(placements):
348
+ # normalize shard dim to be positive
349
+ if placement.is_shard():
350
+ placement = cast(Shard, placement)
351
+ if placement.dim < 0:
352
+ placements[idx] = Shard(placement.dim + local_tensor.ndim)
353
+
354
+ # `from_local` is differentiable, and the gradient of the dist tensor this function
355
+ # created should flow back the gradients to the local_tensor, so we call an autograd
356
+ # function to construct the dist tensor instead.
357
+ return _FromTorchTensor.apply( # pyre-ignore[16]: autograd func
358
+ local_tensor,
359
+ device_mesh,
360
+ tuple(placements),
361
+ run_check,
362
+ shape,
363
+ stride,
364
+ )
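A hedged usage sketch for `from_local`, reusing the assumed distributed setup from the earlier sketch: each rank contributes one dim-0 shard and the global shape is inferred from the local shape.

```python
from torch.distributed._tensor import DTensor, Shard

# every rank holds a (4, 8) local shard; the global shape becomes (world_size * 4, 8)
local_shard = torch.randn(4, 8, device="cuda", requires_grad=True)
dt = DTensor.from_local(local_shard, mesh, [Shard(0)])
print(dt.shape)          # torch.Size([dist.get_world_size() * 4, 8])
print(dt.requires_grad)  # True -- from_local is differentiable
```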
365
+
366
+ def to_local(
367
+ self, *, grad_placements: Optional[Sequence[Placement]] = None
368
+ ) -> torch.Tensor:
369
+ """
370
+ Get the local tensor of this DTensor on its current rank. For sharding it returns
371
+ a local shard of the logical tensor view, for replication it returns the replica on
372
+ its current rank.
373
+
374
+ Keyword args:
375
+ grad_placements (List[:class:`Placement`], optional): the placements that describe
376
+ the future layout of any gradient of the tensor returned from this
377
+ function.
378
+ `to_local` converts DTensor to a local tensor, and the returned local tensor
379
+ might not keep the original DTensor layout later in the code. This
380
+ argument is a hint the user can give to autograd in case the gradient
381
+ layout of the returned tensor does not match the original DTensor layout.
382
+ If not specified, we will assume the gradient layout remains the same
383
+ as the original DTensor and use that for gradient computation.
384
+
385
+ Returns:
386
+ A :class:`torch.Tensor` or `AsyncCollectiveTensor` object. It represents the
387
+ local tensor on its current rank.
388
+
389
+ .. note:: `to_local` is differentiable; the `requires_grad` of the local tensor returned
390
+ depends on whether the `DTensor` requires grad or not.
391
+ """
392
+ if grad_placements is not None and not isinstance(grad_placements, tuple):
393
+ grad_placements = tuple(grad_placements)
394
+ return _ToTorchTensor.apply(
395
+ self, grad_placements
396
+ ) # pyre-ignore[16]: autograd func
397
+
398
+ def redistribute(
399
+ self,
400
+ device_mesh: Optional[DeviceMesh] = None,
401
+ placements: Optional[Sequence[Placement]] = None,
402
+ *,
403
+ async_op: bool = False,
404
+ ) -> "DTensor":
405
+ """
406
+ `redistribute` performs necessary collective operations that redistribute the current
407
+ DTensor from its current placements to new placements, or from its current DeviceMesh
408
+ to a new DeviceMesh, i.e. we can turn a sharded DTensor into a replicated DTensor by
409
+ specifying a Replicate placement for each dimension of the DeviceMesh.
410
+
411
+ Args:
412
+ device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the
413
+ DTensor, if not specified, must be called under a DeviceMesh
414
+ context manager, default: None
415
+ placements (List[:class:`Placement`], optional): the new placements that
416
+ describes how to place the DTensor into the DeviceMesh, must
417
+ have the same number of elements as `device_mesh.ndim`.
418
+
419
+ Keyword args:
420
+ async_op (bool, optional): whether to perform the DTensor redistribute operation
421
+ asynchronously or not. Default: False
422
+
423
+ Returns:
424
+ A :class:`DTensor` object
425
+
426
+ .. note:: `redistribute` is differentiable.
427
+ """
428
+ # NOTE: This redistribute API currently only supports out
429
+ # of place redistribution, i.e. it always creates a new
431
+ # DTensor object and leaves the original one unchanged.
431
+
432
+ # if device_mesh is not specified, use the current device_mesh
433
+ device_mesh = device_mesh or self.device_mesh
434
+ # raise error if new placements not specified
435
+ if placements is None:
436
+ raise RuntimeError("placements is needed for redistribute!")
437
+
438
+ placements = list(placements)
439
+ for i, placement in enumerate(placements):
440
+ if placement.is_partial():
441
+ raise RuntimeError(
442
+ "Can not redistribute to _Partial, _Partial is for internal use only!"
443
+ )
444
+ elif isinstance(placement, Shard) and placement.dim < 0:
445
+ # normalize shard dim to be positive
446
+ placements[i] = Shard(placement.dim + self.ndim)
447
+ placements = tuple(placements)
448
+
449
+ # Early return the original DTensor if the placements are the same.
450
+ if self._spec.placements == placements:
451
+ return self
452
+
453
+ # pyre-fixme[16]: `Redistribute` has no attribute `apply`.
454
+ return Redistribute.apply(self, device_mesh, placements, async_op)
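A short sketch of `redistribute` under the same assumptions as the earlier sketches: turning a dim-0 sharded DTensor into a replicated one, which triggers an allgather on that mesh dimension.

```python
from torch.distributed._tensor import distribute_tensor, Replicate, Shard

sharded = distribute_tensor(torch.randn(8, 8), mesh, [Shard(0)])
replicated = sharded.redistribute(mesh, [Replicate()])  # allgather along mesh dim 0
assert replicated.placements == (Replicate(),)
assert replicated.to_local().shape == (8, 8)            # every rank now holds the full tensor
```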
455
+
456
+ def full_tensor(
457
+ self, *, grad_placements: Optional[Sequence[Placement]] = None
458
+ ) -> torch.Tensor:
459
+ """
460
+ Return the full tensor of this DTensor. It will perform necessary collectives
461
+ to gather the local tensors from other ranks in its DeviceMesh and concatenate
462
+ them together. It is syntactic sugar for the following code:
463
+
464
+ `dtensor.redistribute(placements=[Replicate()] * mesh.ndim).to_local()`
465
+
466
+ Keyword args:
467
+ grad_placements (List[:class:`Placement`], optional): the placements that describe
468
+ the future layout of any gradient of the full tensor returned from this
469
+ function.
470
+ `full_tensor` converts DTensor to a full torch.Tensor, and the returned torch.Tensor
471
+ might not keep the original replicated DTensor layout later in the code. This
472
+ argument is a hint the user can give to autograd in case the gradient
473
+ layout of the returned tensor does not match the original replicated DTensor layout.
474
+ If not specified, we will assume the gradient layout of the full tensor to be replicated.
475
+
476
+ Returns:
477
+ A :class:`torch.Tensor` object that represents the full tensor of this DTensor.
478
+
479
+ .. note:: `full_tensor` is differentiable.
480
+ """
481
+
482
+ redist_res = self.redistribute(
483
+ placements=[Replicate()] * self.device_mesh.ndim, async_op=False
484
+ )
485
+ return _ToTorchTensor.apply(redist_res, grad_placements)
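And the equivalence stated in the docstring, as a tiny sketch (reusing the imports and `mesh` from the sketches above):

```python
dt = distribute_tensor(torch.arange(8.0).reshape(4, 2), mesh, [Shard(0)])
full = dt.full_tensor()  # same result as dt.redistribute(placements=[Replicate()] * mesh.ndim).to_local()
assert full.shape == (4, 2)
```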
486
+
487
+ @property
488
+ def device_mesh(self) -> DeviceMesh:
489
+ """
490
+ The :class:`DeviceMesh` attribute that associates with this DTensor object.
491
+
492
+ .. note:: device_mesh is a read-only property, it can not be set.
493
+ """
494
+ return self._spec.mesh
495
+
496
+ @property
497
+ def placements(self) -> Sequence[Placement]:
498
+ """
499
+ The placements attribute of this DTensor that describes the layout of this
500
+ DTensor on its DeviceMesh.
501
+
502
+ .. note:: placements is a read-only property, it can not be set.
503
+ """
504
+ return self._spec.placements
505
+
506
+
507
+ def distribute_tensor(
508
+ tensor: torch.Tensor,
509
+ device_mesh: Optional[DeviceMesh] = None,
510
+ placements: Optional[Sequence[Placement]] = None,
511
+ ) -> DTensor:
512
+ """
513
+ Distribute a torch.Tensor to the `device_mesh` according to the `placements`
514
+ specified. The rank of `device_mesh` and `placements` must be the same.
515
+
516
+ Args:
517
+ tensor (torch.Tensor): torch.Tensor to be distributed. Note that if you
518
+ want to shard a tensor on a dimension that is not evenly divisible by
519
+ the number of devices in that mesh dimension, we use `torch.chunk`
520
+ semantic to shard the tensor and scatter the shards.
521
+ device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to distribute the
522
+ tensor, if not specified, must be called under a DeviceMesh context
523
+ manager, default: None
524
+ placements (List[:class:`Placement`], optional): the placements that
525
+ describes how to place the tensor on DeviceMesh, must have the same
526
+ number of elements as `device_mesh.ndim`. If not specified, we will
527
+ by default replicate the tensor across the `device_mesh` from the
528
+ first rank of each dimension of the `device_mesh`.
529
+
530
+ Returns:
531
+ A :class:`DTensor` or `XLAShardedTensor` object.
532
+
533
+ Note:
534
+ When initialize the DeviceMesh with the `xla` device_type, `distribute_tensor`
535
+ return `XLAShardedTensor` instead. see [link](https://github.com/pytorch/pytorch/issues/92909)
536
+ for more details. The XLA integration is experimental and subject to change.
537
+ """
538
+
539
+ torch._C._log_api_usage_once("torch.dtensor.distribute_tensor")
540
+
541
+ # get default device mesh if there's nothing specified
542
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
543
+ device_type = device_mesh.device_type
544
+ if device_type == "xla":
545
+ try:
546
+ # call PyTorch/XLA SPMD for `xla` backend type device mesh.
547
+ # This returns XLAShardedTensor
548
+ from torch_xla.distributed.spmd import ( # type:ignore[import]
549
+ xla_distribute_tensor,
550
+ )
551
+
552
+ return xla_distribute_tensor(
553
+ tensor, device_mesh, placements
554
+ ) # type:ignore[return-value]
555
+ except ImportError as e:
556
+ msg = "To use DTensor API with xla, you must install the torch_xla package!"
557
+ raise ImportError(msg) from e
558
+
559
+ # instantiate an RNG tracker if we haven't already. By default DTensor uses an
560
+ # OffsetBasedRNGTracker to perform random operators.
561
+ # TODO: the value assignment to a global variable is not the ideal solution;
562
+ # we can replace it in the future.
563
+ if is_rng_supported_mesh(device_mesh) and not random._rng_tracker:
564
+ random._rng_tracker = OffsetBasedRNGTracker(device_type)
565
+
566
+ if not tensor.is_leaf:
567
+ raise RuntimeError(
568
+ "`distribute_tensor` should be used to distribute leaf tensors! but found non-leaf tensor!"
569
+ )
570
+
571
+ # convert tensor to the corresponding device type if it's not in that device type
572
+ if device_type != tensor.device.type and not tensor.is_meta:
573
+ tensor = tensor.to(device_type)
574
+
575
+ # set default placements to replicated if not specified
576
+ if placements is None:
577
+ placements = [Replicate() for _ in range(device_mesh.ndim)]
578
+
579
+ if len(placements) != device_mesh.ndim:
580
+ raise ValueError(
581
+ f"`placements` must have the same length as `device_mesh.ndim`! "
582
+ f"Found placements length: {len(placements)}, and device_mesh.ndim: {device_mesh.ndim}."
583
+ )
584
+ if isinstance(tensor, DTensor):
585
+ # if the tensor is already a DTensor, we just need to check if the
586
+ # device mesh and placements are the same
587
+ if tensor.device_mesh != device_mesh:
588
+ raise ValueError(
589
+ f"Cannot distribute a DTensor with device mesh {tensor.device_mesh} "
590
+ f"to a different device mesh {device_mesh}."
591
+ )
592
+ if tensor.placements != tuple(placements):
593
+ raise ValueError(
594
+ f"Cannot distribute a DTensor with placements {tensor.placements} "
595
+ f"to a different placements {placements}. do you want to call "
596
+ f"`redistribute` instead?"
597
+ )
598
+ return tensor
599
+
600
+ local_tensor = tensor
601
+
602
+ # distribute the tensor according to the placements.
603
+ placements = list(placements)
604
+ for idx, placement in enumerate(placements):
605
+ if placement.is_shard():
606
+ placement = cast(Shard, placement)
607
+ if placement.dim < 0:
608
+ # normalize shard placement dim
609
+ placement = Shard(placement.dim + tensor.ndim)
610
+ placements[idx] = placement
611
+ local_tensor = placement._shard_tensor(local_tensor, device_mesh, idx)
612
+ elif placement.is_replicate():
613
+ placement = cast(Replicate, placement)
614
+ local_tensor = placement._replicate_tensor(local_tensor, device_mesh, idx)
615
+ else:
616
+ raise RuntimeError(
617
+ f"Trying to distribute tensor with unsupported placements {placement} on device mesh dimension {idx}!"
618
+ )
619
+ placements = tuple(placements)
620
+
621
+ assert local_tensor is not None, "distributing a tensor should not be None"
622
+ # detach the local tensor passed to DTensor since after the construction
623
+ # of DTensor, autograd would work on top of DTensor instead of local tensor
624
+ return DTensor(
625
+ local_tensor.detach().requires_grad_(tensor.requires_grad),
626
+ device_mesh,
627
+ placements,
628
+ shape=tensor.size(),
629
+ dtype=tensor.dtype,
630
+ requires_grad=tensor.requires_grad,
631
+ stride=tensor.stride(),
632
+ )
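A hedged sketch of `distribute_tensor` (same assumed setup as above): shard a leaf tensor along dim 0, so each rank keeps a `torch.chunk`-style slice as its local shard.

```python
from torch.distributed._tensor import distribute_tensor, Shard

big = torch.randn(16, 4)        # a leaf tensor, present on every rank
dt = distribute_tensor(big, mesh, [Shard(0)])
print(dt.placements)            # (Shard(dim=0),)
print(dt.to_local().shape)      # e.g. torch.Size([16 // world_size, 4]) when evenly divisible
```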
633
+
634
+
635
+ def distribute_module(
636
+ module: nn.Module,
637
+ device_mesh: Optional[DeviceMesh] = None,
638
+ partition_fn: Optional[Callable[[str, nn.Module, DeviceMesh], None]] = None,
639
+ input_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None,
640
+ output_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None,
641
+ ) -> nn.Module:
642
+ """
643
+ This function converts all module parameters to :class:`DTensor` parameters
644
+ according to the `partition_fn` specified. It could also control the input or
645
+ output of the module by specifying the `input_fn` and `output_fn`. (i.e. convert
646
+ the input to :class:`DTensor`, convert the output back to torch.Tensor)
647
+ Args:
648
+ module (:class:`nn.Module`): user module to be partitioned.
649
+ device_mesh (:class:`DeviceMesh`): the device mesh to place the module.
650
+ partition_fn (Callable): the function to partition parameters (i.e. shard certain
651
+ parameters across the `device_mesh`). If `partition_fn` is not specified,
652
+ by default we replicate all module parameters of `module` across the mesh.
653
+ input_fn (Callable): specify the input distribution, i.e. could control how the
654
+ input of the module is sharded. `input_fn` will be installed as a module
655
+ `forward_pre_hook` (pre forward hook).
656
+ output_fn (Callable): specify the output distribution, i.e. could control how the
657
+ output is sharded, or convert it back to torch.Tensor. output_fn will be
658
+ installed as a module `forward_hook` (post forward hook).
659
+
660
+ Returns:
661
+ A module that contains parameters/buffers that are all `DTensor`s.
662
+
663
+ Note:
664
+ When initialize the DeviceMesh with the `xla` device_type, `distribute_module`
665
+ return nn.Module with PyTorch/XLA SPMD annotated parameters. See [link](https://github.com/pytorch/pytorch/issues/92909)
666
+ for more details. The XLA integration is experimental and subject to change.
667
+ """
668
+
669
+ torch._C._log_api_usage_once("torch.dtensor.distribute_module")
670
+
671
+ device_mesh = device_mesh or _mesh_resources.get_current_mesh()
672
+ device_type = device_mesh.device_type
673
+ if device_type == "xla":
674
+ try:
675
+ # This function annotates all module parameters for auto-partitioning with
676
+ # PyTorch/XLA SPMD or explicitly partition to :class:`XLAShardedTensor` parameters
677
+ # according to the `partition_fn` specified.
678
+ from torch_xla.distributed.spmd import ( # type:ignore[import]
679
+ xla_distribute_module,
680
+ )
681
+
682
+ return xla_distribute_module(
683
+ module, device_mesh, partition_fn, input_fn, output_fn
684
+ ) # type:ignore[return-value]
685
+ except ImportError as e:
686
+ msg = "To use DTensor API with xla, you must install the torch_xla package!"
687
+ raise ImportError(msg) from e
688
+
689
+ def replicate_module_params_buffers(m: nn.Module, mesh: DeviceMesh) -> None:
690
+ # This function loops over the immediate module parameters and
691
+ # buffers, replicating all non-DTensor params/buffers to DTensor
692
+ # parameters/buffers if they have not been partitioned in the
693
+ # partition_fn. We can't easily use `module._apply` here
694
+ # because we don't know what happened inside partition_fn, as the
695
+ # user could do anything (e.g. install hooks), and we want to
696
+ # preserve those.
697
+ full_replicate = [Replicate()] * mesh.ndim
698
+ for key, param in m._parameters.items():
699
+ if param is not None and not isinstance(param, DTensor):
700
+ m.register_parameter(
701
+ key,
702
+ nn.Parameter(distribute_tensor(param.data, mesh, full_replicate)),
703
+ )
704
+ for key, buffer in m._buffers.items():
705
+ if buffer is not None and not isinstance(buffer, DTensor):
706
+ m._buffers[key] = distribute_tensor(buffer, mesh, full_replicate)
707
+
708
+ if partition_fn is None:
709
+ # if partition_fn not specified, we by default replicate
710
+ # all module params/buffers
711
+ for name, submod in module.named_modules():
712
+ replicate_module_params_buffers(submod, device_mesh)
713
+ else:
714
+ # apply partition_fn to submodules
715
+ for name, submod in module.named_modules():
716
+ partition_fn(name, submod, device_mesh)
717
+ replicate_module_params_buffers(submod, device_mesh)
718
+
719
+ # register input_fn as module forward pre hook
720
+ if input_fn is not None:
721
+ # check the input_fn signature
722
+ num_args = len(inspect.signature(input_fn).parameters)
723
+ if num_args == 2:
724
+ # input_fn only takes in inputs and device mesh
725
+ warnings.warn(
726
+ "Deprecating input_fn that takes two arguments (inputs, device_mesh), "
727
+ "please use input_fn that takes in (module, inputs, device_mesh) instead!",
728
+ )
729
+ module.register_forward_pre_hook(lambda _, inputs: input_fn(inputs, device_mesh)) # type: ignore[call-arg]
730
+ elif num_args == 3:
731
+ # input_fn takes in module, inputs, device mesh
732
+ module.register_forward_pre_hook(
733
+ lambda mod, inputs: input_fn(mod, inputs, device_mesh)
734
+ )
735
+ else:
736
+ raise ValueError(
737
+ f"input_fn should take in 3 arguments, but got {num_args} arguments!"
738
+ )
739
+ # register output_fn as module forward hook
740
+ if output_fn is not None:
741
+ num_args = len(inspect.signature(output_fn).parameters)
742
+ if num_args == 2:
743
+ # output_fn only takes in outputs and device mesh
744
+ warnings.warn(
745
+ "Deprecating output_fn that takes two arguments (inputs, device_mesh), "
746
+ "please use output_fn that takes in (module, inputs, device_mesh) instead!",
747
+ )
748
+ module.register_forward_hook(
749
+ lambda mod, inputs, outputs: output_fn(outputs, device_mesh) # type: ignore[call-arg]
750
+ )
751
+ elif num_args == 3:
752
+ module.register_forward_hook(
753
+ lambda mod, inputs, outputs: output_fn(mod, outputs, device_mesh)
754
+ )
755
+ else:
756
+ raise ValueError(
757
+ f"output_fn should take in 3 arguments, but got {num_args} arguments!"
758
+ )
759
+
760
+ return module
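Finally, a hedged end-to-end sketch of `distribute_module` under the same assumptions. `MyModel` is a hypothetical module; the `partition_fn` shards `nn.Linear` weights on dim 0 (remaining params/buffers are auto-replicated by the default path above), while `input_fn`/`output_fn` convert tensors at the module boundary.

```python
import torch
import torch.nn as nn
from torch.distributed._tensor import (
    DTensor, Replicate, Shard, distribute_module, distribute_tensor,
)

def partition_fn(name, submod, mesh):
    # shard every Linear weight row-wise; everything else gets replicated by default
    if isinstance(submod, nn.Linear):
        submod.register_parameter(
            "weight", nn.Parameter(distribute_tensor(submod.weight, mesh, [Shard(0)]))
        )

def input_fn(mod, inputs, mesh):
    # replicate plain-tensor inputs so the sharded module sees DTensors
    return tuple(
        DTensor.from_local(t, mesh, [Replicate()]) if isinstance(t, torch.Tensor) else t
        for t in inputs
    )

def output_fn(mod, outputs, mesh):
    # hand plain tensors back to the rest of the (non-DTensor) code
    return outputs.to_local() if isinstance(outputs, DTensor) else outputs

sharded_model = distribute_module(MyModel(), mesh, partition_fn, input_fn, output_fn)  # MyModel is hypothetical
```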