applied-ai-018 committed on
Commit e9b87ea · verified · 1 Parent(s): 561eaf5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py +6 -0
  4. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py +28 -0
  9. venv/lib/python3.10/site-packages/torch/distributed/_shard/api.py +290 -0
  10. venv/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py +61 -0
  11. venv/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py +61 -0
  12. venv/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py +35 -0
  13. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py +469 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py +9 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py +107 -0
  30. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py +68 -0
  31. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py +143 -0
  32. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py +12 -0
  33. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py +215 -0
  34. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py +1253 -0
  35. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py +37 -0
  36. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py +16 -0
  37. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py +82 -0
  38. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py +248 -0
  39. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py +58 -0
  40. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py +211 -0
  41. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py +27 -0
  42. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py +4 -0
  43. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py +86 -0
  46. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py +12 -0
  47. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91cf845b100cc8fd139d8148727274a6239222275c34f11b14b9b2b53b2c3c45
+ size 50332828
ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ae855954f7aff680421fa2f9ce5d93710e462a53c85aa29d7b1df053fa2c766
+ size 50332749
venv/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from .api import (
+     _shard_tensor,
+     load_with_process_group,
+     shard_module,
+     shard_parameter,
+ )
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.06 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc ADDED
Binary file (2.29 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (2.33 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc ADDED
Binary file (1.21 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from torch.distributed._shard.metadata import ShardMetadata
+ from typing import Sequence
+
+ DEPRECATE_MSG = "Please use DTensor instead and we are deprecating ShardedTensor."
+
+ def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor:
+     """
+     Narrow the tensor according to ``offsets`` and ``sizes``.
+     """
+     narrowed_tensor = tensor
+     for idx, (offset, size) in enumerate(zip(offsets, sizes)):
+         if size < tensor.size(idx):
+             # Reshape to get shard for this rank and we don't want autograd
+             # recording here for the narrow op and 'local_shard' should be a
+             # leaf variable in the autograd graph.
+             narrowed_tensor = narrowed_tensor.narrow(
+                 idx,
+                 offset,
+                 size
+             )
+     return narrowed_tensor
+
+ def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor:
+     """
+     Narrow the tensor according to the metadata
+     """
+     return narrow_tensor_by_index(tensor, metadata.shard_offsets, metadata.shard_sizes)
venv/lib/python3.10/site-packages/torch/distributed/_shard/api.py ADDED
@@ -0,0 +1,290 @@
+ from contextlib import contextmanager
+ from typing import Optional
+ import torch
+ import torch.distributed as dist
+ import torch.nn as nn
+ from torch.distributed import distributed_c10d
+ from torch.distributed._shard.sharded_tensor import (
+     ShardedTensor,
+ )
+ from .sharding_spec import (
+     ShardingSpec,
+     ChunkShardingSpec
+ )
+ from .sharding_plan import (
+     ShardingPlan
+ )
+ from .sharder import Sharder
+
+ def _shard_tensor(
+     tensor: torch.Tensor, sharding_spec: ShardingSpec, src_rank=0, process_group=None
+ ) -> ShardedTensor:
+     """
+     Given a :class:`torch.Tensor`, it shards that tensor according to the provided
+     ``sharding_spec``. ``src_rank`` denotes the source rank which would be
+     used as the ground truth of the data which would be scattered as shards
+     across the rest of the ranks.
+
+     Args:
+         tensor (:class:`torch.Tensor`): Tensor needs to be sharded.
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+
+     Keyword args:
+         src_rank (int, optional): The source rank which is used as the ground truth of
+             the data for the parameter that would be sharded and scattered
+             across the rest of the ranks.
+             Default: 0.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+
+     Returns:
+         A :class:`ShardedTensor` sharded from the given tensor.
+
+     .. warning::
+         Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is
+         currently supported as the ``sharding_spec``.
+     """
+     if not tensor.is_contiguous():
+         raise ValueError('input tensor is not a contiguous Tensor')
+
+     pg = process_group if process_group is not None else distributed_c10d._get_default_group()
+     world_size = dist.get_world_size(pg)
+     current_rank = dist.get_rank(pg)
+
+     # Validate src_rank and sharding_spec are same across all ranks.
+     gathered_list = [None] * world_size
+     dist.all_gather_object(gathered_list, (src_rank, sharding_spec), group=pg)
+
+     for idx, entry in enumerate(gathered_list):
+         if src_rank != entry[0]:  # type: ignore[index]
+             raise ValueError(
+                 f'src_rank={src_rank} on rank: {current_rank} does not '  # type: ignore[index]
+                 f'match with src_rank={entry[0]} on rank: {idx}')
+         if sharding_spec != entry[1]:  # type: ignore[index]
+             raise ValueError(
+                 f'sharding_spec={sharding_spec} on rank: {current_rank} does not '  # type: ignore[index]
+                 f'match with sharding_spec={entry[1]} on rank: {idx}')
+
+     st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group)
+
+     return st
+
+ def shard_parameter(
+         module: torch.nn.Module,
+         param_name: str,
+         sharding_spec: ShardingSpec,
+         src_rank=0,
+         process_group=None):
+     """
+     Given a :class:`torch.nn.Module`, a ``param_name`` for a parameter in that
+     module, it shards that parameter according to the provided
+     ``sharding_spec``. ``src_rank`` denotes the source rank which would be
+     used as the ground truth of the data which would be scattered as shards
+     across the rest of the ranks.
+
+     This method replaces ``module.param_name`` with a
+     :class:`torch.distributed._sharded_tensor.ShardedTensor`
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose parameter needs to be sharded.
+         param_name (str): Name of the parameter of ``module`` that needs to be sharded.
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+
+     Keyword args:
+         src_rank (int, optional): The source rank which is used as the ground truth of
+             the data for the parameter that would be sharded and scattered
+             across the rest of the ranks.
+             Default: 0.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+
+     .. warning::
+         Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is
+         currently supported as the ``sharding_spec``.
+     """
+     # Perform some validation first.
+     if not hasattr(module, param_name):
+         raise AttributeError(f'{module._get_name()} has no attribute `{param_name}`')
+
+     tensor = getattr(module, param_name)
+     if not isinstance(tensor, torch.Tensor):
+         raise ValueError(f'Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}')
+
+     if not tensor.is_contiguous():
+         raise ValueError(f'param: {param_name} is not a contiguous Tensor')
+
+     st = _shard_tensor(tensor, sharding_spec, src_rank, process_group)
+
+     # Replace param with ShardedTensor.
+     module.register_parameter(param_name, nn.Parameter(st))
+
+ # Tracks the current process group in the load context manager.
+ _CURRENT_PROCESS_GROUP: Optional[dist.ProcessGroup] = None
+
+ @contextmanager
+ def load_with_process_group(process_group):
+     """
+     Context manager to set the process group with which to load a ShardedTensor.
+     """
+     global _CURRENT_PROCESS_GROUP
+     if _CURRENT_PROCESS_GROUP is not None:
+         raise RuntimeError(
+             'ProcessGroup already set by previous "load_with_process_group" '
+             'context manager')
+     _CURRENT_PROCESS_GROUP = process_group
+     try:
+         yield process_group
+     finally:
+         _CURRENT_PROCESS_GROUP = None
+
+ def _get_current_process_group():
+     """
+     Retrieves the current process group set by ``load_with_process_group``.
+     If not set, it just returns the default group.
+     """
+     global _CURRENT_PROCESS_GROUP
+     if _CURRENT_PROCESS_GROUP is None:
+         return distributed_c10d._get_default_group()
+     else:
+         return _CURRENT_PROCESS_GROUP
+
+ def _reshard_output(
+         module: torch.nn.Module,
+         resharding_spec: ShardingSpec) -> torch.nn.Module:
+     """
+     Hook a module with output resharding in the forward pass according
+     to the given ``resharding_spec``.
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose output needs to be resharded.
+         resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
+             The specification describing how the output of the module will be resharded.
+
+     Returns:
+         A :class:`torch.nn.Module` object with reshard API hooked.
+     """
+     def hook_func(_module, _input, output):
+         if isinstance(output, ShardedTensor):
+             return output.reshard(resharding_spec)
+         return output
+     module.register_forward_hook(hook_func)
+     return module
+
+ def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:
+     """
+     Hook a module with local shards collection in the forward pass.
+
+     This API is typically used to convert a sharded representation back to data parallel
+     representation. In particular, it returns the local tensor for this Shard. If the
+     size along the sharding dimension for the local tensor is 1, this dimension is removed
+     from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically
+     a local Tensor of size [16] across each rank and not [1, 16] across each rank.
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose output is ShardedTensor and the
+             local tensor value needs to be returned.
+
+     Returns:
+         A :class:`torch.nn.Module` object with collection API hooked.
+     """
+
+     def hook_func(_module, _input, output):
+         if isinstance(output, ShardedTensor):
+             local_tensor = output.local_tensor()
+             # Squeeze the # of dimensions manually, only applicable to ChunkShardingSpec
+             sharding_spec = output._sharding_spec
+             if isinstance(sharding_spec, ChunkShardingSpec) \
+                 and local_tensor.size(sharding_spec.dim) == 1:  # type: ignore[attr-defined, arg-type]
+                 local_tensor = local_tensor.squeeze(
+                     output._sharding_spec.dim  # type: ignore[attr-defined]
+                 )
+             return local_tensor
+     module.register_forward_hook(hook_func)
+     return module
+
+ def shard_module(
+     module: nn.Module,
+     plan: ShardingPlan,
+     src_rank=0,
+     process_group=None
+ ):
+     """
+     Shards a given module according to the provided sharding `plan`. This method
+     first shards all the parameters according to the given sharding `plan`. Then if
+     `output_plan` and `return_local_tensor` are specified in the sharding `plan`, it
+     will tag the output of modules according `output_plan`, convert the module's
+     output back to data parallel according to `return_local_tensor`.
+
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         module (:class:`torch.nn.Module`): The module to apply sharding to
+         plan (:class:`torch.distributed._shard.sharding_plan.ShardingPlan`):
+             The ShardingPlan which specified param name to ShardingSpec to apply to
+             each parameter.
+
+     Keyword args:
+         src_rank (int, optional): The source rank which is used as the ground truth of
+             the data for the module that would be sharded and scattered across the rest
+             of the ranks.
+             Default: 0.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+     """
+     # record Sharder paths for sanity check on the plan to ensure items in the plan
+     # does not conflict with the submodule tree that the Sharder is working with
+     sharder_paths = []
+     for name, spec in plan.plan.items():
+         if isinstance(spec, Sharder):
+             sharder_paths.append(name)
+
+     # shard the parameter according to the ShardingPlan
+     for name, spec in plan.plan.items():
+         if isinstance(spec, ShardingSpec):
+             # if found a sharding spec, try to shard the parameter
+             module_path, _, param_name = name.rpartition(".")
+
+             for sharder_path in sharder_paths:
+                 if module_path.startswith(sharder_path):
+                     raise RuntimeError(f"ShardingPlan is in-valid, trying to shard a parameter: {name},"
+                                        f" but there's already a Sharder entry for module {sharder_path},"
+                                        f" parameter sharding should not conflict with the submodule tree"
+                                        f" that a Sharder is working with!")
+
+             mod = module.get_submodule(module_path)
+             shard_parameter(
+                 mod,
+                 param_name,
+                 spec,
+                 src_rank=src_rank,
+                 process_group=process_group
+             )
+         elif isinstance(spec, Sharder):
+             parent_mod_path, _, mod_name = name.rpartition(".")
+             if name == "":
+                 raise KeyError("Module path must not be empty for custom sharder!")
+             mod = module.get_submodule(name)
+             parent_mod = module.get_submodule(parent_mod_path)
+             sharded_mod = spec.shard(mod)
+             # swap this submodule with the sharded module
+             parent_mod.mod_name = sharded_mod
+         else:
+             raise TypeError(f"Only `ShardingSpec` and `Sharder` are supported to shard '{name}'")
+
+     # reshard output if there's an entry in `reshard_output` for this module
+     if plan.output_plan is not None:
+         for module_path, output_spec in plan.output_plan.items():
+             if isinstance(output_spec, ShardingSpec):
+                 mod = module.get_submodule(module_path)
+                 _reshard_output(mod, output_spec)
+             else:
+                 raise TypeError(f"Only `ShardingSpec` is supported as output_plan for '{module_path}'")
+     # convert the output back to data parallel for the modules appears in
+     # `return_local_tensor` of the plan, we will call `_collect_local_shard`
+     # to collect the local tensor for output of modules
+     if plan.return_local_tensor is not None:
+         for module_path in plan.return_local_tensor:
+             mod = module.get_submodule(module_path)
+             _collect_local_shard(mod)
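A hedged usage sketch for the public entry points above. It assumes a script launched with torchrun --nproc_per_node=4 on a host with four GPUs; none of that scaffolding is part of this commit.

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._shard import shard_parameter
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

dist.init_process_group("nccl")
rank = dist.get_rank()
torch.cuda.set_device(rank)

# Chunk the Linear weight along dim 0 across the four ranks.
spec = ChunkShardingSpec(
    dim=0,
    placements=[f"rank:{r}/cuda:{r}" for r in range(4)],
)
linear = nn.Linear(16, 32).cuda(rank)
shard_parameter(linear, "weight", spec, src_rank=0)
# linear.weight is now a ShardedTensor; each rank holds one (8, 16) local chunk.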
venv/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py ADDED
@@ -0,0 +1,61 @@
+ import torch
+ from torch.utils import _pytree as pytree
+ from typing import Optional
+
+ def _basic_validation(op, args=(), kwargs=None):
+     """
+     Common validation across all ops go in here.
+     """
+     from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+     if len(args) == 0 and (kwargs is None or len(kwargs) == 0):
+         raise ValueError(f" No input for '{op.__name__}'!")
+
+     # Validate types
+     has_distributed_tensor = False
+
+     def is_distributed_tensor(e):
+         nonlocal has_distributed_tensor
+         if isinstance(e, ShardedTensor):
+             has_distributed_tensor = True
+
+     pytree.tree_map_(is_distributed_tensor, args)
+     pytree.tree_map_(is_distributed_tensor, kwargs)
+
+     if not has_distributed_tensor:
+         raise TypeError(
+             f"torch function '{op.__name__}', with args: {args} and "
+             f"kwargs: {kwargs} are called without any distributed tensor!"
+         )
+
+     # Validate all distributed tensors use the same PG.
+     cur_pg: Optional[torch.distributed.ProcessGroup] = None
+
+     def validate_pg(e):
+         nonlocal cur_pg
+         if isinstance(e, ShardedTensor):
+             if cur_pg is not None and e._process_group is not cur_pg:
+                 raise RuntimeError(
+                     'All distributed tensors should use the '
+                     'same ProcessGroup if used together in an op.'
+                 )
+             cur_pg = e._process_group
+
+     pytree.tree_map_(validate_pg, args)
+     pytree.tree_map_(validate_pg, kwargs)
+
+ def _register_default_op(op, decorator):
+     @decorator(op)
+     def tensor_default_op(types, args=(), kwargs=None, pg=None):
+         """
+         Handles ``__torch_function__`` dispatch for the default tensor ops that
+         behave the same as ``torch.Tensor`` such as ``torch.Tensor.shape`` or
+         ``torch.Tensor.dtype``. We simply lower to the real op call with
+         DisableTorchFunctionSubclass context like ``torch.Tensor.__torch_function__``
+         to avoid recursions.
+         """
+         if kwargs is None:
+             kwargs = {}
+
+         with torch._C.DisableTorchFunctionSubclass():
+             return op(*args, **kwargs)
venv/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py ADDED
@@ -0,0 +1,61 @@
+ from dataclasses import dataclass
+ from typing import List, Union, Optional
+ from functools import reduce
+
+ from torch.distributed.remote_device import _remote_device
+
+ @dataclass
+ class ShardMetadata:
+     """
+     Represents a shard of the overall Tensor including its
+     offsets, lengths and device placement.
+
+     Args:
+         shard_offsets(List[int]): Offsets in the original tensor indicating
+             the start offsets for this shard. Should have the same rank as
+             the original tensor.
+         shard_sizes(List[int]): Integers indicating the size of each
+             dimension for this shard. Should have the same rank as the
+             original tensor.
+         placement(:class:`torch.distributed._remote_device`):
+             Specifies the placement of this shard.
+     """
+
+     __slots__ = ['shard_offsets', 'shard_sizes', 'placement']
+
+     shard_offsets: List[int]
+     shard_sizes: List[int]
+     placement: Optional[_remote_device]
+
+     def __init__(
+         self,
+         shard_offsets: List[int],
+         shard_sizes: List[int],
+         placement: Optional[Union[str, _remote_device]] = None
+     ):
+         self.shard_offsets = shard_offsets
+         self.shard_sizes = shard_sizes
+         if isinstance(placement, str):
+             self.placement = _remote_device(placement)
+         else:
+             self.placement = placement
+         if len(self.shard_offsets) != len(self.shard_sizes):
+             raise ValueError(
+                 f'shard_offsets and shard_sizes should have '
+                 f'the same number of elements, found {len(self.shard_offsets)} '
+                 f'and {self.shard_sizes} respectively')
+
+         for i in range(len(self.shard_offsets)):
+             if self.shard_offsets[i] < 0:
+                 raise ValueError('shard_offsets should be >=0')
+             if self.shard_sizes[i] < 0:
+                 raise ValueError('shard_sizes should be >= 0')
+
+     def __hash__(self):
+         def _hash_reduce(a, b):
+             return (a << 8) + hash(b)
+
+         res = reduce(_hash_reduce, self.shard_offsets, 37)
+         res = reduce(_hash_reduce, self.shard_sizes, res)
+         res = _hash_reduce(res, self.placement)
+         return res
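ShardMetadata is a plain description and needs no process group, so it can be constructed directly. A small sketch (not part of the commit) describing the second row-block of a (10, 5) tensor split into two (5, 5) shards:

from torch.distributed._shard.metadata import ShardMetadata

meta = ShardMetadata(
    shard_offsets=[5, 0],        # start at row 5, column 0
    shard_sizes=[5, 5],          # a 5x5 block
    placement="rank:1/cuda:1",   # string form is parsed into a _remote_device
)
print(meta)        # dataclass repr with offsets, sizes and placement
print(hash(meta))  # usable as a dict key thanks to the custom __hash__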
venv/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py ADDED
@@ -0,0 +1,35 @@
+ import functools
+ from inspect import signature
+ from .common_op_utils import _basic_validation
+
+ """
+ Common utilities to register ops on ShardedTensor
+ and PartialTensor.
+ """
+
+ def _register_op(op, func, op_table):
+     """
+     Performs basic validation and registers the provided op in the given
+     op_table.
+     """
+     if len(signature(func).parameters) != 4:
+         raise TypeError(
+             f'Custom sharded op function expects signature: '
+             f'(types, args, kwargs, process_group), but received '
+             f'signature: {signature(func)}')
+
+     op_table[op] = func
+
+ def _decorator_func(wrapped_func, op, op_table):
+     """
+     Decorator function to register the given ``op`` in the provided
+     ``op_table``
+     """
+
+     @functools.wraps(wrapped_func)
+     def wrapper(types, args, kwargs, process_group):
+         _basic_validation(op, args, kwargs)
+         return wrapped_func(types, args, kwargs, process_group)
+
+     _register_op(op, wrapper, op_table)
+     return wrapper
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py ADDED
@@ -0,0 +1,469 @@
+ import functools
+ from typing import List, TYPE_CHECKING
+
+ import torch
+
+ if TYPE_CHECKING:
+     from torch.distributed._shard.sharding_spec import ShardingSpec
+ else:
+     ShardingSpec = "ShardingSpec"
+
+ from .api import (
+     _CUSTOM_SHARDED_OPS,
+     _SHARDED_OPS,
+     Shard,
+     ShardedTensorBase,
+     ShardedTensor,
+     ShardedTensorMetadata,
+     TensorProperties,
+ )
+ from .metadata import ShardMetadata  # noqa: F401
+ from torch.distributed._shard.op_registry_utils import _decorator_func
+
+
+ def empty(sharding_spec: ShardingSpec,
+           *size,
+           dtype=None,
+           layout=torch.strided,
+           requires_grad=False,
+           pin_memory=False,
+           memory_format=torch.contiguous_format,
+           process_group=None,
+           init_rrefs=False) -> ShardedTensor:
+     """
+     Returns a :class:`ShardedTensor` filled with uninitialized data.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a sequence of integers defining the shape of the output
+             tensor. Can be a variable number of arguments or a collection like a list or tuple.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         memory_format (:class:`torch.memory_format`, optional): the desired memory format of
+             returned Tensor. Default: ``torch.contiguous_format``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     return ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+
+ def ones(sharding_spec: ShardingSpec,
+          *size,
+          dtype=None,
+          layout=torch.strided,
+          requires_grad=False,
+          pin_memory=False,
+          memory_format=torch.contiguous_format,
+          process_group=None,
+          init_rrefs=False) -> ShardedTensor:
+     """
+     Returns a :class:`ShardedTensor` with the scalar value 1.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a sequence of integers defining the shape of the output
+             tensor. Can be a variable number of arguments or a collection like a list or tuple.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     return full(
+         sharding_spec,
+         size,
+         fill_value=1,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs
+     )
+
+ def zeros(sharding_spec: ShardingSpec,
+           *size,
+           dtype=None,
+           layout=torch.strided,
+           requires_grad=False,
+           pin_memory=False,
+           memory_format=torch.contiguous_format,
+           process_group=None,
+           init_rrefs=False) -> ShardedTensor:
+     """
+     Returns a :class:`ShardedTensor` filled with the scalar value 0.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a sequence of integers defining the shape of the output
+             tensor. Can be a variable number of arguments or a collection like a list or tuple.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     return full(
+         sharding_spec,
+         size,
+         fill_value=0,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs
+     )
+
+ def full(sharding_spec: ShardingSpec,
+          size,
+          fill_value,
+          *,
+          dtype=None,
+          layout=torch.strided,
+          requires_grad=False,
+          pin_memory=False,
+          memory_format=torch.contiguous_format,
+          process_group=None,
+          init_rrefs=False) -> ShardedTensor:
+     """
+     Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype
+     is inferred from fill_value. If dtype is specified, it will override the
+     inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.
+     Args:
+         sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
+             output tensor.
+         fill_value (Scalar) – the value to fill the output tensor with.
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     sharded_tensor = ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+     torch.nn.init.constant_(sharded_tensor, fill_value)  # type: ignore[arg-type]
+     return sharded_tensor
+
+ def rand(sharding_spec: ShardingSpec,
+          *size,
+          dtype=None,
+          layout=torch.strided,
+          requires_grad=False,
+          pin_memory=False,
+          memory_format=torch.contiguous_format,
+          process_group=None,
+          init_rrefs=False) -> ShardedTensor:
+     """
+     Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution
+     on the interval :math:`[0, 1)`. The shape of the tensor is defined by the
+     variable argument `size`. Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
+             output tensor.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     sharded_tensor = ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+     torch.nn.init.uniform_(sharded_tensor, 0, 1)  # type: ignore[arg-type]
+     return sharded_tensor
+
+ def randn(sharding_spec: ShardingSpec,
+           *size,
+           dtype=None,
+           layout=torch.strided,
+           requires_grad=False,
+           pin_memory=False,
+           memory_format=torch.contiguous_format,
+           process_group=None,
+           init_rrefs=False) -> ShardedTensor:
+     """
+     Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution
+     with mean `0` and variance `1` (also called standard normal distribution). The shape
+     of the tensor is defined by the variable argument `size`. Needs to be called on all ranks
+     in an SPMD fashion.
+
+     Args:
+         sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
+             describing how to shard the Tensor.
+         size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
+             output tensor.
+
+     Keyword args:
+         dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+             Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+         layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+             Default: ``torch.strided``.
+         requires_grad (bool, optional): If autograd should record operations on the
+             returned tensor. Default: ``False``.
+         pin_memory (bool, optional): If set, returned tensor would be allocated in
+             the pinned memory. Works only for CPU tensors. Default: ``False``.
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object on each rank
+     """
+     sharded_tensor = ShardedTensor(
+         sharding_spec,
+         *size,
+         dtype=dtype,
+         layout=layout,
+         requires_grad=requires_grad,
+         pin_memory=pin_memory,
+         memory_format=memory_format,
+         process_group=process_group,
+         init_rrefs=init_rrefs,
+     )
+     torch.nn.init.normal_(sharded_tensor, 0, 1)  # type: ignore[arg-type]
+     return sharded_tensor
+
+ def init_from_local_shards(
+         local_shards: List[Shard],
+         *global_size,
+         process_group=None,
+         init_rrefs=False) -> ShardedTensor:
+     """
+     Creates an :class:`ShardedTensor` from local shards and the global metadata.
+     Needs to be called on all ranks in an SPMD fashion.
+
+     Args:
+         local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list
+             of shards that represent the local shards on this rank.
+         global_size (int...): a list, tuple, or `torch.Size` of integers defining the
+             shape of the overall sharded tensor.
+
+     Keyword args:
+         process_group (ProcessGroup, optional): The process group to work on. If None,
+             the default process group will be used.
+         init_rrefs (bool, optional): Whether or not to initialize
+             :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
+             Need to initialize the RPC Framework if specified as ``True``.
+             Default: ``False``.
+
+     Returns:
+         A :class:`ShardedTensor` object handle on this rank
+
+
+     Examples:
+         Suppose we want construct a sharded tensor on two ranks, global size = (10, 5),
+         each shard have a (5, 5) local tensor, we can do it like below:
+
+         on rank 0:
+         >>> # xdoctest: +SKIP("not distributed")
+         >>> local_shard_metadata = ShardMetadata(
+         >>>     shard_offsets=[0, 0],
+         >>>     shard_lengths=[5, 5],
+         >>>     placement="rank:0/cuda:0"
+         >>> )
+         >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
+         >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
+
+         on rank 1:
+         >>> # xdoctest: +SKIP("not distributed")
+         >>> local_shard_metadata = ShardMetadata(
+         >>>     shard_offsets=[5, 0],
+         >>>     shard_lengths=[5, 5],
+         >>>     placement="rank:1/cuda:1"
+         >>> )
+         >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
+         >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
+     """
+     return ShardedTensor._init_from_local_shards(
+         local_shards,
+         *global_size,
+         process_group=process_group,
+         init_rrefs=init_rrefs
+     )
+
+ def state_dict_hook(module, destination, prefix, local_metadata):
+     """
+     Hook to add ShardedTensor to Module's ``state_dict``. Needs to be
+     registered to the Module using
+     :meth:`torch.nn.Module._register_state_dict_hook`.
+     """
+     for submodule_name, submodule in module.named_modules():
+         for attr_name, attr in submodule.__dict__.items():
+             if isinstance(attr, ShardedTensor):
+                 mod_prefix = prefix + submodule_name
+                 key = mod_prefix + ('.' if mod_prefix else '') + attr_name
+                 destination[key] = attr
+
+ def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
+     """
+     Pre-load state dict hook to add ShardedTensor to the module.
+     """
+     for submodule_name, submodule in module.named_modules():
+         for attr_name in submodule.__dict__.keys():
+             mod_prefix = prefix + submodule_name
+             key = mod_prefix + ('.' if mod_prefix else '') + attr_name
+             if key in state_dict:
+                 if isinstance(state_dict[key], ShardedTensor):
+                     setattr(submodule, attr_name, state_dict[key])
+
+ def custom_sharded_op_impl(func):
+     """
+     Provides a way for users to write their own custom sharded operator. This
+     can be used to override existing ShardedTensor operators or write a new
+     one not supported by ShardedTensor. If the operator in question is covered
+     by ``__torch_function__`` dispatch and has a ShardedTensor as any of its
+     parameters, the function provided will be invoked for that operator.
+
+     Example::
+         >>> # xdoctest: +SKIP
+         >>> @custom_sharded_op_impl(torch.nn.functional.linear)
+         >>> def my_custom_sharded_linear(types, args, kwargs, process_group):
+         >>>     ...
+         >>> # xdoctest: +SKIP("Undefined variables")
+         >>> input = torch.rand(10, 32)
+         >>> weight = sharded_tensor.rand(32, 16)
+         >>> bias = torch.rand(16)
+         >>> # This will call 'my_custom_sharded_linear'
+         >>> torch.nn.functional.linear(input, weight, bias)
+
+     The types, args and kwargs parameters are the same parameters that are
+     passed to ``__torch_function__`` dispatch API
+     (https://pytorch.org/docs/stable/notes/extending.html#extending-torch).
+     There is an additional ``process_group`` parameter which is the
+     process_group used for the ShardedTensor and can be used by
+     implementations for communications within a sharded implementation.
+
+     Args:
+         func(Callable): Torch function for which we want to provide a sharded
+             implementation (ex: torch.nn.functional.linear)
+     """
+     return functools.partial(
+         _decorator_func,
+         op=func,
+         op_table=_CUSTOM_SHARDED_OPS
+     )
+
+ def _sharded_op_impl(func):
+     """
+     Decorator to register a default sharded op.
+     """
+     return functools.partial(
+         _decorator_func,
+         op=func,
+         op_table=_SHARDED_OPS
+     )
+
+ # Import all builtin sharded ops
+ from ._ops import *  # noqa: F403
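A hedged sketch of the creation ops exported above, assuming two ranks with one GPU each launched via torchrun --nproc_per_node=2 (none of this scaffolding is in the commit):

import torch
import torch.distributed as dist
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

dist.init_process_group("nccl")
torch.cuda.set_device(dist.get_rank())

spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])

# Collective call: every rank participates and ends up holding a (5, 4) shard.
st = sharded_tensor.rand(spec, 10, 4)
print(st.local_shards()[0].tensor.shape)  # torch.Size([5, 4])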
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (17.2 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc ADDED
Binary file (34.6 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc ADDED
Binary file (1.16 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc ADDED
Binary file (425 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (2.69 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc ADDED
Binary file (8.15 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc ADDED
Binary file (2.45 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc ADDED
Binary file (5.54 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py ADDED
@@ -0,0 +1,9 @@
+ import torch.distributed._shard.sharded_tensor._ops.misc_ops
+ import torch.distributed._shard.sharded_tensor._ops.tensor_ops
+
+ from .binary_cmp import equal, allclose
+ from .init import kaiming_uniform_, normal_, uniform_, constant_
+
+ # Import all ChunkShardingSpec ops
+ from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding import sharded_embedding
+ from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag import sharded_embedding_bag
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (753 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc ADDED
Binary file (4.25 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc ADDED
Binary file (1.98 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc ADDED
Binary file (4.76 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc ADDED
Binary file (529 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc ADDED
Binary file (5.55 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py ADDED
@@ -0,0 +1,107 @@
+ import functools
+ from torch.distributed._shard.sharded_tensor import (
+     _sharded_op_impl,
+     Shard,
+     ShardedTensor,
+ )
+ from torch.distributed._shard.common_op_utils import _basic_validation
+
+ def _sharded_op_common(op, early_stop_func, extra_check):
+     """
+     Inject sharded tensor op registration with common logics executed before
+     different behaviors are done on either local shards or a local tensor.
+
+     Example::
+         >>> # xdoctest: +SKIP("Undefined variables")
+         >>> op = torch.transpose
+         >>> @_sharded_op_impl(op)
+         >>> @_sharded_op_common(op, early_stop_func, extra_check)
+         >>> def sharded_tensor_op(types, args, kwargs, process_group):
+         >>>     ...
+         >>>
+         >>> st = sharded_tensor.rand(32, 16)
+         >>> st.transpose(1, 2)
+         >>> # This will call '_sharded_op_common'
+
+     Args:
+         op: The op to be registered and applied to all shards of the st.
+         early_stop_func (Callable, optional): the func for early stop.
+             Default: if ``None``, no early stop.
+         extra_check (Callable, optional): the func for extra condition check.
+             Default: if ``None``, no extra check.
+
+     Return:
+         func (Callable): Torch function for which we want to provide a sharded
+             implementation (ex: torch.transpose)
+     """
+     def decorator_sharded_func(wrapped_func):
+         @functools.wraps(wrapped_func)
+         def wrapper(types, args=(), kwargs=None, pg=None):
+             _basic_validation(op, args, kwargs)
+
+             st = args[0]
+             if kwargs is None:
+                 kwargs = {}
+             if extra_check:
+                 extra_check(*args, **kwargs)
+             if early_stop_func:
+                 early_stop = early_stop_func(*args, **kwargs)
+                 if early_stop:
+                     return st
+             return wrapped_func(types, args, kwargs, pg)
+
+         return wrapper
+
+     return decorator_sharded_func
+
+ def _register_sharded_op_on_local_shards(
+     op, early_stop_func=None, extra_check=None, customized_func=None
+ ):
+     """
+     Handles ``__torch_function__`` dispatch for ops which are performed on
+     each shard of the sharded tensor such as elementwise op like
+     ``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``.
+
+     For more complicated ops, a customized func can be used to generate
+     the new shards and sharded tensor size.
+
+     This function expects that the original ShardingSpec for the ShardedTensor
+     is preserved irrespective of whether or not a customized function is used.
+
+     Args:
+         op: The op to be registered and applied to all shards of the st.
+         early_stop_func (Callable, optional): the func for early stop.
+             Default: if ``None``, no early stop.
+         extra_check (Callable, optional): the func for extra condition check.
+             Default: if ``None``, no extra check.
+         customized_func (Callable, optional): the func for customized logic
+             to generate new shards and sharded tensor size.
+             Default: if ``None``, we simply lower to the real op call with
+                 all local shards of the st.
+
+     Return:
+         func (Callable): registered implementation for sharded op for
+             ``__torch_function__`` dispatch.
+     """
+     @_sharded_op_impl(op)
+     @_sharded_op_common(op, early_stop_func, extra_check)
+     def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None):
+         st = args[0]
+         st_metadata = st.metadata()
+         local_shards = st.local_shards()
+         local_shards_new = []
+         if customized_func:
+             local_shards_new, st_metadata = customized_func(args, kwargs, pg)
+         else:
+             for local_shard in local_shards:
+                 args = (local_shard.tensor, *args[1:])
+                 local_shards_new.append(
+                     Shard(op(*args, **kwargs), local_shard.metadata)
+                 )
+         return ShardedTensor._init_from_local_shards_and_global_metadata(
+             local_shards_new,
+             st_metadata,
+             process_group=pg,
+             init_rrefs=st._init_rrefs,
+             sharding_spec=st.sharding_spec()
+         )
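To illustrate how the helper above is meant to be used, here is a hedged sketch that registers an elementwise op so it runs shard-by-shard; torch.nn.functional.gelu is chosen purely for illustration and may already be registered elsewhere in the package.

import torch
from torch.distributed._shard.sharded_tensor._ops._common import (
    _register_sharded_op_on_local_shards,
)

# After this call, torch.nn.functional.gelu(st) on a ShardedTensor `st` goes
# through __torch_function__, applies gelu to every local shard, and rebuilds
# a ShardedTensor with the original metadata and sharding spec.
_register_sharded_op_on_local_shards(torch.nn.functional.gelu)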
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py ADDED
@@ -0,0 +1,68 @@
+ import torch
+ import torch.distributed as dist
+ import torch.distributed.distributed_c10d as distributed_c10d
+ from torch.distributed._shard.sharded_tensor import (
+     ShardedTensor,
+     _sharded_op_impl
+ )
+
+ def _communicate_result(result, pg):
+     # Gather results from all ranks.
+     if result:
+         result_tensor = torch.ones(1, device=torch.device(torch.cuda.current_device()))
+     else:
+         result_tensor = torch.zeros(1, device=torch.device(torch.cuda.current_device()))
+
+     dist.all_reduce(result_tensor, group=pg)
+
+     expected_result = torch.ones(1, device=torch.device(torch.cuda.current_device())) * dist.get_world_size(pg)
+
+     return torch.equal(result_tensor, expected_result)
+
+ def binary_cmp(cmp_fun, types, args, kwargs=None, process_group=None):
+     if len(args) != 2:
+         raise ValueError(f'Expected two arguments for torch.{cmp_fun.__name__}')
+
+     result = True
+     st1 = args[0]
+     st2 = args[1]
+     if not (isinstance(st1, ShardedTensor) and isinstance(st2, ShardedTensor)):
+         raise TypeError(f'Both arguments to torch.{cmp_fun.__name__} need to be of type ShardedTensor')
+
+     # Verify same PG
+     if st1._process_group != st2._process_group:
+         return False
+
+     if distributed_c10d._rank_not_in_group(st1._process_group) or distributed_c10d._rank_not_in_group(st2._process_group):
+         return distributed_c10d._rank_not_in_group(st1._process_group) == distributed_c10d._rank_not_in_group(st2._process_group)
+
+     # Verify metadata
+     if st1.metadata() != st2.metadata():
+         return _communicate_result(False, st1._process_group)
+
+     # Verify number of local shards
+     st1_local_shards = st1.local_shards()
+     st2_local_shards = st2.local_shards()
+     if len(st1_local_shards) != len(st2_local_shards):
+         return _communicate_result(False, st1._process_group)
+
+     # kwargs must be dict-like
+     if kwargs is None:
+         kwargs = {}
+     # Verify each local shard
+     for idx in range(len(st1_local_shards)):
+         if st1_local_shards[idx].metadata != st2_local_shards[idx].metadata:
+             return _communicate_result(False, st1._process_group)
+         if not cmp_fun(st1_local_shards[idx].tensor, st2_local_shards[idx].tensor, **kwargs):
+             return _communicate_result(False, st1._process_group)
+
+
+     return _communicate_result(True, st1._process_group)
+
+ @_sharded_op_impl(torch.equal)
+ def equal(types, args, kwargs, process_group):
+     return binary_cmp(torch.equal, types, args, kwargs, process_group)
+
+ @_sharded_op_impl(torch.allclose)
+ def allclose(types, args, kwargs, process_group):
+     return binary_cmp(torch.allclose, types, args, kwargs, process_group)
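A hedged sketch of the comparison path above, assuming the same two-rank GPU setup as the earlier creation-op sketch; every rank gets the same boolean back because the per-shard results are all-reduced over the process group.

import torch
import torch.distributed as dist
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

dist.init_process_group("nccl")
torch.cuda.set_device(dist.get_rank())
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])

st1 = sharded_tensor.ones(spec, 10, 4)
st2 = sharded_tensor.ones(spec, 10, 4)
print(torch.equal(st1, st2))     # True, dispatched to binary_cmp above
print(torch.allclose(st1, st2))  # True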
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py ADDED
@@ -0,0 +1,143 @@
1
+ import torch
2
+ import torch.distributed._shard.sharded_tensor as sharded_tensor
3
+ from torch.distributed._shard.sharded_tensor import (
4
+ _sharded_op_impl,
5
+ )
6
+
7
+ def validate_param(param, param_name):
8
+ if param is None:
9
+ raise ValueError(f"param: {param_name} shouldn't be None!")
10
+
11
+ @_sharded_op_impl(torch.nn.init.uniform_)
12
+ def uniform_(types, args=(), kwargs=None, pg=None):
13
+ r"""
14
+ Fills the Tensors in tensor.local_shards with values drawn from the uniform
15
+ distribution :math:`\mathcal{U}(a, b)`.
16
+ Args:
17
+ tensor: tensor sharded across devices
18
+ a: the lower bound of the uniform distribution
19
+ b: the upper bound of the uniform distribution
20
+ """
21
+ validate_param(kwargs, "kwargs")
22
+ sharded_tensor = kwargs["tensor"]
23
+ validate_param(sharded_tensor, "tensor")
24
+ a = kwargs['a']
25
+ validate_param(a, "a")
26
+ b = kwargs['b']
27
+ validate_param(b, "b")
28
+
29
+ for shard in sharded_tensor.local_shards():
30
+ torch.nn.init.uniform_(shard.tensor, a=a, b=b)
31
+ return sharded_tensor
32
+
33
+ @_sharded_op_impl(torch.nn.init.normal_)
34
+ def normal_(types, args=(), kwargs=None, pg=None):
35
+ r"""
36
+ Fills the Tensors in tensor.local_shards with values drawn from the normal
37
+ distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
38
+ Args:
39
+ tensor: tensor sharded across devices
40
+ mean: the mean of the normal distribution
41
+ std: the standard deviation of the normal distribution
42
+ """
43
+ validate_param(kwargs, "kwargs")
44
+ sharded_tensor = kwargs["tensor"]
45
+ validate_param(sharded_tensor, "tensor")
46
+ mean = kwargs['mean']
47
+ validate_param(mean, "mean")
48
+ std = kwargs['std']
49
+ validate_param(std, "std")
50
+
51
+ for shard in sharded_tensor.local_shards():
52
+ torch.nn.init.normal_(shard.tensor, mean=mean, std=std)
53
+ return sharded_tensor
54
+
55
+ @_sharded_op_impl(torch.nn.init.kaiming_uniform_)
56
+ def kaiming_uniform_(types, args=(), kwargs=None, pg=None):
57
+ r"""
58
+ Fills the Tensors in tensor.local_shards with values according to the method
59
+ described in `Delving deep into rectifiers: Surpassing human-level
60
+ performance on ImageNet classification` - He, K. et al. (2015), using a
61
+ uniform distribution. The resulting tensor will have values sampled from
62
+ :math:`\mathcal{U}(-\text{bound}, \text{bound})` where
63
+ .. math::
64
+ \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
65
+ Also known as He initialization.
66
+ Args:
67
+ tensor: tensor sharded across devices
68
+ a: the negative slope of the rectifier used after this layer (only
69
+ used with ``'leaky_relu'``)
70
+ mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
71
+ preserves the magnitude of the variance of the weights in the
72
+ forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
73
+ backwards pass.
74
+ nonlinearity: the non-linear function (`nn.functional` name),
75
+ recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
76
+ """
77
+ validate_param(kwargs, "kwargs")
78
+ sharded_tensor = kwargs["tensor"]
79
+ validate_param(sharded_tensor, "tensor")
80
+ a = kwargs['a']
81
+ validate_param(a, "a")
82
+ mode = kwargs['mode']
83
+ validate_param(mode, "mode")
84
+ nonlinearity = kwargs['nonlinearity']
85
+ validate_param(nonlinearity, "nonlinearity")
86
+
87
+ for shard in sharded_tensor.local_shards():
88
+ torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity)
89
+ return sharded_tensor
90
+
91
+ @_sharded_op_impl(torch.nn.init.constant_)
92
+ def constant_(types, args=(), kwargs=None, pg=None):
93
+ r"""
94
+ Fills the input ShardedTensor with the value ``val``.
95
+ Args:
96
+ tensor: tensor sharded across devices
97
+ val: the value to fill the tensor with
98
+ """
99
+ validate_param(kwargs, "kwargs")
100
+ sharded_tensor = kwargs["tensor"]
101
+ validate_param(sharded_tensor, "tensor")
102
+ val = kwargs['val']
103
+ validate_param(val, "val")
104
+ for shard in sharded_tensor.local_shards():
105
+ torch.nn.init.constant_(shard.tensor, val=val)
106
+ return sharded_tensor
107
+
108
+ tensor_like_creation_op_map = {
109
+ torch.full_like: sharded_tensor.full,
110
+ torch.empty_like: sharded_tensor.empty,
111
+ torch.zeros_like: sharded_tensor.zeros,
112
+ torch.ones_like: sharded_tensor.ones,
113
+ torch.rand_like: sharded_tensor.rand,
114
+ torch.randn_like: sharded_tensor.randn,
115
+ }
116
+
117
+ # tensor ops that behave the same as the default tensor
118
+ def register_tensor_creation_op(op):
119
+ @_sharded_op_impl(op)
120
+ def tensor_creation_op(types, args=(), kwargs=None, pg=None):
121
+ """
122
+ Handles ``__torch_function__`` dispatch for tensor creation ops that
123
+ take a ShardedTensor as argument, such as ``torch.zeros_like`` or
124
+ ``torch.full_like``.
125
+ """
126
+ creation_op = tensor_like_creation_op_map.get(op, None)
127
+ if creation_op is None:
128
+ raise RuntimeError(f"Tensor creation {op} not supported!")
129
+ if kwargs is None:
130
+ kwargs = {}
131
+
132
+ st = args[0]
133
+
134
+ new_st = creation_op(st.sharding_spec(), st.size(), *args[1:], **kwargs) # type: ignore[operator]
135
+ return new_st
136
+
137
+
138
+ register_tensor_creation_op(torch.full_like)
139
+ register_tensor_creation_op(torch.empty_like)
140
+ register_tensor_creation_op(torch.zeros_like)
141
+ register_tensor_creation_op(torch.ones_like)
142
+ register_tensor_creation_op(torch.rand_like)
143
+ register_tensor_creation_op(torch.randn_like)
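A short sketch of how these registrations are exercised (not part of the diff). It assumes the process group and ChunkShardingSpec `spec` from the previous example; sizes and bounds are illustrative.

# torch.nn.init forwards its arguments as keyword arguments through
# __torch_function__, which is why the handlers above read kwargs["tensor"].
st = sharded_tensor.empty(spec, 8, 8)
torch.nn.init.uniform_(st, a=-0.1, b=0.1)   # fills every local shard in place
torch.nn.init.constant_(st, val=1.0)        # likewise dispatches per shard

# *_like creation ops are remapped to the sharded_tensor factories with the
# same sharding spec and global size as the input ShardedTensor.
zeros_st = torch.zeros_like(st)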
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py ADDED
@@ -0,0 +1,12 @@
1
+ import torch
2
+ from torch.distributed._shard.sharded_tensor import (
3
+ _sharded_op_impl,
4
+ )
5
+
6
+ # This is used by `_apply()` within module.py to set new
7
+ # parameters after applying a certain method; we should follow
8
+ # the future behavior of overwriting the existing tensor
9
+ # instead of doing in-place change using `.data = `.
10
+ @_sharded_op_impl(torch._has_compatible_shallow_copy_type)
11
+ def tensor_has_compatible_shallow_copy_type(types, args=(), kwargs=None, pg=None):
12
+ return False
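For context (not part of the diff), a brief illustration of what this override buys: because the check reports False for a ShardedTensor, nn.Module._apply() rebuilds the sharded parameter rather than swapping `.data` in place. The `st` below is assumed to be an existing ShardedTensor from the earlier examples.

# Any regular tensor can be passed as the second argument; the registered
# handler above ignores it and always returns False.
assert torch._has_compatible_shallow_copy_type(st, torch.empty(1)) is False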
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py ADDED
@@ -0,0 +1,215 @@
1
+ import copy
2
+ import torch
3
+ from torch.distributed._shard.sharded_tensor import (
4
+ _sharded_op_impl,
5
+ Shard,
6
+ ShardedTensor,
7
+ )
8
+ from ._common import (
9
+ _register_sharded_op_on_local_shards,
10
+ )
11
+ from torch.distributed._shard.common_op_utils import _register_default_op
12
+
13
+
14
+ # Tensor properties access
15
+ _register_default_op(torch.Tensor.shape.__get__, _sharded_op_impl) # type: ignore[attr-defined]
16
+ _register_default_op(torch.Tensor.dtype.__get__, _sharded_op_impl) # type: ignore[attr-defined]
17
+ _register_default_op(torch.Tensor.layout.__get__, _sharded_op_impl) # type: ignore[attr-defined]
18
+ _register_default_op(torch.Tensor.size, _sharded_op_impl)
19
+ _register_default_op(torch.Tensor.dim, _sharded_op_impl)
20
+ _register_default_op(torch.Tensor.ndim.__get__, _sharded_op_impl) # type: ignore[attr-defined]
21
+ _register_default_op(torch.Tensor.is_contiguous, _sharded_op_impl)
22
+ _register_default_op(torch.Tensor.contiguous, _sharded_op_impl)
23
+ _register_default_op(torch.Tensor.is_floating_point, _sharded_op_impl)
24
+
25
+ # __reduce_ex__ to dispatch to get_state/set_state
26
+ _register_default_op(torch.Tensor.__reduce_ex__, _sharded_op_impl)
27
+
28
+ # autograd related properties
29
+ _register_default_op(torch.Tensor.requires_grad.__get__, _sharded_op_impl) # type: ignore[attr-defined]
30
+ # TODO: set grad with a ShardedTensor that consists of all local grads
31
+ _register_default_op(torch.Tensor.grad.__get__, _sharded_op_impl) # type: ignore[union-attr]
32
+ _register_default_op(torch.Tensor.grad_fn.__get__, _sharded_op_impl) # type: ignore[union-attr]
33
+ _register_default_op(torch.Tensor.is_leaf.__get__, _sharded_op_impl) # type: ignore[attr-defined]
34
+
35
+ # device property is ambiguous as, from a global perspective,
36
+ # ShardedTensor.device consists of multiple devices (which might even span hosts)
37
+ # We choose to return the current device of the local tensor to represent
38
+ # the device property on each rank
39
+ @_sharded_op_impl(torch.Tensor.device.__get__)
40
+ def tensor_device(types, args=(), kwargs=None, pg=None):
41
+ self_st = args[0]
42
+ # Validate types
43
+ if not isinstance(self_st, ShardedTensor):
44
+ raise TypeError("input needs to be a ShardedTensor")
45
+ dev: torch.device
46
+ if self_st._local_shards:
47
+ dev = self_st._local_shards[0].tensor.device
48
+ elif pg and pg._get_backend_name() == "gloo":
49
+ dev = torch.device("cpu")
50
+ else:
51
+ dev = torch.device(torch.cuda.current_device())
52
+ return dev
53
+
54
+ @_sharded_op_impl(torch.Tensor.is_meta.__get__) # type: ignore[attr-defined]
55
+ def st_is_meta(types, args=(), kwargs=None, pg=None):
56
+ return args[0].local_tensor().is_meta
57
+
58
+
59
+ def sharded_type_as_check(*args, **kwargs):
60
+ """
61
+ Perform extra checks for the sharded_type_as op such as the input needs to
62
+ be either a Tensor or ShardedTensor.
63
+
64
+ Args: same as ``torch.Tensor.type_as``.
65
+
66
+ Return: None
67
+ """
68
+ if len(args) < 2:
69
+ raise ValueError("Needs to give a tensor to cast type as!")
70
+ if not isinstance(args[1], torch.Tensor) and not isinstance(args[1], ShardedTensor):
71
+ raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!")
72
+
73
+
74
+ def same_dtype(*args, **kwargs):
75
+ """
76
+ When the dtype is the same, return the original ShardedTensor.
77
+
78
+ Args: same as ``torch.Tensor.type_as``.
79
+
80
+ Return (bool): Whether to return early or not.
81
+ """
82
+ return args[0].dtype == args[1].dtype
83
+
84
+
85
+ def sharded_type_as(args, kwargs, pg):
86
+ """
87
+ Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op.
88
+
89
+ Args: same as ``torch.Tensor.type_as``.
90
+
91
+ Return:
92
+ new_local_shards (List[Shard]): Local shards for the new sharded tensor.
93
+ st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.
94
+ """
95
+ st = args[0]
96
+ tensor = args[1]
97
+ if isinstance(tensor, ShardedTensor):
98
+ tensor = tensor.local_tensor()
99
+ new_local_shards = []
100
+ for shard in st.local_shards():
101
+ new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata))
102
+ st_meta = copy.deepcopy(st._metadata)
103
+ st_meta.tensor_properties.dtype = tensor.dtype
104
+ return new_local_shards, st_meta
105
+
106
+
107
+ _register_sharded_op_on_local_shards(
108
+ torch.Tensor.type_as,
109
+ early_stop_func=same_dtype,
110
+ extra_check=sharded_type_as_check,
111
+ customized_func=sharded_type_as,
112
+ )
113
+
114
+
115
+ def sharded_deepcopy(args, kwargs, pg):
116
+ # NOTE: we directly implement the deepcopy magic method
117
+ # instead of using the default tensor.__deepcopy__
118
+ # and implementing clone(). This is because the default
119
+ # tensor deepcopy copies every attribute, but the
120
+ # process_group in ShardedTensor cannot be deep copied.
121
+ self_st = args[0]
122
+ new_local_shards = copy.deepcopy(self_st.local_shards())
123
+ new_metadata = copy.deepcopy(self_st.metadata())
124
+ return new_local_shards, new_metadata
125
+
126
+
127
+ _register_sharded_op_on_local_shards(
128
+ torch.Tensor.__deepcopy__,
129
+ customized_func=sharded_deepcopy,
130
+ )
131
+
132
+
133
+ @_sharded_op_impl(torch.Tensor.copy_)
134
+ def sharded_inplace_copy(types, args, kwargs, pg):
135
+ # NOTE: in-place ops don't need to rewrap
136
+ kwargs = {} if kwargs is None else kwargs
137
+ self_st = args[0]
138
+ new_st = args[1]
139
+ nonblocking = kwargs.get("non_blocking", False)
140
+ for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()):
141
+ if local_shard.metadata != new_shard.metadata:
142
+ raise RuntimeError(
143
+ "in-place copy can only happen between two ShardedTensors with the same metadata!"
144
+ )
145
+ for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()):
146
+ local_shard.tensor.copy_(new_shard.tensor, nonblocking)
147
+
148
+ return self_st
149
+
150
+
151
+ def sharded_clone(args, kwargs, pg):
152
+ self_st = args[0]
153
+ desire_memory_format = kwargs.get("memory_format", None)
154
+ if desire_memory_format and desire_memory_format != torch.preserve_format:
155
+ raise RuntimeError("Only support torch.preserve_format for ShardedTensor!")
156
+ cloned_local_shards = [
157
+ Shard(
158
+ local_shard.tensor.clone(memory_format=desire_memory_format),
159
+ metadata=copy.deepcopy(local_shard.metadata),
160
+ )
161
+ for local_shard in self_st.local_shards()
162
+ ]
163
+ new_metadata = copy.deepcopy(self_st.metadata())
164
+ return cloned_local_shards, new_metadata
165
+
166
+
167
+ _register_sharded_op_on_local_shards(
168
+ torch.Tensor.clone,
169
+ customized_func=sharded_clone,
170
+ )
171
+
172
+
173
+ def sharded_detach(args, kwargs, pg):
174
+ self_st = args[0]
175
+ detached_local_shards = [
176
+ Shard(
177
+ local_shard.tensor.detach(),
178
+ metadata=copy.deepcopy(local_shard.metadata),
179
+ )
180
+ for local_shard in self_st.local_shards()
181
+ ]
182
+ new_metadata = copy.deepcopy(self_st.metadata())
183
+ new_metadata.tensor_properties.requires_grad = False
184
+ return detached_local_shards, new_metadata
185
+
186
+
187
+ _register_sharded_op_on_local_shards(
188
+ torch.Tensor.detach,
189
+ customized_func=sharded_detach,
190
+ )
191
+
192
+
193
+ @_sharded_op_impl(torch.Tensor.requires_grad_)
194
+ def tensor_requires_grad_set(types, args=(), kwargs=None, pg=None):
195
+ self_st = args[0]
196
+ # Validate types
197
+ if not isinstance(self_st, ShardedTensor):
198
+ raise TypeError("input needs to be a ShardedTensor")
199
+
200
+ if kwargs is None:
201
+ kwargs = {}
202
+
203
+ requires_grad = args[1] if len(args) > 1 else kwargs.get("requires_grad", True)
204
+ if requires_grad == self_st.requires_grad:
205
+ return self_st
206
+
207
+ for local_shard in self_st.local_shards():
208
+ local_shard.tensor.requires_grad_(requires_grad)
209
+
210
+ # update the wrapper class property
211
+ with torch._C.DisableTorchFunctionSubclass():
212
+ self_st.requires_grad_(requires_grad)
213
+ # update the metadata in the meanwhile
214
+ self_st._metadata.tensor_properties.requires_grad = requires_grad
215
+ return self_st
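A compact sketch of the local-shard ops registered in this file (not part of the diff). It assumes `st` is a ShardedTensor created as in the earlier examples; the reference tensor below exists only to pick the target dtype.

half_ref = torch.zeros(1, dtype=torch.float16, device=st.device)
st_fp16 = st.type_as(half_ref)   # new ShardedTensor, same sharding, fp16 shards
st_copy = st.clone()             # clones every local shard plus metadata
st_detached = st.detach()        # requires_grad=False on shards and metadata
st.requires_grad_(True)          # updates wrapper, metadata, and local shards
print(st.device, st.is_meta)     # local-shard device; False unless shards are meta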
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py ADDED
@@ -0,0 +1,1253 @@
1
+ from __future__ import annotations # type: ignore[attr-defined]
2
+ from dataclasses import dataclass
3
+ from typing import (
4
+ Callable,
5
+ Dict,
6
+ List,
7
+ Optional,
8
+ Sequence,
9
+ Tuple,
10
+ cast,
11
+ )
12
+ import copy
13
+ import warnings
14
+ from functools import reduce
15
+ import weakref
16
+
17
+ import threading
18
+ import torch
19
+ import torch.distributed as dist
20
+ from torch.distributed import rpc
21
+ from torch.distributed import distributed_c10d
22
+ from torch.distributed._shard.metadata import ShardMetadata
23
+ import torch.distributed._shard.sharding_spec as shard_spec
24
+ from torch.distributed._shard.sharding_spec.api import (
25
+ _dispatch_custom_op,
26
+ _has_custom_op,
27
+ )
28
+ from torch.distributed._shard.sharding_spec._internals import (
29
+ check_tensor,
30
+ validate_non_overlapping_shards_metadata,
31
+ )
32
+ from torch.distributed._shard._utils import (
33
+ DEPRECATE_MSG,
34
+ )
35
+
36
+ from .metadata import TensorProperties, ShardedTensorMetadata
37
+ from .shard import Shard
38
+ from .reshard import reshuffle_local_shard, reshard_local_shard
39
+ from .utils import (
40
+ _flatten_tensor_size,
41
+ _parse_and_validate_remote_device,
42
+ _validate_output_tensor_for_gather,
43
+ build_metadata_from_local_shards,
44
+ build_global_metadata
45
+ )
46
+ from torch.distributed.remote_device import _remote_device
47
+ from torch.utils import _pytree as pytree
48
+ import operator
49
+
50
+ # Tracking for sharded tensor objects.
51
+ _sharded_tensor_lock = threading.Lock()
52
+ _sharded_tensor_current_id = 0
53
+ _sharded_tensor_map: Dict[int, weakref.ReferenceType[ShardedTensor]] = {}
54
+
55
+ # Default sharded ops
56
+ _SHARDED_OPS: Dict[Callable, Callable] = {}
57
+
58
+ # Customized user ops
59
+ _CUSTOM_SHARDED_OPS: Dict[Callable, Callable] = {}
60
+
61
+ def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
62
+ with _sharded_tensor_lock:
63
+ if sharded_tensor_id not in _sharded_tensor_map:
64
+ raise RuntimeError(
65
+ f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
66
+
67
+ sharded_tensor = _sharded_tensor_map[sharded_tensor_id]()
68
+ if sharded_tensor is None:
69
+ raise RuntimeError('ShardedTensor weakref has been deallocated')
70
+ else:
71
+ sharded_tensor._register_remote_shards(rrefs, rpc_rank)
72
+
73
+ class ShardedTensorBase(torch.Tensor):
74
+ _sharding_spec: shard_spec.ShardingSpec
75
+ _metadata: ShardedTensorMetadata
76
+ _local_shards: List[Shard]
77
+
78
+ def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs):
79
+ # Use __new__ to construct a wrapper tensor, for recording tensor
80
+ # properties and logging purposes.
81
+ torch._C._log_api_usage_once("torch.distributed._shard.sharded_tensor")
82
+
83
+ # check sharding spec and build sharded tensor metadata
84
+ if not isinstance(sharding_spec, shard_spec.ShardingSpec):
85
+ raise ValueError(f"Expecting ShardingSpec but got: {type(sharding_spec)}")
86
+
87
+ sizes = _flatten_tensor_size(size)
88
+ dtype = kwargs["dtype"]
89
+ layout = kwargs["layout"]
90
+ pin_memory = kwargs["pin_memory"]
91
+ requires_grad = kwargs["requires_grad"]
92
+
93
+ if dtype is None:
94
+ dtype = torch.get_default_dtype()
95
+
96
+ tensor_properties = TensorProperties(
97
+ dtype, layout, requires_grad, pin_memory=pin_memory
98
+ )
99
+ sharded_tensor_metadata = sharding_spec.build_metadata(
100
+ sizes, tensor_properties=tensor_properties
101
+ )
102
+
103
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
104
+ cls,
105
+ sizes,
106
+ dtype=dtype,
107
+ layout=layout,
108
+ pin_memory=pin_memory,
109
+ requires_grad=requires_grad,
110
+ )
111
+ # set sharding spec
112
+ r._sharding_spec = sharding_spec
113
+ # set metadata
114
+ r._metadata = sharded_tensor_metadata
115
+ # set local shards
116
+ r._local_shards = []
117
+ return r
118
+
119
+ def metadata(self) -> ShardedTensorMetadata:
120
+ """
121
+ Returns a :class:`ShardedTensorMetadata` object corresponding to the
122
+ metadata for the entire tensor.
123
+ """
124
+ return self._metadata
125
+
126
+ def local_shards(self) -> List[Shard]:
127
+ """
128
+ Returns a list of :class:`Shard` corresponding to the
129
+ local shards for this rank. Returns an empty list if the current rank
130
+ does not host any shards for this Tensor.
131
+ """
132
+ return self._local_shards
133
+
134
+ @classmethod
135
+ def _init_from_local_shards_and_global_metadata(
136
+ cls,
137
+ local_shards: List[Shard],
138
+ sharded_tensor_metadata: ShardedTensorMetadata,
139
+ sharding_spec=None,
140
+ ) -> ShardedTensorBase:
141
+ """
142
+ Initialize a ShardedTensorBase with local shards and a global
143
+ ShardedTensorMetadata built on each rank.
144
+ Warning: This API is experimental and subject to change. It does
145
+ not do cross-rank validations, and fully relies on the user
146
+ for the correctness of sharded_tensor_metadata on each rank
147
+ """
148
+ shards_metadata = sharded_tensor_metadata.shards_metadata
149
+ tensor_properties = sharded_tensor_metadata.tensor_properties
150
+
151
+ if len(shards_metadata) == 0:
152
+ raise ValueError("shards_metadata must not be empty!")
153
+
154
+ if tensor_properties.layout != torch.strided:
155
+ raise ValueError("Only torch.strided layout is currently supported")
156
+
157
+ if sharding_spec is None:
158
+ spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata)
159
+ else:
160
+ spec = sharding_spec
161
+
162
+ sharded_tensor_base = ShardedTensorBase.__new__(
163
+ ShardedTensor,
164
+ spec,
165
+ sharded_tensor_metadata.size,
166
+ dtype=tensor_properties.dtype,
167
+ layout=tensor_properties.layout,
168
+ pin_memory=tensor_properties.pin_memory,
169
+ requires_grad=tensor_properties.requires_grad,
170
+ )
171
+
172
+ # check if shards_metadata have overlap shards
173
+ validate_non_overlapping_shards_metadata(shards_metadata)
174
+
175
+ # check if the shards_metadata is compatible with overall size of the sharded tensor.
176
+ check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
177
+
178
+ # done validation, add local_shards
179
+ sharded_tensor_base._local_shards = local_shards
180
+ return sharded_tensor_base
181
+
182
+ @classmethod
183
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
184
+ raise RuntimeError(
185
+ f"A {cls.__name__} object is being used from c++ while calling {func.__module__}.{func.__name__} "
186
+ "but there is no custom __torch_dispatch__ implementation for it."
187
+ )
188
+
189
+ class ShardedTensor(ShardedTensorBase):
190
+ """
191
+ ShardedTensor is a torch.Tensor subclass to represent Tensors that are sharded
192
+ across multiple devices and multiple processes.
193
+
194
+ ShardedTensor is initialized in an SPMD-like fashion where each rank
195
+ initializes the ShardedTensor. The ShardedTensor object on each rank
196
+ then only stores the local shard for the Tensor and provides global
197
+ metadata for all the shards.
198
+
199
+ ShardedTensor doesn't provide any Tensor-like operations but is a wrapper
200
+ providing the Tensor representing the local shard and the global metadata.
201
+ Using these, users can build their custom distributed._sharded computations
202
+ on top of this primitive. The local shards are all initialized using the
203
+ create_op specified by tensor_init_params.create_op, e.g., torch.ones, or
204
+ torch.empty
205
+
206
+ Args:
207
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
208
+ describing how to shard the Tensor.
209
+ size (int...): a sequence of integers defining the shape of the output
210
+ tensor. Can be a variable number of arguments or a collection like a list or tuple.
211
+
212
+ Keyword args:
213
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
214
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
215
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
216
+ Default: ``torch.strided``.
217
+ requires_grad (bool, optional): If autograd should record operations on the
218
+ returned tensor. Default: ``False``.
219
+ pin_memory (bool, optional): If set, returned tensor would be allocated in
220
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
221
+ memory_format (:class:`torch.memory_format`, optional): the desired memory format of
222
+ returned Tensor. Default: ``torch.contiguous_format``.
223
+ init_rrefs (bool, optional): Whether or not to initialize
224
+ :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
225
+ Need to initialize the RPC Framework if specified as ``True``.
226
+ Default: ``False``.
227
+
228
+ .. note:: ShardedTensor uses collectives to do various operations, i.e. it
229
+ uses all_gather to do cross rank validations. For NCCL-based process
230
+ groups, internal tensor representations of objects must be moved to the
231
+ GPU device before communication takes place. In this case, the device
232
+ used is given by ``torch.cuda.current_device()`` and it is the user's
233
+ responsibility to ensure that this is set so that each rank has an
234
+ individual GPU, via ``torch.cuda.set_device()``
235
+
236
+ """
237
+ def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs):
238
+ self = super().__new__(cls, sharding_spec, *size, **kwargs)
239
+ return self
240
+
241
+ def __init__(
242
+ self,
243
+ sharding_spec: shard_spec.ShardingSpec,
244
+ *size,
245
+ dtype=None,
246
+ layout=torch.strided,
247
+ requires_grad=False,
248
+ pin_memory=False,
249
+ memory_format=torch.contiguous_format,
250
+ process_group=None,
251
+ init_rrefs=False,
252
+ ):
253
+ # prepare initialization, initialize fields like
254
+ # _process_group, _local_shards, etc.
255
+ self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
256
+
257
+ if layout != torch.strided:
258
+ raise ValueError('Only torch.strided layout is currently supported')
259
+
260
+ if memory_format != torch.contiguous_format:
261
+ raise ValueError('Only torch.contiguous_format memory_format is currently supported')
262
+
263
+ self._metadata.tensor_properties.memory_format = memory_format
264
+
265
+ current_rank = dist.get_rank(self._process_group)
266
+
267
+ for shard_metadata in self._metadata.shards_metadata:
268
+ rank, device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
269
+ if rank == current_rank:
270
+ local_tensor = _create_tensor_from_params(
271
+ shard_metadata.shard_sizes,
272
+ local_device=device,
273
+ tensor_properties=self._metadata.tensor_properties
274
+ )
275
+ self._local_shards.append(Shard(local_tensor, shard_metadata))
276
+
277
+ # do post initialization (i.e. register sharded_tensor_id, initialize_rpc)
278
+ self._post_init()
279
+
280
+ def _prepare_init(self, process_group=None, init_rrefs=False):
281
+ self._init_rrefs = init_rrefs
282
+ self._sharded_tensor_id = None
283
+
284
+ self._process_group = (
285
+ process_group
286
+ if process_group is not None
287
+ else distributed_c10d._get_default_group()
288
+ )
289
+
290
+ self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}
291
+
292
+ def _post_init(self):
293
+ # Initialize RPC if available.
294
+ if self._init_rrefs:
295
+ with _sharded_tensor_lock:
296
+ global _sharded_tensor_current_id, _sharded_tensor_map
297
+ self._sharded_tensor_id = _sharded_tensor_current_id
298
+ _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
299
+ _sharded_tensor_current_id += 1
300
+
301
+ if not rpc._is_current_rpc_agent_set():
302
+ raise RuntimeError(
303
+ 'RPC Framework needs to be initialized using'
304
+ ' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
305
+ self._init_rpc()
306
+
307
+ def __del__(self):
308
+ # Clean up the global map.
309
+ with _sharded_tensor_lock:
310
+ global _sharded_tensor_current_id, _sharded_tensor_map
311
+ if (
312
+ hasattr(self, "_sharded_tensor_id")
313
+ and self._sharded_tensor_id in _sharded_tensor_map
314
+ ):
315
+ _sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload]
316
+
317
+ def _init_rpc(self):
318
+ # Validate PG and RPC ranks match.
319
+ pg_rank = dist.get_rank()
320
+ rpc_rank = rpc.get_worker_info().id
321
+ if pg_rank != rpc_rank:
322
+ raise ValueError(
323
+ f'Default ProcessGroup and RPC ranks must be '
324
+ f'the same for ShardedTensor, found process group rank: '
325
+ f'{pg_rank} and RPC rank: {rpc_rank}'
326
+ )
327
+
328
+ self._remote_shards = {}
329
+
330
+ # Gather all the sharded tensor ids.
331
+ worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
332
+ rank_to_name = {}
333
+ name_to_rank = {}
334
+
335
+ for worker_info in worker_infos:
336
+ rank_to_name[worker_info.id] = worker_info.name
337
+ name_to_rank[worker_info.name] = worker_info.id
338
+
339
+ all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)
340
+
341
+ # Share the local shards to the entire world.
342
+ futs = []
343
+ rpc_rank = rpc.get_worker_info().id
344
+ for rank in range(dist.get_world_size()):
345
+ # Skip self.
346
+ if rank == dist.get_rank():
347
+ continue
348
+
349
+ if len(self.local_shards()) != 0:
350
+ rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
351
+ fut = rpc.rpc_async(
352
+ rank,
353
+ _register_remote_shards,
354
+ args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
355
+ futs.append(fut)
356
+
357
+ torch.futures.wait_all(futs)
358
+
359
+ # Barrier for all RPCs to finish on all ranks.
360
+ rpc.api._all_gather(None)
361
+
362
+ def _get_preferred_device(self) -> torch.device:
363
+ """
364
+ Return the preferred device to be used when creating tensors for collectives.
365
+ This method takes into account the associated process group
366
+ """
367
+ if dist.get_backend(self._process_group) == dist.Backend.NCCL:
368
+ return torch.device(torch.cuda.current_device())
369
+ return torch.device("cpu")
370
+
371
+ def gather( # type: ignore[override]
372
+ self,
373
+ dst: int = 0,
374
+ out: Optional[torch.Tensor] = None,
375
+ enforce_dtype: bool = False,
376
+ dtype: Optional[torch.dtype] = None,
377
+ ) -> None:
378
+ """
379
+ Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
380
+ sharded tensor.
381
+
382
+ The API needs to be called on all ranks in SPMD fashion. All ranks should have
383
+ the same ``dst``. ``out`` should be a tensor of the same size as the overall
384
+ size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
385
+
386
+ Args:
387
+ dst(int): The rank where full tensor is constructed.
388
+ Default: 0
389
+ out (:class `torch.Tensor`, optional): The output full tensor.
390
+ Must be provided ONLY on the ``dst`` rank.
391
+ Default: ``None``
392
+ enforce_dtype (bool): Deprecated, please use dtype instead. Force the
393
+ gathered tensors to be the same type as input and output.
394
+ dtype (torch.dtype): Force the gathered tensors to be this dtype.
395
+ Default: ``None``
396
+ """
397
+ def shard_size(shard_md):
398
+ return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined]
399
+
400
+ if enforce_dtype:
401
+ warnings.warn("enforce_dtype is deprecated. Please use dtype instead.")
402
+
403
+ rank = dist.get_rank(self._process_group)
404
+ full_size = self.metadata().size
405
+ _validate_output_tensor_for_gather(rank, dst, full_size, out)
406
+
407
+ local_shards = self.local_shards()
408
+ world_size = dist.get_world_size(self._process_group)
409
+ rank_sizes = [0 for _ in range(world_size)]
410
+ max_rank_size = 0
411
+ shard_placement: Dict[ShardMetadata, Tuple[int, int]] = {}
412
+ # collect sizes
413
+ for shard_md in self.metadata().shards_metadata:
414
+ shard_rank = cast(_remote_device, shard_md.placement).rank()
415
+ assert shard_rank is not None
416
+
417
+ shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank])
418
+ rank_sizes[shard_rank] += shard_size(shard_md)
419
+ max_rank_size = max(max_rank_size, rank_sizes[shard_rank])
420
+
421
+ gather_list: Optional[List[torch.Tensor]]
422
+ if rank == dst:
423
+ assert out is not None
424
+ if enforce_dtype:
425
+ # enforce_dtype is deprecated. Do it for backward compatibility.
426
+ dtype = out.dtype
427
+ # TODO make it as a view of out tensor
428
+ gather_list = [torch.empty((max_rank_size,), device=out.device, dtype=dtype) for _ in range(world_size)]
429
+ else:
430
+ gather_list = None
431
+
432
+ with torch.no_grad():
433
+ if enforce_dtype and len(local_shards) > 0:
434
+ # enforce_dtype is deprecated. Do it for backward compatibility.
435
+ dtype = local_shards[0].tensor.dtype
436
+ data = torch.empty(max_rank_size, device=self._get_preferred_device(), dtype=dtype)
437
+
438
+ for shard in local_shards:
439
+ src = shard.tensor.flatten()
440
+ if src.nelement() == 0 :
441
+ warnings.warn("Gathering a tensor with zero elements on rank " + str(rank))
442
+ return
443
+ shard_offset = shard_placement[shard.metadata][1]
444
+ data[shard_offset: shard_offset + src.numel()].copy_(src)
445
+
446
+ dist.gather(
447
+ tensor=data,
448
+ gather_list=gather_list,
449
+ dst=dst,
450
+ group=self._process_group,
451
+ )
452
+ if rank != dst:
453
+ return
454
+ # In _validate_output_tensor_for_gather, we raise if out == None and rank == dst
455
+ out = cast(torch.Tensor, out)
456
+ assert gather_list is not None
457
+
458
+ full_size = self.metadata().size
459
+ dims = len(full_size)
460
+ for shard_md in self.metadata().shards_metadata:
461
+ rank, rank_offset = shard_placement[shard_md]
462
+ tensor = gather_list[rank]
463
+ tensor = tensor[rank_offset : rank_offset + shard_size(shard_md)]
464
+ tensor = tensor.view(shard_md.shard_sizes)
465
+
466
+ out_narrow_view = out
467
+ for dim in range(dims):
468
+ out_narrow_view = out_narrow_view.narrow(
469
+ dim,
470
+ shard_md.shard_offsets[dim],
471
+ shard_md.shard_sizes[dim],
472
+ )
473
+
474
+ out_narrow_view.copy_(tensor)
475
+
476
+ def cpu(
477
+ self,
478
+ memory_format=torch.preserve_format,
479
+ process_group=None
480
+ ) -> ShardedTensor:
481
+ """
482
+ Returns a copy of this object in CPU memory.
483
+
484
+ If this ShardedTensor is already on CPU memory, then no copy is
485
+ performed and original object is returned.
486
+
487
+ .. note:: When moving a ShardedTensor from GPU to CPU, the ShardedTensor might
488
+ need to be managed by a different type of ProcessGroup (i.e. ProcessGroupGloo);
489
+ it is the user's responsibility to explicitly pass in a new process_group that
490
+ is compatible with CPU.
491
+ """
492
+ # TODO: make this a __torch_function__ op once ShardedTensor becomes a
493
+ # torch.Tensor subclass, see https://github.com/pytorch/pytorch/issues/75402
494
+ if memory_format != torch.preserve_format and \
495
+ memory_format != torch.contiguous_format:
496
+ raise RuntimeError("Only `torch.contiguous_format` or "
497
+ "`torch.preserve_format` is supported!")
498
+ all_on_cpu = True
499
+ for meta in self.metadata().shards_metadata:
500
+ all_on_cpu &= (meta.placement.device().type == "cpu") # type: ignore[union-attr]
501
+
502
+ # if every shard is already on CPU, return the original object
503
+ if all_on_cpu:
504
+ return self
505
+
506
+ # if not, returns a copy of this object on CPU
507
+ list_shards: List[Shard] = []
508
+ # move all local shards to cpu, and change metadata
509
+ for shard in self._local_shards:
510
+ cpu_tensor = shard.tensor.cpu(memory_format=memory_format) # type: ignore[call-arg]
511
+ metadata = copy.deepcopy(shard.metadata)
512
+ metadata.placement._device = torch.device("cpu") # type: ignore[union-attr]
513
+ list_shards.append(
514
+ Shard(cpu_tensor, metadata)
515
+ )
516
+
517
+ st_meta = copy.deepcopy(self.metadata())
518
+ for meta in st_meta.shards_metadata:
519
+ if meta.placement.device().type != "cpu": # type: ignore[union-attr]
520
+ meta.placement._device = torch.device("cpu") # type: ignore[union-attr]
521
+
522
+ pg = self._process_group if process_group is None else process_group
523
+ st_cpu = ShardedTensor._init_from_local_shards_and_global_metadata(
524
+ list_shards,
525
+ sharded_tensor_metadata=st_meta,
526
+ process_group=pg,
527
+ init_rrefs=self._init_rrefs
528
+ )
529
+ return st_cpu
530
+
531
+ def cuda(
532
+ self,
533
+ device=None,
534
+ non_blocking=False,
535
+ memory_format=torch.preserve_format,
536
+ process_group=None
537
+ ) -> ShardedTensor:
538
+ """
539
+ Returns a copy of this object in CUDA memory, if the original ShardedTensor
540
+ is on CPU, we will move the local shard to the current GPU device of each
541
+ process in a SPMD fashion.
542
+ If this ShardedTensor is already on CUDA memory and local shards on each rank are
543
+ already on the current device, we still return a new ShardedTensor object with new
544
+ metadata, but no underlying data movements are performed.
545
+ .. note:: When moving a ShardedTensor from CPU to GPU, the ShardedTensor might
546
+ need to be managed by a different type of ProcessGroup (i.e. ProcessGroupNCCL);
547
+ it is the user's responsibility to explicitly pass in a new process_group that
548
+ is compatible with GPU.
549
+ """
550
+ if memory_format != torch.preserve_format and \
551
+ memory_format != torch.contiguous_format:
552
+ raise RuntimeError("Only `torch.contiguous_format` or "
553
+ "`torch.preserve_format` is supported!")
554
+
555
+ if device is not None:
556
+ device = torch.device(device) if isinstance(device, str) else device
557
+ assert isinstance(device, torch.device) and device.index == torch.cuda.current_device(), \
558
+ '''Only device without device id (e.g. "cpu" or "cuda") is expected for ShardedTensor!'''
559
+
560
+ current_device = torch.device(torch.cuda.current_device())
561
+ # returns a copy of ShardedTensor on CUDA current device
562
+ list_shards: List[Shard] = []
563
+ # move all local shards to current device, and change metadata
564
+ # if local shards already on the current device, there's no
565
+ # real data movement, only the metadata are copied.
566
+ for shard in self._local_shards:
567
+ cuda_tensor = shard.tensor.cuda(
568
+ device=current_device,
569
+ non_blocking=non_blocking,
570
+ memory_format=memory_format
571
+ ) # type: ignore[call-arg]
572
+ metadata = copy.deepcopy(shard.metadata)
573
+ metadata.placement._device = current_device # type: ignore[union-attr]
574
+
575
+ list_shards.append(
576
+ Shard(cuda_tensor, metadata)
577
+ )
578
+
579
+ st_meta = copy.deepcopy(self.metadata())
580
+ for meta in st_meta.shards_metadata:
581
+ if meta.placement.device().type != "cuda": # type: ignore[union-attr]
582
+ meta.placement._device = current_device # type: ignore[union-attr]
583
+
584
+ pg = self._process_group if process_group is None else process_group
585
+ # we need to use `init_from_local_shards` to communicate between ranks
586
+ # and update the sharding spec/shards metadata.
587
+ st_cuda = ShardedTensor._init_from_local_shards_and_global_metadata(
588
+ list_shards,
589
+ sharded_tensor_metadata=st_meta,
590
+ process_group=pg,
591
+ init_rrefs=self._init_rrefs
592
+ )
593
+ return st_cuda
594
+
595
+ def to(self, *args, **kwargs) -> ShardedTensor:
596
+ current_device: torch.device
597
+ if self._local_shards:
598
+ current_device = self._local_shards[0].tensor.device
599
+ elif self._process_group._get_backend_name() == "gloo":
600
+ current_device = torch.device("cpu")
601
+ else:
602
+ current_device = torch.device(torch.cuda.current_device())
603
+ current_dtype = self.dtype
604
+ device_to = current_device
605
+ dtype_to = current_dtype
606
+ if len(args) == 1:
607
+ if isinstance(args[0], torch.dtype):
608
+ dtype_to = args[0]
609
+ elif isinstance(args[0], torch.device):
610
+ device_to = args[0]
611
+ elif isinstance(args[0], (str, int)):
612
+ device_to = torch.device(args[0])
613
+ elif isinstance(args[0], torch.Tensor):
614
+ dtype_to = args[0].dtype
615
+ device_to = args[0].device
616
+ else:
617
+ raise RuntimeError(f"ShardedTensor.to() received invalid arguments: {args}")
618
+ elif len(args) == 2:
619
+ device_to, dtype_to = args
620
+ else:
621
+ dtype_to = kwargs.get("dtype", current_dtype)
622
+ device_to = kwargs.get("device", current_device)
623
+
624
+ device_to = torch.device(device_to) if isinstance(device_to, (str, int)) else device_to
625
+
626
+ if device_to.type == "cuda":
627
+ # if device_to set to cuda, set to current device even
628
+ # if user specify the device index.
629
+ current_idx = torch.cuda.current_device()
630
+ if device_to.index != current_idx:
631
+ warnings.warn("ShardedTensor.to only moves the tensor to its current device. "
632
+ "If you want to put it on a different device, use `reshard` instead.")
633
+ device_to = torch.device(current_idx)
634
+
635
+ copy_tensor = kwargs.get("copy", False)
636
+ non_blocking = kwargs.get("non_blocking", False)
637
+ memory_format = kwargs.get("memory_format", torch.preserve_format)
638
+ process_group = kwargs.get("process_group", None)
639
+
640
+ if not copy_tensor and dtype_to == current_dtype and device_to == current_device:
641
+ # already have correct dtype and device, return itself
642
+ return self
643
+
644
+ # returns a copy of ShardedTensor on CUDA current device
645
+ list_shards: List[Shard] = []
646
+
647
+ for shard in self._local_shards:
648
+ new_tensor = shard.tensor.to( # type: ignore[call-overload]
649
+ device=device_to,
650
+ dtype=dtype_to,
651
+ non_blocking=non_blocking,
652
+ copy=copy_tensor,
653
+ memory_format=memory_format
654
+ )
655
+ metadata = copy.deepcopy(shard.metadata)
656
+ if metadata.placement is not None:
657
+ metadata.placement._device = device_to
658
+ list_shards.append(Shard(new_tensor, metadata))
659
+
660
+ # update metadata
661
+ st_meta = copy.deepcopy(self.metadata())
662
+ st_meta.tensor_properties.dtype = dtype_to
663
+ for meta in st_meta.shards_metadata:
664
+ meta.placement._device = device_to # type: ignore[union-attr]
665
+
666
+ pg = self._process_group if process_group is None else process_group
667
+ # we need to use `init_from_local_shards` to communicate between ranks
668
+ # and update the sharding spec/shards metadata.
669
+ st_to = ShardedTensor._init_from_local_shards_and_global_metadata(
670
+ list_shards,
671
+ sharded_tensor_metadata=st_meta,
672
+ process_group=pg,
673
+ init_rrefs=self._init_rrefs
674
+ )
675
+ return st_to
676
+
677
+
678
+ @classmethod
679
+ def _init_from_local_shards(
680
+ cls,
681
+ local_shards: List[Shard],
682
+ *global_size,
683
+ process_group=None,
684
+ init_rrefs=False,
685
+ ):
686
+ # STEP 1: Validate the ShardMetadatas locally
687
+ process_group = (
688
+ process_group
689
+ if process_group is not None
690
+ else distributed_c10d._get_default_group()
691
+ )
692
+ current_rank = dist.get_rank(process_group)
693
+ world_size = dist.get_world_size(process_group)
694
+
695
+ local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
696
+ global_tensor_size = _flatten_tensor_size(global_size)
697
+
698
+ if len(local_shards) > 0:
699
+ local_sharded_tensor_metadata = \
700
+ build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)
701
+
702
+ # STEP 2. Validate metadata across ranks, and build a global sharded tensor
703
+ # metadata by gathering local ShardedTensorMetadata
704
+ gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
705
+ if world_size > 1:
706
+ gathered_metadatas = [None for _ in range(world_size)]
707
+
708
+ dist.all_gather_object(
709
+ gathered_metadatas,
710
+ local_sharded_tensor_metadata,
711
+ group=process_group
712
+ )
713
+ else:
714
+ gathered_metadatas = [local_sharded_tensor_metadata]
715
+
716
+ global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)
717
+ tensor_properties = global_sharded_tensor_metadata.tensor_properties
718
+
719
+ # STEP 3: Validation done, create the actual ShardedTensor and populate fields
720
+ # prepare initialization
721
+ spec = shard_spec._infer_sharding_spec_from_shards_metadata(
722
+ global_sharded_tensor_metadata.shards_metadata
723
+ )
724
+ sharded_tensor = cls.__new__(cls,
725
+ spec,
726
+ global_sharded_tensor_metadata.size,
727
+ dtype=tensor_properties.dtype,
728
+ layout=tensor_properties.layout,
729
+ pin_memory=tensor_properties.pin_memory,
730
+ requires_grad=tensor_properties.requires_grad)
731
+ sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
732
+
733
+ # attach local_shards to the ShardedTensor created
734
+ sharded_tensor._local_shards = local_shards
735
+
736
+ # run post initialization, i.e. map registration, rpc initialization
737
+ sharded_tensor._post_init()
738
+ return sharded_tensor
739
+
740
+ @classmethod
741
+ def _init_from_local_tensor(
742
+ cls,
743
+ local_tensor: torch.Tensor,
744
+ sharding_spec: shard_spec.ShardingSpec,
745
+ *global_size: Sequence[int],
746
+ process_group: Optional[dist.ProcessGroup] = None,
747
+ init_rrefs=False,
748
+ ) -> ShardedTensor:
749
+ """
750
+ Initialize a ShardedTensor given only one local tensor, global sharded tensor
751
+ size and sharding spec on each rank.
752
+
753
+ Args:
754
+ local_tensor (Tensor): Single tensor of local shard stored in each rank.
755
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
756
+ The specification describing how to shard the Tensor.
757
+ global_size (Sequence[int]): Size of the sharded tensor.
758
+ process_group (ProcessGroup, optional): The process group to aggregate on.
759
+ Default: None
760
+ init_rrefs (bool, optional): Whether or not to initialize
761
+ :class:`torch.distributed.rpc.RRef`s pointing to remote shards.
762
+ Need to initialize the RPC Framework if specified as ``True``.
763
+ Default: ``False``.
764
+
765
+ Returns:
766
+ A :class:`ShardedTensor` sharded based on the given sharding_spec with local
767
+ tensor stored in the current rank.
768
+
769
+ Examples:
770
+ >>> # xdoctest: +SKIP
771
+ >>> # All tensors below are of torch.int64 type.
772
+ >>> # We have 2 process groups, 2 ranks.
773
+ >>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
774
+ >>> local_tensor = torch.unsqueeze(torch.cat([tensor, tensor + 2]))
775
+ >>> local_tensor
776
+ tensor([[1, 2, 3, 4]]) # Rank 0
777
+ tensor([[3, 4, 5, 6]]) # Rank 1
778
+ >>> sharding_dim = 0
779
+ >>> sharding_spec = ChunkShardingSpec(
780
+ dim=sharding_dim,
781
+ placements=[
782
+ "rank:0/cuda:0",
783
+ "rank:1/cuda:1",
784
+ ],
785
+ )
786
+ >>> st = ShardedTensor._init_from_local_tensor(local_tensor, sharding_spec, [2, 4])
787
+ >>> st
788
+ ShardedTensor(
789
+ ShardedTensorMetadata(
790
+ shards_metadata=[
791
+ ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1, 4], placement=rank:0/cuda:0),
792
+ ShardMetadata(shard_offsets=[1, 0], shard_sizes=[1, 4], placement=rank:1/cuda:1),
793
+ ],
794
+ size=torch.Size([2, 4])
795
+ )
796
+ >>> st.local_tensor()
797
+ tensor([1, 2, 3, 4]) # Rank 0
798
+ tensor([3, 4, 5, 6]) # Rank 1
799
+
800
+ Warning: This API is experimental and subject to change. It lacks full cross-
801
+ rank validations; we only validate the local shard on the current rank.
802
+ We fully rely on the user to ensure local tensor is sharded based on the
803
+ sharding spec.
804
+ """
805
+ warnings.warn(DEPRECATE_MSG)
806
+
807
+ if not local_tensor.is_contiguous():
808
+ raise ValueError('local_tensor is not a contiguous Tensor.')
809
+
810
+ global_tensor_size = _flatten_tensor_size(global_size)
811
+ tensor_properties = TensorProperties(
812
+ dtype=local_tensor.dtype,
813
+ layout=local_tensor.layout,
814
+ requires_grad=local_tensor.requires_grad,
815
+ memory_format=torch.contiguous_format,
816
+ pin_memory=local_tensor.is_pinned())
817
+ sharded_tensor_metadata = sharding_spec.build_metadata(
818
+ global_tensor_size,
819
+ tensor_properties
820
+ )
821
+
822
+ process_group = (
823
+ process_group
824
+ if process_group is not None
825
+ else distributed_c10d._get_default_group()
826
+ )
827
+ current_rank = dist.get_rank(process_group)
828
+
829
+ local_shards: List[Shard] = []
830
+ for shard_metadata in sharded_tensor_metadata.shards_metadata:
831
+ rank, device = _parse_and_validate_remote_device(process_group, shard_metadata.placement)
832
+ if rank == current_rank:
833
+ local_shards.append(Shard(local_tensor, shard_metadata))
834
+
835
+ # TODO: figure out how the API should behave when some ranks have no shard
836
+ # see https://github.com/pytorch/pytorch/issues/7313
837
+ return ShardedTensor._init_from_local_shards_and_global_metadata(
838
+ local_shards,
839
+ sharded_tensor_metadata,
840
+ process_group=process_group,
841
+ init_rrefs=init_rrefs,
842
+ sharding_spec=sharding_spec,
843
+ )
844
+
845
+ @classmethod
846
+ def _init_from_local_shards_and_global_metadata( # type: ignore[override]
847
+ cls,
848
+ local_shards: List[Shard],
849
+ sharded_tensor_metadata: ShardedTensorMetadata,
850
+ process_group=None,
851
+ init_rrefs=False,
852
+ sharding_spec=None,
853
+ ) -> ShardedTensor:
854
+ """
855
+ Initialize a ShardedTensor with local shards and a global
856
+ ShardedTensorMetadata built on each rank.
857
+
858
+ Warning: This API is experimental and subject to change. It does
859
+ not do cross-rank validations, and fully relies on the user
860
+ for the correctness of sharded_tensor_metadata on each rank
861
+ """
862
+ process_group = (
863
+ process_group
864
+ if process_group is not None
865
+ else distributed_c10d._get_default_group()
866
+ )
867
+ current_rank = dist.get_rank(process_group)
868
+
869
+ shards_metadata = sharded_tensor_metadata.shards_metadata
870
+
871
+ local_shard_metadatas = []
872
+
873
+ # collect local shard metadatas from the global sharded_tensor_metadata
874
+ for shard_metadata in shards_metadata: # type: ignore[attr-defined]
875
+ rank, local_device = _parse_and_validate_remote_device(process_group, shard_metadata.placement)
876
+
877
+ if current_rank == rank:
878
+ local_shard_metadatas.append(shard_metadata)
879
+
880
+ if len(local_shards) != len(local_shard_metadatas):
881
+ raise RuntimeError(
882
+ f'Number of local shards ({len(local_shards)}) does not match number of local '
883
+ f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
884
+ f'on rank ({current_rank}) '
885
+ )
886
+
887
+ shards_metadata = sharded_tensor_metadata.shards_metadata
888
+ tensor_properties = sharded_tensor_metadata.tensor_properties
889
+
890
+ if len(shards_metadata) == 0:
891
+ raise ValueError("shards_metadata must not be empty!")
892
+
893
+ if tensor_properties.layout != torch.strided:
894
+ raise ValueError("Only torch.strided layout is currently supported")
895
+
896
+ if sharding_spec is None:
897
+ spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata)
898
+ else:
899
+ spec = sharding_spec
900
+
901
+ sharded_tensor = ShardedTensor.__new__(
902
+ ShardedTensor,
903
+ spec,
904
+ sharded_tensor_metadata.size,
905
+ dtype=tensor_properties.dtype,
906
+ layout=tensor_properties.layout,
907
+ pin_memory=tensor_properties.pin_memory,
908
+ requires_grad=tensor_properties.requires_grad,
909
+ )
910
+
911
+ def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
912
+ tensor_property_or_metadata = (
913
+ "tensor property" if is_property else "local ShardMetadata"
914
+ )
915
+ if expected != actual:
916
+ raise ValueError(
917
+ f"Local shards' tensor {prop_name} property is incompatible with "
918
+ f"{tensor_property_or_metadata} on rank {rank}: "
919
+ f"{tensor_property_or_metadata} {prop_name}={expected}, "
920
+ f"local shard tensor {prop_name}={actual}."
921
+ )
922
+
923
+ for shard in local_shards:
924
+ shard_meta = shard.metadata
925
+ local_shard_tensor = shard.tensor
926
+ placement = shard_meta.placement
927
+ assert placement is not None, "Must specify placement for `Shard`!"
928
+ rank = placement.rank()
929
+ local_device = placement.device()
930
+
931
+ _raise_if_mismatch(
932
+ tensor_properties.layout,
933
+ local_shard_tensor.layout,
934
+ "layout",
935
+ rank,
936
+ True,
937
+ )
938
+ if not local_shard_tensor.is_contiguous():
939
+ raise ValueError(
940
+ "Only torch.contiguous_format memory_format is currently supported"
941
+ )
942
+
943
+ _raise_if_mismatch(
944
+ shard_meta.shard_sizes,
945
+ list(local_shard_tensor.size()),
946
+ "size",
947
+ rank,
948
+ )
949
+ _raise_if_mismatch(
950
+ tensor_properties.pin_memory,
951
+ local_shard_tensor.is_pinned(),
952
+ "pin_memory",
953
+ rank,
954
+ True,
955
+ )
956
+ _raise_if_mismatch(local_device, local_shard_tensor.device, "device", rank)
957
+ _raise_if_mismatch(
958
+ tensor_properties.dtype,
959
+ local_shard_tensor.dtype,
960
+ "dtype",
961
+ rank,
962
+ True,
963
+ )
964
+ _raise_if_mismatch(
965
+ tensor_properties.requires_grad,
966
+ local_shard_tensor.requires_grad,
967
+ "requires_grad",
968
+ rank,
969
+ True,
970
+ )
971
+
972
+ # check if shards_metadata have overlap shards
973
+ validate_non_overlapping_shards_metadata(shards_metadata)
974
+
975
+ # check if the shards_metadata is compatible with overall size of the sharded tensor.
976
+ check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
977
+
978
+ # done validation, add local_shards
979
+ sharded_tensor._local_shards = local_shards
980
+ sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
981
+
982
+ # run post initialization, i.e. map registration, rpc initialization
983
+ sharded_tensor._post_init()
984
+ return sharded_tensor
985
+
986
+ def sharding_spec(self) -> shard_spec.ShardingSpec:
987
+ """
988
+ Returns the ShardingSpec for the tensor.
989
+ """
990
+ return self._sharding_spec
991
+
992
+ def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor:
993
+ """
994
+ Reshard a sharded tensor given the ``resharding_spec``. For now, we only support
995
+ a single local shard.
996
+
997
+ If ``resharding_spec`` is the same as the original one, this becomes a no-op.
998
+ If ``resharding_spec`` only shares the same sharding dim with the original one,
999
+ we swap local shards directly.
1000
+ For more generic cases, we merge different shards across different ranks and split
1001
+ the local shards based on the ``resharding_spec`` via `all_to_all` collective API.
1002
+
1003
+ Args:
1004
+ resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
1005
+ specification describing how the tensor is sharded.
1006
+
1007
+ Returns:
1008
+ A :class:`ShardedTensor` object whose local shards are resharded.
1009
+
1010
+ Examples:
1011
+ >>> # xdoctest: +SKIP
1012
+ >>> # We have 2 process groups, 2 ranks.
1013
+ >>> tensor = torch.arange(4, dtype=torch.int64) + 1 + 2 * rank
1014
+ >>> tensor = torch.stack([tensor, tensor])
1015
+ >>> tensor
1016
+ tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) # Rank 0
1017
+ tensor([[3, 4, 5, 6], [3, 4, 5, 6]]) # Rank 1
1018
+ tensor([[5, 6, 7, 8], [5, 6, 7, 8]]) # Rank 2
1019
+ tensor([[7, 8, 9, 10], [7, 8, 9, 10]]) # Rank 3
1020
+ >>> sharding_dim = 0
1021
+ >>> spec = ChunkShardingSpec(
1022
+ dim=sharding_dim,
1023
+ placements=[
1024
+ "rank:0/cuda:0",
1025
+ "rank:1/cuda:1",
1026
+ "rank:2/cuda:2",
1027
+ "rank:3/cuda:3",
1028
+ ],
1029
+ )
1030
+ >>> current_offsets = [0] * 2
1031
+ >>> current_offsets[0] = rank * 2
1032
+ >>> shard_metadata = ShardMetadata(
1033
+ shard_offsets=copy.deepcopy(current_offsets),
1034
+ shard_sizes=tensor.size(),
1035
+ placement=spec.placements[rank],
1036
+ )
1037
+ >>> local_shards = [
1038
+ Shard(
1039
+ tensor=tensor,
1040
+ metadata=shard_metadata,
1041
+ )
1042
+ ]
1043
+ >>> st = ShardedTensor._init_from_local_shards(local_shards, tensor.size())
1044
+ >>> sharding_dim = 1
1045
+ >>> resharding_spec = ChunkShardingSpec(
1046
+ dim=sharding_dim,
1047
+ placements=[
1048
+ "rank:0/cuda:0",
1049
+ "rank:1/cuda:1",
1050
+ "rank:2/cuda:2",
1051
+ "rank:3/cuda:3",
1052
+ ],
1053
+ )
1054
+ >>> st.reshard(resharding_spec)
1055
+ >>> tensor = st.local_shards()[0].tensor
1056
+ >>> tensor
1057
+ tensor([[1], [1], [3], [3], [5], [5], [7], [7]]) # Rank 0
1058
+ tensor([[2], [2], [4], [4], [6], [6], [8], [8]]) # Rank 1
1059
+ tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2
1060
+ tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3
1061
+ """
1062
+ warnings.warn(DEPRECATE_MSG)
+
+ if (
+ not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or
+ not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec)
+ ):
+ raise NotImplementedError("Only ChunkShardingSpec supported for reshard.")
+ if (len(self.local_shards()) != 1):
+ raise NotImplementedError("Only single local shard supported for reshard.")
+
+ if self._sharding_spec.dim == resharding_spec.dim: # type: ignore[attr-defined]
+ if self._sharding_spec.placements == resharding_spec.placements: # type: ignore[attr-defined]
+ return self
+ else:
+ local_shards, shards_metadata = reshuffle_local_shard(
+ self.local_tensor(),
+ self.size(), # type: ignore[arg-type]
+ self._sharding_spec,
+ resharding_spec,
+ self._process_group,
+ )
+ else:
+ local_shards, shards_metadata = reshard_local_shard(
+ self.local_tensor(),
+ self.size(), # type: ignore[arg-type]
+ self._sharding_spec,
+ resharding_spec,
+ self._process_group,
+ )
+ self._local_shards = local_shards
+ self._metadata.shards_metadata = shards_metadata
+ self._sharding_spec = resharding_spec
+ return self
+
+ def local_tensor(self) -> torch.Tensor:
+ """
+ Return the local tensor for a sharded_tensor. For now, we only support a single local shard.
+
+ Returns:
+ A :class:`torch.Tensor` of the local shard.
+ """
+ if len(self.local_shards()) != 1:
+ raise NotImplementedError("Only single local shard is supported.")
+ return self.local_shards()[0].tensor
+
+ @classmethod
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
+ def dispatch(st: ShardedTensor, func: Callable):
+ # Dispatch to custom user provided op first if it exists.
+ if func in _CUSTOM_SHARDED_OPS:
+ return _CUSTOM_SHARDED_OPS[func](types, args, kwargs, st._process_group)
+
+ # Dispatch to custom sharding spec op if it has one.
+ if _has_custom_op(st._sharding_spec, func):
+ return _dispatch_custom_op(
+ st._sharding_spec,
+ func,
+ types,
+ args,
+ kwargs,
+ st._process_group
+ )
+
+ if func in _SHARDED_OPS:
+ return _SHARDED_OPS[func](types, args, kwargs, st._process_group)
+
+ raise RuntimeError(
+ f"torch function '{func.__name__}', with args: {args} and "
+ f"kwargs: {kwargs} not supported for ShardedTensor!")
+
+ warnings.warn(DEPRECATE_MSG)
+ # Find ShardedTensor instance to get process_group and sharding_spec.
+ st_instance = None
+
+ def find_sharded_tensor(e):
+ nonlocal st_instance
+ if st_instance is None and isinstance(e, ShardedTensor):
+ st_instance = e
+
+ pytree.tree_map_(find_sharded_tensor, args)
+ pytree.tree_map_(find_sharded_tensor, kwargs)
+
+ if st_instance is not None:
+ return dispatch(st_instance, func)
+
+ raise RuntimeError(
+ f"torch function '{func.__name__}', with args: {args} and "
+ f"kwargs: {kwargs} not supported for ShardedTensor!")
+
+ def is_pinned(self) -> bool: # type: ignore[override]
+ """
+ Returns True if the sharded tensor (each local shard) resides in pinned memory.
+ """
+ return self._metadata.tensor_properties.pin_memory
+
+ def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
+ self._remote_shards[rpc_rank] = remote_shards
+
+ def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
+ """
+ Returns a Dict[int, RRef] with keys being the RPC rank and values
+ being RRefs to shards on that rank. The RPC framework needs to be
+ initialized for this functionality.
+
+ Raises an exception if ShardedTensor was created with ``init_rrefs=False``.
+ """
+ if not self._init_rrefs:
+ raise RuntimeError(
+ 'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
+ )
+ return self._remote_shards
+
+ def __hash__(self):
+ return id(self)
+
+ def __repr__(self):
+ return f'ShardedTensor({self._metadata})'
+
+ @dataclass
+ class ProcessGroupState:
+ """
+ State used for serialization/deserialization of the process group.
+ """
+ local_rank: int
+ global_rank: int
+ local_world_size: int
+ global_world_size: int
+
+ def __getstate__(self):
+ pg_state = ShardedTensor.ProcessGroupState(
+ distributed_c10d.get_rank(self._process_group),
+ distributed_c10d.get_rank(),
+ distributed_c10d.get_world_size(self._process_group),
+ distributed_c10d.get_world_size(),
+ )
+
+ return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs
+
+ def __setstate__(self, state):
+ self._sharded_tensor_id = None
+ if not distributed_c10d.is_initialized():
+ raise RuntimeError(
+ 'Need to initialize default process group using '
+ '"init_process_group" before loading ShardedTensor')
+
+ self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state
+
+ # Setup process group
+ from torch.distributed._shard.api import _get_current_process_group
+ self._process_group = _get_current_process_group()
+
+ # Validate process group.
+ local_rank = distributed_c10d.get_rank(self._process_group)
+ if pg_state.local_rank != local_rank:
+ raise RuntimeError(
+ f'Local rank at save time was {pg_state.local_rank}, but at '
+ f'load time was {local_rank}')
+
+ global_rank = distributed_c10d.get_rank()
+ if pg_state.global_rank != global_rank:
+ raise RuntimeError(
+ f'Global rank at save time was {pg_state.global_rank}, but at '
+ f'load time was {global_rank}')
+
+ local_world_size = distributed_c10d.get_world_size(self._process_group)
+ if pg_state.local_world_size != local_world_size:
+ raise RuntimeError(
+ f'Local world size at save time was {pg_state.local_world_size}, '
+ f'but at load time was {local_world_size}')
+
+ global_world_size = distributed_c10d.get_world_size()
+ if pg_state.global_world_size != global_world_size:
+ raise RuntimeError(
+ f'Global world size at save time was {pg_state.global_world_size}, '
+ f'but at load time was {global_world_size}')
+
+ self._post_init()
+
+
+ def _create_tensor_from_params(*size, local_device, tensor_properties: TensorProperties):
+ """ Helper to construct a tensor from size, device and common params. """
+ dtype = tensor_properties.dtype
+ layout = tensor_properties.layout
+ requires_grad = tensor_properties.requires_grad
+ memory_format = tensor_properties.memory_format
+ pin_memory = tensor_properties.pin_memory
+
+ return torch.empty(
+ *size, dtype=dtype, layout=layout,
+ device=local_device, requires_grad=requires_grad,
+ memory_format=memory_format, pin_memory=pin_memory
+ )
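A minimal sketch of what `_create_tensor_from_params` above reduces to, assuming the `TensorProperties` defaults defined in sharded_tensor/metadata.py later in this change; the size, dtype and device used here are illustrative only.

    import torch
    from torch.distributed._shard.sharded_tensor.metadata import TensorProperties

    props = TensorProperties(dtype=torch.float64)
    # roughly what _create_tensor_from_params(2, 3, local_device="cpu", tensor_properties=props) does
    t = torch.empty(
        2, 3,
        dtype=props.dtype, layout=props.layout, device="cpu",
        requires_grad=props.requires_grad,
        memory_format=props.memory_format, pin_memory=props.pin_memory,
    )
    assert t.shape == torch.Size([2, 3]) and t.dtype == torch.float64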
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py ADDED
@@ -0,0 +1,37 @@
+ #!/usr/bin/env python3
+
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import logging
+ from typing import List, Tuple
+
+ from torch.distributed._shard.sharded_tensor.logging_handlers import (
+ _log_handlers,
+ )
+
+ __all__: List[str] = []
+
+
+ def _get_or_create_logger() -> logging.Logger:
+ logging_handler, log_handler_name = _get_logging_handler()
+ logger = logging.getLogger(f"sharding-spec-{log_handler_name}")
+ logger.setLevel(logging.DEBUG)
+ formatter = logging.Formatter(
+ "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
+ )
+ logging_handler.setFormatter(formatter)
+ logger.propagate = False
+ logger.addHandler(logging_handler)
+ return logger
+
+
+ def _get_logging_handler(
+ destination: str = "default",
+ ) -> Tuple[logging.Handler, str]:
+ log_handler = _log_handlers[destination]
+ log_handler_name = type(log_handler).__name__
+ return (log_handler, log_handler_name)
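A small usage sketch of the private helpers above, assuming only the "default" destination from logging_handlers.py is registered; records are formatted but end up in a NullHandler.

    from torch.distributed._shard.sharded_tensor.logger import _get_or_create_logger

    logger = _get_or_create_logger()      # named "sharding-spec-NullHandler"
    logger.debug("resharding started")    # formatted, then discarded by the NullHandler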
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py ADDED
@@ -0,0 +1,16 @@
+ #!/usr/bin/env python3
+
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import logging
+ from typing import Dict, List
+
+ __all__: List[str] = []
+
+ _log_handlers: Dict[str, logging.Handler] = {
+ "default": logging.NullHandler(),
+ }
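Since `_log_handlers` above is a plain module-level dict, an extra destination could in principle be registered before `_get_logging_handler` is called; the "stderr" name below is purely hypothetical and not part of the module.

    import logging
    from torch.distributed._shard.sharded_tensor import logging_handlers

    # hypothetical extra destination; only "default" ships with the module
    logging_handlers._log_handlers["stderr"] = logging.StreamHandler()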
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py ADDED
@@ -0,0 +1,82 @@
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import List
+
+ import torch
+ from torch.distributed._shard.metadata import ShardMetadata
+
+ class MEM_FORMAT_ENCODING(Enum):
+ TORCH_CONTIGUOUS_FORMAT = 0
+ TORCH_CHANNELS_LAST = 1
+ TORCH_PRESERVE_FORMAT = 2
+
+ @dataclass
+ class TensorProperties:
+ """ Properties used to create :class:`Tensor` """
+
+ # Regular tensor fields
+ dtype: torch.dtype = field(default=torch.get_default_dtype())
+ layout: torch.layout = field(default=torch.strided)
+ requires_grad: bool = False
+ memory_format: torch.memory_format = field(default=torch.contiguous_format)
+ pin_memory: bool = False
+
+ def __getstate__(self):
+ # torch.memory_format cannot be pickled, so encode it as an enum for serialization.
+ memory_format = self.memory_format
+ if memory_format == torch.contiguous_format:
+ mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT
+ elif memory_format == torch.channels_last:
+ mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST
+ elif memory_format == torch.preserve_format:
+ mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT
+ else:
+ raise RuntimeError(f'Invalid torch.memory_format: {memory_format}')
+
+ return (
+ self.dtype,
+ self.layout,
+ self.requires_grad,
+ mem_format_encoding,
+ self.pin_memory,
+ )
+
+ def __setstate__(
+ self,
+ state,
+ ):
+ (self.dtype, self.layout, self.requires_grad, mem_format_encoding, self.pin_memory) = state
+
+ if mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT:
+ memory_format = torch.contiguous_format
+ elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST:
+ memory_format = torch.channels_last
+ elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT:
+ memory_format = torch.preserve_format
+ else:
+ raise RuntimeError(f'Invalid torch.memory_format encoding: {mem_format_encoding}')
+
+ self.memory_format = memory_format
+
+ @staticmethod
+ def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties":
+ return TensorProperties(
+ dtype=tensor.dtype,
+ layout=tensor.layout,
+ requires_grad=tensor.requires_grad,
+ memory_format=torch.contiguous_format,
+ pin_memory=tensor.is_pinned()
+ )
+ @dataclass
+ class ShardedTensorMetadata:
+ """
+ Represents metadata for :class:`ShardedTensor`
+ """
+
+ # Metadata about each shard of the Tensor
+ shards_metadata: List[ShardMetadata] = field(default_factory=list)
+
+ # Size of each dim of the overall Tensor.
+ size: torch.Size = field(default=torch.Size([]))
+
+ tensor_properties: TensorProperties = field(default_factory=TensorProperties)
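A short sketch of why `__getstate__`/`__setstate__` above exist: `torch.memory_format` itself cannot be pickled, so it is round-tripped through `MEM_FORMAT_ENCODING`; the tensor used here is illustrative only.

    import pickle
    import torch
    from torch.distributed._shard.sharded_tensor.metadata import TensorProperties

    props = TensorProperties.create_from_tensor(torch.zeros(4))
    restored = pickle.loads(pickle.dumps(props))   # goes through __getstate__/__setstate__
    assert restored.memory_format == torch.contiguous_format
    assert restored.dtype == props.dtype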
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py ADDED
@@ -0,0 +1,248 @@
+ import copy
+ from typing import List, Tuple
+
+ import torch
+ import torch.distributed as dist
+ from torch._C._distributed_c10d import (
+ ProcessGroup,
+ )
+ import torch.distributed._shard.sharding_spec as shard_spec
+ from torch.distributed._shard.sharding_spec._internals import (
+ get_split_size,
+ get_chunked_dim_size,
+ )
+ from torch.distributed.nn.functional import (
+ all_to_all,
+ all_to_all_single,
+ )
+ from torch.distributed._shard.metadata import ShardMetadata
+
+ from .shard import Shard
+
+
+ def get_idx_from_placements(placements, current_rank) -> int:
+ """
+ Return the position of the current rank in the given placements.
+
+ Args:
+ placements(List[Union[_remote_device, str]]):
+ Specifies the placement of each shard of the Tensor. The size of
+ the list represents the number of shards to be created. This could
+ be a list of
+ :class:`torch.distributed._remote_device`'s. This list
+ could also contain a string which represents remote
+ device as accepted by
+ :class:`torch.distributed._remote_device`
+ current_rank (int): rank of the current device.
+
+ Returns:
+ An int giving the position of the current rank in the placement list.
+ """
+ for idx, placement in enumerate(placements): # type: ignore[attr-defined]
+ if current_rank == placement.rank(): # type: ignore[union-attr]
+ return idx
+ raise RuntimeError('current_rank not in the placement.')
+
+
+ def build_reshard_metadata(
+ st_size: torch.Size,
+ sharding_spec: shard_spec.ShardingSpec,
+ world_size: int,
+ ) -> Tuple[List[ShardMetadata], List[int]]:
+ """
+ Based on the given sharding spec, we calculate the offset and local shard size.
+ We then build a ShardMetadata on top of the calculation result.
+
+ Args:
+ st_size (torch.Size): The size of the sharded tensor.
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
+ specification describing how the tensor is sharded.
+ world_size (int): number of ranks.
+
+ Returns:
+ A Tuple of the following:
+ A List[`ShardMetadata`] which contains the metadata for the shard, including
+ offsets, lengths and device placement.
+ A List[int] which contains the ranks in the order of placement.
+ """
+ shard_dim = int(sharding_spec.dim) # type: ignore[attr-defined]
+ shards_metadata = [None] * world_size
+ ranks = []
+ offsets = [0] * len(st_size)
+ split_size = get_split_size(st_size[shard_dim], world_size)
+ for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined]
+ ranks.append(placement.rank())
+ sharded_dim_size = get_chunked_dim_size(st_size[shard_dim], split_size, idx)
+ local_tensor_size = list(st_size)
+ local_tensor_size[shard_dim] = sharded_dim_size
+ shards_metadata[placement.rank()] = ShardMetadata( # type: ignore[call-overload]
+ shard_offsets=copy.deepcopy(offsets),
+ shard_sizes=local_tensor_size,
+ placement=placement,
+ )
+ offsets[shard_dim] += sharded_dim_size
+ return shards_metadata, ranks # type: ignore[return-value]
+
+
+ def reshuffle_local_shard(
+ local_shard: torch.Tensor,
+ st_size: torch.Size,
+ sharding_spec: shard_spec.ShardingSpec,
+ resharding_spec: shard_spec.ShardingSpec,
+ pg: ProcessGroup,
+ ) -> Tuple[List[Shard], List[ShardMetadata]]:
+ """
+ Reshuffle the local shard directly when the reshard dim is the same as the original
+ sharding dim. Logically we do this in two steps:
+ 1. Collect all shards based on the original sharding spec.
+ 2. Reshard the tensor based on the given resharding spec.
+
+ In reality, we consolidate the two steps into one by sending the local tensor to
+ the new shard directly based on the resharding spec.
+
+ Args:
+ local_shard (Tensor): Local tensor stored in the current rank.
+ st_size (torch.Size): The size of the sharded tensor.
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
+ specification describing how the tensor is sharded originally.
+ resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
+ specification describing how the tensor will be resharded.
+ pg (ProcessGroup): The process group to aggregate on.
+
+ Returns:
+ A Tuple of the following:
+ A List[`Shard`] which contains the local tensor and its metadata.
+ A List[`ShardMetadata`] which contains the metadata for the shard, including
+ offsets, lengths and device placement.
+ """
+ current_rank = dist.get_rank(pg)
+ world_size = dist.get_world_size(pg)
+ # Build shards_metadata first.
+ shards_metadata, ranks = build_reshard_metadata(
+ st_size, resharding_spec, world_size
+ )
+ # Get input split size for all2all.
+ reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
+ split_size = get_split_size(st_size[reshard_dim], world_size)
+ input_split_sizes = [0] * world_size
+ idx = get_idx_from_placements(sharding_spec.placements, current_rank) # type: ignore[attr-defined]
+ new_rank = resharding_spec.placements[idx].rank() # type: ignore[union-attr, attr-defined]
+ input_split_sizes[new_rank] = local_shard.size(reshard_dim)
+ # Get output split size for all2all.
+ output_split_sizes = [0] * world_size
+ new_idx = ranks.index(current_rank)
+ sharded_dim_size = get_chunked_dim_size(st_size[reshard_dim], split_size, new_idx)
+ output_split_sizes[new_rank] = sharded_dim_size
+ # Get gathered_input for all2all.
+ local_shard = local_shard.transpose(0, reshard_dim).contiguous()
+ gathered_input_size = list(local_shard.size())
+ gathered_input_size[0] = sharded_dim_size
+ gathered_input = torch.empty(gathered_input_size, device=local_shard.device, dtype=local_shard.dtype)
+ # all2all.
+ local_shard = all_to_all_single(
+ gathered_input,
+ local_shard,
+ input_split_sizes=input_split_sizes,
+ output_split_sizes=output_split_sizes,
+ group=pg,
+ )
+ local_tensor = local_shard.transpose(0, reshard_dim).contiguous()
+ local_shards = [Shard(local_tensor, shards_metadata[current_rank])]
+ return local_shards, shards_metadata
+
+
+ def reshard_local_shard(
+ local_tensor: torch.Tensor,
+ st_size: torch.Size,
+ sharding_spec: shard_spec.ShardingSpec,
+ resharding_spec: shard_spec.ShardingSpec,
+ pg: ProcessGroup,
+ ) -> Tuple[List[Shard], List[ShardMetadata]]:
+ """
+ Reshard a sharded tensor given the ``resharding_spec``. When the reshard dim is
+ different from the original sharding dim, we need to do two steps logically:
+ 1. Collect all shards based on the original sharding spec.
+ 2. Reshard the tensor based on the given resharding spec.
+
+ In reality, we consolidate the two steps into one by sending each rank the new
+ shard based on the resharding spec.
+
+ Args:
+ local_tensor (Tensor): Local tensor stored in the current rank.
+ st_size (torch.Size): The size of the sharded tensor.
+ sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
+ specification describing how the tensor is sharded originally.
+ resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
+ specification describing how the tensor will be resharded.
+ pg (ProcessGroup): The process group to aggregate on.
+
+ Returns:
+ A Tuple of the following:
+ A List[`Shard`] which contains the local tensor and its metadata.
+ A List[`ShardMetadata`] which contains the metadata for the shard, including
+ offsets, lengths and device placement.
+ """
+ current_rank = dist.get_rank(pg)
+ world_size = dist.get_world_size(pg)
+ current_sharding_dim = int(sharding_spec.dim) # type: ignore[attr-defined]
+ reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
+
+ # Build shards_metadata first.
+ shards_metadata, ranks = build_reshard_metadata(
+ st_size, resharding_spec, world_size
+ )
+
+ # Compute expected size
+ input_split_sizes = []
+ for metadata in shards_metadata:
+ input_split_sizes.append(metadata.shard_sizes[reshard_dim])
+ rearrange_input = any(ranks[i] > ranks[i + 1] for i in range(len(ranks) - 1))
+
+ if rearrange_input:
+ # Need to re-arrange reshard_dim of local_tensor before all2all.
+ indices: List[int] = []
+ for metadata in shards_metadata:
+ offset_start_idx = metadata.shard_offsets[reshard_dim]
+ split_size = metadata.shard_sizes[reshard_dim]
+ indices += range(offset_start_idx, offset_start_idx + split_size)
+ local_tensor = local_tensor.index_select(
+ reshard_dim, torch.tensor(indices, device=local_tensor.device)
+ )
+
+ # Because reshard_dim != original shard_dim, we need to compute the
+ # size of the tensor from each rank.
+ output_tensor_list = [torch.tensor(1)] * world_size
+ split_size = get_split_size(st_size[current_sharding_dim], world_size)
+ rearrange_output_list = False
+ indices = []
+ for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined]
+ sharded_dim_size = get_chunked_dim_size(
+ st_size[current_sharding_dim], split_size, idx
+ )
+ output_tensor_size = list(st_size)
+ output_tensor_size[current_sharding_dim] = sharded_dim_size
+ output_tensor_size[reshard_dim] = input_split_sizes[current_rank]
+ output_tensor_list[
+ placement.rank()
+ ] = torch.empty( # type: ignore[union-attr, index]
+ output_tensor_size, device=local_tensor.device, dtype=local_tensor.dtype
+ )
+ indices.append(placement.rank()) # type: ignore[union-attr, index, arg-type]
+ if idx != placement.rank(): # type: ignore[union-attr]
+ rearrange_output_list = True
+
+ # Perform autograd enabled all2all.
+ input_tensor_tuple = torch.split(local_tensor, input_split_sizes, dim=reshard_dim)
+ input_tensor_list = [tensor.contiguous() for tensor in input_tensor_tuple]
+ output_tensor_list = all_to_all(
+ output_tensor_list,
+ input_tensor_list,
+ group=pg,
+ )
+
+ if rearrange_output_list:
+ # Need to re-arrange original shard_dim of output_tensor_list.
+ output_tensor_list = [output_tensor_list[idx] for idx in indices] # type: ignore[call-overload]
+ local_tensor = torch.cat(output_tensor_list, dim=current_sharding_dim)
+ local_shards = [Shard(local_tensor, shards_metadata[current_rank])]
+ return local_shards, shards_metadata
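A sketch of the chunking arithmetic `build_reshard_metadata` above relies on, using the same `_internals` helpers the module imports; the 10-element dimension and 4-rank world are assumptions for illustration.

    from torch.distributed._shard.sharding_spec._internals import (
        get_split_size,
        get_chunked_dim_size,
    )

    dim_size, world_size = 10, 4
    split_size = get_split_size(dim_size, world_size)
    offset = 0
    for rank in range(world_size):
        # mirrors the offset/size bookkeeping in build_reshard_metadata
        shard_size = get_chunked_dim_size(dim_size, split_size, rank)
        print(f"rank {rank}: offset={offset}, size={shard_size}")
        offset += shard_size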
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py ADDED
@@ -0,0 +1,58 @@
+ from dataclasses import dataclass
+ from typing import List
+
+ import torch
+ from torch.distributed._shard.metadata import ShardMetadata
+ from torch.distributed.remote_device import _remote_device
+
+
+ @dataclass
+ class Shard:
+ """
+ Container which holds the data for a shard as a Tensor and also
+ the associated metadata for that shard.
+
+ Args:
+ tensor(torch.Tensor): Local tensor for the shard.
+ metadata(:class:`torch.distributed._shard.sharded_tensor.ShardMetadata`):
+ The metadata for the shard, including offsets, lengths and device placement.
+ """
+ __slots__ = ['tensor', 'metadata']
+ tensor: torch.Tensor
+ metadata: ShardMetadata
+
+ def __post_init__(self):
+ # verification between local tensor and metadata
+ if list(self.tensor.size()) != self.metadata.shard_sizes:
+ raise ValueError(
+ "Shard tensor size does not match with metadata.shard_sizes! "
+ f"Found shard tensor size: {list(self.tensor.size())}, "
+ f"metadata.shard_sizes: {self.metadata.shard_sizes}, "
+ )
+ placement_device = self.metadata.placement
+ if placement_device is not None and placement_device.device() != self.tensor.device:
+ raise ValueError(
+ f"Local shard tensor device does not match with local Shard's placement! "
+ f"Found local shard tensor device: {self.tensor.device}, "
+ f"local shard metadata placement device: {placement_device.device()}"
+ )
+
+ @classmethod
+ def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: List[int], rank: int):
+ """
+ Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank.
+
+ Args:
+ tensor(torch.Tensor): Local tensor for the shard.
+ shard_offsets(List[int]): List of integers specifying the offset
+ of the shard on each dimension.
+ rank(int): The rank for the shard.
+ """
+ shard_sizes = list(tensor.size())
+ placement = _remote_device(f"rank:{rank}/{str(tensor.device)}")
+ shard_meta = ShardMetadata(
+ shard_offsets=shard_offsets,
+ shard_sizes=shard_sizes,
+ placement=placement
+ )
+ return Shard(tensor, shard_meta)
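A usage sketch of `Shard.from_tensor_and_offsets` above; the shapes, offsets and rank are illustrative only. `__post_init__` then validates that the tensor size and device agree with the generated metadata.

    import torch
    from torch.distributed._shard.sharded_tensor.shard import Shard

    local = torch.zeros(2, 4)      # e.g. rows 2..3 of an 8 x 4 global tensor
    shard = Shard.from_tensor_and_offsets(local, shard_offsets=[2, 0], rank=0)
    assert shard.metadata.shard_sizes == [2, 4]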
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py ADDED
@@ -0,0 +1,211 @@
+ import collections.abc
+ import copy
+ from typing import Optional, List, Sequence
+
+ import torch
+ from torch.distributed import distributed_c10d
+ from torch.distributed import rpc
+ from torch.distributed._shard.sharding_spec._internals import (
+ check_tensor,
+ validate_non_overlapping_shards_metadata,
+ )
+
+ from torch.distributed._shard.metadata import ShardMetadata
+ from .metadata import TensorProperties, ShardedTensorMetadata
+ from .shard import Shard
+
+ def _parse_and_validate_remote_device(pg, remote_device):
+ if remote_device is None:
+ raise ValueError("remote device is None")
+
+ worker_name = remote_device.worker_name()
+ rank = remote_device.rank()
+ device = remote_device.device()
+
+ # Validate rank, skip validation if rank is not part of process group.
+ if not distributed_c10d._rank_not_in_group(pg):
+ if rank is not None and (rank < 0 or rank >= distributed_c10d.get_world_size(pg)):
+ raise ValueError(f'Invalid rank: {rank}')
+
+ if worker_name is not None:
+ if not rpc._is_current_rpc_agent_set():
+ raise RuntimeError(f'RPC framework needs to be initialized for using worker names: {worker_name}')
+
+ workers = rpc._get_current_rpc_agent().get_worker_infos()
+ for worker in workers:
+ if worker.name == worker_name:
+ return worker.id, device
+
+ raise ValueError(f'Invalid worker name: {worker_name}')
+
+ return rank, device
+
+ def _validate_output_tensor_for_gather(
+ my_rank: int,
+ dst_rank: int,
+ size: torch.Size,
+ dst_tensor: Optional[torch.Tensor],
+ ) -> None:
+ if dst_rank == my_rank:
+ if dst_tensor is None:
+ raise ValueError(
+ f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}"
+ )
+ if tuple(size) != tuple(dst_tensor.size()):
+ raise ValueError(
+ f"Argument ``dst_tensor`` has size {tuple(dst_tensor.size())}, "
+ f"but should be {tuple(size)}"
+ )
+ elif dst_tensor is not None:
+ raise ValueError(
+ "Argument ``dst_tensor`` must NOT be specified "
+ "on non-destination ranks."
+ )
+
+ def _flatten_tensor_size(size) -> torch.Size:
+ """
+ Checks if the tensor size is valid, then flatten/return a torch.Size object.
+ """
+ if len(size) == 1 and isinstance(size[0], collections.abc.Sequence):
+ dims = list(*size)
+ else:
+ dims = list(size)
+
+ for dim in dims:
+ if not isinstance(dim, int):
+ raise TypeError(f'size has to be a sequence of ints, found: {dims}')
+
+ return torch.Size(dims)
+
+ def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True):
+ if is_local:
+ assert isinstance(ranks, int)
+ if expected != actual:
+ raise ValueError(f"Local shards' tensor {prop_name} property needs to be the same on rank:{ranks}! "
+ f"Found one local shard tensor {prop_name}={expected}, "
+ f"the other local shard tensor {prop_name}={actual}.")
+ else:
+ # comparison across ranks; the ranks list should contain exactly two ranks
+ assert len(ranks) == 2
+ if expected != actual:
+ raise ValueError(f"ShardedTensor {prop_name} property does not match from different ranks! "
+ f"Found {prop_name}={expected} on rank:{ranks[0]}, "
+ f"and {prop_name}={actual} on rank:{ranks[1]}.")
+
+
+ def build_metadata_from_local_shards(
+ local_shards: List[Shard],
+ global_size: torch.Size,
+ current_rank: int,
+ pg: distributed_c10d.ProcessGroup
+ ) -> ShardedTensorMetadata:
+
+ assert len(local_shards) > 0, "must have local shards!"
+ local_shard_metadatas: List[ShardMetadata] = []
+
+ first_shard_dtype = local_shards[0].tensor.dtype
+ first_shard_layout = local_shards[0].tensor.layout
+ first_shard_requires_grad = local_shards[0].tensor.requires_grad
+ first_shard_is_pinned = local_shards[0].tensor.is_pinned()
+
+ # 1). Validate local tensors and associated metadatas
+ for local_shard in local_shards:
+ local_shard_tensor = local_shard.tensor
+ local_shard_meta = local_shard.metadata
+ local_shard_metadatas.append(local_shard_meta)
+ rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement)
+
+ if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout:
+ raise ValueError(
+ f'Only torch.strided layout is currently supported, but found '
+ f'{local_shard_tensor.layout} on rank:{current_rank}!'
+ )
+
+ if not local_shard_tensor.is_contiguous():
+ raise ValueError('Only torch.contiguous_format memory_format is currently supported!')
+
+ if rank != current_rank:
+ raise ValueError(
+ f"Local shard metadata's rank does not match with the rank in its process group! "
+ f'Found current rank in the process group: {current_rank}, '
+ f"local ShardMetadata placement's rank: {rank}"
+ )
+ if local_shard_tensor.device != local_device:
+ raise ValueError(
+ f"Local shard tensor device does not match with local Shard's placement! "
+ f"Found local shard tensor device: {local_shard_tensor.device}, "
+ f"local shard metadata placement device: {local_device}"
+ )
+
+ _raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
+ _raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank)
+ _raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank)
+ _raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank)
+
+ # 2). Build a "local" ShardedTensorMetadata with all local shards on this rank, then
+ # do all_gather to collect local_sharded_tensor_metadata from all ranks
+ local_tensor_properties = TensorProperties(
+ dtype=first_shard_dtype,
+ layout=first_shard_layout,
+ requires_grad=first_shard_requires_grad,
+ memory_format=torch.contiguous_format,
+ pin_memory=first_shard_is_pinned
+ )
+
+ local_sharded_tensor_metadata = ShardedTensorMetadata(
+ shards_metadata=local_shard_metadatas,
+ size=global_size,
+ tensor_properties=local_tensor_properties)
+
+ return local_sharded_tensor_metadata
+
+
+ def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]):
+ global_sharded_tensor_metadata = None
+ global_metadata_rank = 0
+
+ for rank, rank_metadata in enumerate(gathered_metadatas):
+ if rank_metadata is None:
+ continue
+
+ if global_sharded_tensor_metadata is None:
+ global_sharded_tensor_metadata = copy.deepcopy(rank_metadata)
+ global_metadata_rank = rank
+ else:
+ _raise_if_mismatch(global_sharded_tensor_metadata.size,
+ rank_metadata.size,
+ "global_size",
+ [global_metadata_rank, rank],
+ is_local=False)
+
+ # don't need to check layout and memory format as we already checked in local shards validation stage
+ _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype,
+ rank_metadata.tensor_properties.dtype,
+ "dtype",
+ [global_metadata_rank, rank],
+ is_local=False)
+
+ _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad,
+ rank_metadata.tensor_properties.requires_grad,
+ "requires_grad",
+ [global_metadata_rank, rank],
+ is_local=False)
+
+ _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory,
+ rank_metadata.tensor_properties.pin_memory,
+ "pin_memory",
+ [global_metadata_rank, rank],
+ is_local=False)
+ # pass all validations, extend shards metadata
+ global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata)
+
+ if global_sharded_tensor_metadata is not None:
+ # check if shards_metadata have overlapping shards
+ validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata)
+
+ # check if the shards_metadata is compatible with the global size of the sharded tensor.
+ check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size)
+ else:
+ raise ValueError("ShardedTensor has no local shards on any rank!")
+
+ return global_sharded_tensor_metadata
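A sketch of the size normalization done by `_flatten_tensor_size` above: both a flat argument tuple and a single nested sequence flatten to the same torch.Size. The shapes are illustrative only.

    import torch
    from torch.distributed._shard.sharded_tensor.utils import _flatten_tensor_size

    assert _flatten_tensor_size((8, 4)) == torch.Size([8, 4])      # flat ints
    assert _flatten_tensor_size(([8, 4],)) == torch.Size([8, 4])   # single nested sequence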
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py ADDED
@@ -0,0 +1,27 @@
+ import abc
+ import torch.nn as nn
+
+ class Sharder(abc.ABC):
+ """
+ This is an interface which allows a user to create more advanced
+ sharding strategies that cannot easily be composed with a
+ `ShardingSpec`.
+
+ :class:`torch.distributed._shard.sharding_plan.ShardingPlan` can
+ take a `Sharder` object and call `shard` to shard the module,
+ then replace the original module with the sharded module returned.
+ """
+ @abc.abstractmethod
+ def shard(self, module: nn.Module) -> nn.Module:
+ """
+ Shard a module based on the implementation of this method, and
+ return the sharded version of the module.
+
+ Args:
+ module (:class:`torch.nn.Module`):
+ The module to apply sharding to.
+ Returns:
+ A :class:`torch.nn.Module` object that represents a module
+ that's already been sharded.
+ """
+ pass
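A sketch of the minimal surface a custom `Sharder` has to implement; the `NoOpSharder` below is hypothetical and simply hands the module back instead of replacing it with a sharded version.

    import torch.nn as nn
    from torch.distributed._shard.sharder import Sharder

    class NoOpSharder(Sharder):                 # hypothetical example
        def shard(self, module: nn.Module) -> nn.Module:
            # a real Sharder would swap parameters/submodules for sharded equivalents
            return module

    sharded = NoOpSharder().shard(nn.Linear(4, 4))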
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .api import (
+ ShardingPlan,
+ ShardingPlanner
+ )
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (272 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc ADDED
Binary file (4.33 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py ADDED
@@ -0,0 +1,86 @@
+ import abc
+ import torch.nn as nn
+
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Union
+
+ from torch.distributed._shard.sharder import Sharder
+ from torch.distributed._shard.sharding_spec import ShardingSpec
+
+ @dataclass
+ class ShardingPlan:
+ """
+ Representation of a sharding plan that describes how to shard a module
+ across hosts. `plan` is used to shard module parameters according to the spec provided;
+ `output_plan` and `return_local_tensor` are optional and are used to specify the output
+ layout of a module with a spec, and when to convert back to the data parallel fashion.
+
+ Args:
+ plan (Dict[str, Union[:class:`torch.distributed._shard.sharding_spec.ShardingSpec`,
+ :class:`torch.distributed._shard.sharder.Sharder`]]):
+ a dict that describes how to shard a module; there are currently two ways to shard a module:
+ 1. directly shard a module parameter by a `ShardingSpec`, keyed by the name of
+ a parameter to a `ShardingSpec`.
+ 2. shard a submodule by applying a `Sharder` on it, keyed by the name of a module
+ to a `Sharder` object.
+ output_plan (Dict[str, :class:`torch.distributed._shard.sharding_spec.ShardingSpec`], optional):
+ a dict that specifies the layout of a module's output which produces a ShardedTensor,
+ keyed by the name of the module to a ShardingSpec ("" as the key means the root module).
+ Default: `None`
+ return_local_tensor (List[str], optional): a list of strings; each element enables
+ a module's sharded output to be returned as a Tensor from its local shards to
+ ensure further processing in a data parallel fashion. ("" in the list means the
+ root module).
+ Default: None
+ Example:
+ Suppose we want to shard a module with two linear layers and then run it with DDP, and we also
+ want to convert the output of the second linear layer back to DDP; we can do it as follows:
+
+ >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d)
+ >>> class MyModule(nn.Module):
+ >>> def __init__(self):
+ >>> super().__init__()
+ >>> self.fc1 = nn.Linear()
+ >>> self.gelu = nn.GELU()
+ >>> self.fc2 = nn.Linear()
+ >>> self.relu = nn.ReLU()
+ >>>
+ >>> def forward(self, input):
+ >>> return self.relu(self.fc2(self.gelu(self.fc1(input))))
+
+
+ >>> # xdoctest: +SKIP("Undefined spec1, spec2")
+ >>> sharding_plan = ShardingPlan(
+ >>> plan={
+ >>> "fc1.weight": spec1,
+ >>> "fc2.weight": spec2
+ >>> },
+ >>> output_plan={
+ >>> "fc2": output_spec
+ >>> },
+ >>> return_local_tensor=["fc2"]
+ >>> )
+ """
+ plan: Dict[str, Union[ShardingSpec, Sharder]]
+ output_plan: Optional[Dict[str, ShardingSpec]] = None
+ return_local_tensor: Optional[List[str]] = None
+
+
+ class ShardingPlanner(abc.ABC):
+ """
+ Default ShardingPlanner interface. It can be extended to
+ implement advanced sharding strategies.
+ """
+ @abc.abstractmethod
+ def build_plan(self, module: nn.Module) -> ShardingPlan:
+ """
+ Given an nn.Module, define how to shard the module across
+ ranks and return a ShardingPlan.
+ Args:
+ module (:class:`torch.nn.Module`):
+ The module to apply sharding to.
+ Returns:
+ A :class:`torch.distributed._shard.sharding_plan.ShardingPlan` object that
+ represents how to shard the module.
+ """
+ pass
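A sketch of a custom `ShardingPlanner` built on the interface above; the planner name and the rule of sharding every 2-D parameter with one caller-provided spec are assumptions for illustration.

    import torch.nn as nn
    from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec

    class ColwisePlanner(ShardingPlanner):      # hypothetical example
        def __init__(self, spec: ChunkShardingSpec):
            self.spec = spec

        def build_plan(self, module: nn.Module) -> ShardingPlan:
            # shard every 2-D parameter with the same spec, leave the rest replicated
            plan = {
                name: self.spec
                for name, param in module.named_parameters()
                if param.dim() == 2
            }
            return ShardingPlan(plan=plan)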
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py ADDED
@@ -0,0 +1,12 @@
+ from .api import (
+ DevicePlacementSpec,
+ EnumerableShardingSpec,
+ PlacementSpec,
+ ShardingSpec,
+ _infer_sharding_spec_from_shards_metadata,
+ )
+ from .chunk_sharding_spec import (
+ ChunkShardingSpec as ChunkShardingSpec,
+ )
+
+ from torch.distributed._shard.metadata import ShardMetadata
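A construction sketch for the `ChunkShardingSpec` re-exported above, using the same "rank:<r>/<device>" placement strings as the reshard() docstring earlier in this change; the two-rank layout is illustrative only.

    from torch.distributed._shard.sharding_spec import ChunkShardingSpec

    spec = ChunkShardingSpec(
        dim=0,                       # shard along the first dimension
        placements=[
            "rank:0/cuda:0",
            "rank:1/cuda:1",
        ],
    )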
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (522 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc ADDED
Binary file (6 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc ADDED
Binary file (9.22 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc ADDED
Binary file (5.75 kB).