applied-ai-018 committed on
Commit d1645b2 · verified · 1 Parent(s): d375727

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  2. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py +2 -0
  3. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py +217 -0
  13. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py +144 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py +438 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py +506 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py +246 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py +246 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py +15 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/fsspec.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_save_plans.py +49 -0
  44. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_tensors.py +59 -0
  45. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py +15 -0
  46. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py +53 -0
  47. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py +103 -0
  48. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_storage_utils.py +50 -0
  49. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_traverse.py +167 -0
  50. venv/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py +41 -0
ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa08d8aebd7e783ad6b9bfb1f029b49070b388941e8b783eeabed1b09c7107b9
+ size 33555533
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from ._fsdp_api import MixedPrecisionPolicy
+ from .fully_shard import FSDP, fully_shard
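
The two re-exports above are the public surface of this composable FSDP package. For orientation, a minimal usage sketch (assumptions: a multi-GPU CUDA setup with the default process group already initialized; the `mp_policy`, `param_dtype`, and `reduce_dtype` names follow this package's `fully_shard.py` and `_fsdp_api.py`; the model is purely illustrative):

import torch
import torch.nn as nn
from torch.distributed._composable.fsdp import MixedPrecisionPolicy, fully_shard

# Hypothetical toy model; any nn.Module is handled the same way.
model = nn.Sequential(nn.Linear(1024, 4096), nn.GELU(), nn.Linear(4096, 1024)).cuda()
policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)
for layer in (model[0], model[2]):
    fully_shard(layer, mp_policy=policy)  # each call forms one dim-0 sharded parameter group
fully_shard(model, mp_policy=policy)  # root call manages any remaining parameters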
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (305 Bytes).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc ADDED
Binary file (2.73 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc ADDED
Binary file (6.58 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc ADDED
Binary file (5.08 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc ADDED
Binary file (4.35 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc ADDED
Binary file (12.3 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc ADDED
Binary file (16.2 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc ADDED
Binary file (8.53 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py ADDED
@@ -0,0 +1,217 @@
+ from typing import List, NamedTuple, Optional, Tuple
+
+ import torch
+ import torch.distributed as dist
+ from torch.distributed.distributed_c10d import ReduceOp
+ from ._fsdp_common import (
+     _get_dim0_padded_size,
+     _raise_assert_with_print,
+     _to_dtype_if_needed,
+ )
+ from ._fsdp_param import FSDPParam
+
+
+ class AllGatherResult(NamedTuple):
+     all_gather_output: torch.Tensor
+     all_gather_event: Optional[torch.cuda.Event]
+     all_gather_work: Optional[dist.distributed_c10d.Work]
+     all_gather_input_numels: List[int]
+
+
+ @torch.no_grad()
+ def foreach_all_gather(
+     fsdp_params: List[FSDPParam],
+     group: dist.ProcessGroup,
+     async_op: bool,
+     all_gather_copy_in_stream: torch.cuda.Stream,
+     all_gather_stream: torch.cuda.Stream,
+     device: torch.device,
+ ) -> Optional[AllGatherResult]:
+     world_size, rank = group.size(), group.rank()
+     # - Copy in
+     with torch.cuda.stream(all_gather_copy_in_stream):
+         param_all_gather_inputs = [
+             fsdp_param.all_gather_input for fsdp_param in fsdp_params
+         ]
+         dtype = param_all_gather_inputs[0].dtype
+         if not all(t.dtype == dtype for t in param_all_gather_inputs):
+             raise NotImplementedError(
+                 f"Mixed dtype not supported yet: {[t.dtype for t in param_all_gather_inputs]}"
+             )
+         inp_split_sizes = [inp.numel() for inp in param_all_gather_inputs]
+         all_gather_input_numel = sum(inp_split_sizes)
+         all_gather_output = torch.empty(
+             (all_gather_input_numel * world_size,), dtype=dtype, device=device
+         )
+         all_gather_input = all_gather_output.narrow(
+             0, all_gather_input_numel * rank, all_gather_input_numel
+         )
+         foreach_copy_dsts = torch.split(all_gather_input, inp_split_sizes)
+         torch._foreach_copy_(foreach_copy_dsts, param_all_gather_inputs)
+         del param_all_gather_inputs
+     all_gather_stream.wait_stream(all_gather_copy_in_stream)
+     with torch.cuda.stream(all_gather_stream):
+         # - All-gather
+         all_gather_work = dist.all_gather_into_tensor(
+             output_tensor=all_gather_output,
+             input_tensor=all_gather_input,
+             group=group,
+             async_op=async_op,
+         )
+         all_gather_event = all_gather_stream.record_event()
+         return AllGatherResult(
+             all_gather_output, all_gather_event, all_gather_work, inp_split_sizes
+         )
+
+
+ @torch.no_grad()
+ def foreach_all_gather_copy_out(
+     all_gather_result: AllGatherResult,
+     fsdp_params: List[FSDPParam],
+     group: dist.ProcessGroup,
+ ) -> None:
+     (
+         all_gather_output,
+         all_gather_event,
+         all_gather_work,
+         all_gather_input_numels,
+     ) = all_gather_result
+     if all_gather_event is not None:  # sync op
+         torch.cuda.current_stream().wait_event(all_gather_event)
+     if all_gather_work is not None:  # async op
+         all_gather_work.wait()
+     world_size = group.size()
+     dtype, device = all_gather_output.dtype, all_gather_output.device
+     for all_gather_input_numel, fsdp_param in zip(all_gather_input_numels, fsdp_params):
+         fsdp_param.init_all_gather_output(
+             all_gather_input_numel, world_size, dtype, device
+         )  # no-op after 1st call
+         fsdp_param.alloc_all_gather_output()
+     all_gather_output = all_gather_output.view(world_size, -1)
+     out = [
+         fsdp_param.all_gather_output.view(world_size, -1) for fsdp_param in fsdp_params
+     ]
+     torch.split_with_sizes_copy(
+         all_gather_output, all_gather_input_numels, dim=1, out=out
+     )
+
+
+ @torch.no_grad()
+ def foreach_reduce_scatter(
+     fsdp_params: List[FSDPParam],
+     unsharded_grads: List[torch.Tensor],
+     group: dist.ProcessGroup,
+     reduce_scatter_stream: torch.cuda.Stream,
+     orig_dtype: torch.dtype,
+     reduce_dtype: Optional[torch.dtype],
+     device: torch.device,
+     divide_factors: Optional[Tuple[float, float]],
+ ) -> torch.cuda.Event:
+     """
+     ``unsharded_grads`` owns the references to the gradients computed by
+     autograd, so clearing the list frees the gradients.
+     """
+     grad_dtypes = {grad.dtype for grad in unsharded_grads}
+     if len(grad_dtypes) != 1:
+         # Check this at runtime since it could be a real runtime error if e.g.
+         # fp8 weights do not produce the correct higher precision gradients
+         _raise_assert_with_print(
+             f"FSDP reduce-scatter expects uniform gradient dtype but got {grad_dtypes}"
+         )
+     grad_dtype = unsharded_grads[0].dtype
+     reduce_dtype = reduce_dtype or grad_dtype
+     world_size = group.size()
+     padded_unsharded_sizes = tuple(
+         _get_dim0_padded_size(grad.size(), world_size) for grad in unsharded_grads
+     )
+     reduce_scatter_input_numel = sum(s.numel() for s in padded_unsharded_sizes)
+     reduce_scatter_output_numel = reduce_scatter_input_numel // world_size
+     current_stream = torch.cuda.current_stream()
+     reduce_scatter_stream.wait_stream(current_stream)
+     with torch.cuda.stream(reduce_scatter_stream):
+         reduce_scatter_input = torch.empty(
+             (reduce_scatter_input_numel,), dtype=reduce_dtype, device=device
+         )
+         foreach_reduce_scatter_copy_in(
+             unsharded_grads, reduce_scatter_input, world_size
+         )
+         # Only after the copy-in finishes can we free the gradients, which were
+         # computed in the default stream
+         current_stream.wait_stream(reduce_scatter_stream)
+         unsharded_grads.clear()
+         reduce_scatter_output = reduce_scatter_input.new_empty(
+             (reduce_scatter_output_numel,)
+         )
+         _reduce_scatter(
+             reduce_scatter_output, reduce_scatter_input, group, divide_factors
+         )
+         reduce_scatter_output = _to_dtype_if_needed(reduce_scatter_output, orig_dtype)
+         # - View out and accumulate
+         flat_grad_offset = 0  # [0, reduce_scatter_output_numel - 1]
+         for padded_unsharded_size, fsdp_param in zip(
+             padded_unsharded_sizes, fsdp_params
+         ):
+             new_sharded_grad = torch.as_strided(
+                 reduce_scatter_output,
+                 size=fsdp_param.sharded_size,
+                 stride=fsdp_param.contiguous_sharded_stride,
+                 storage_offset=flat_grad_offset,
+             )
+             to_accumulate_grad = fsdp_param.sharded_param.grad is not None
+             new_sharded_dtensor_grad = fsdp_param.to_sharded_dtensor(new_sharded_grad)
+             if to_accumulate_grad:
+                 fsdp_param.sharded_param.grad += new_sharded_dtensor_grad
+             else:
+                 fsdp_param.sharded_param.grad = new_sharded_dtensor_grad
+             padded_sharded_numel = padded_unsharded_size.numel() // world_size
+             flat_grad_offset += padded_sharded_numel
+         reduce_scatter_view_out_event = reduce_scatter_stream.record_event()
+     # The RS output is allocated in the RS stream and used in the default
+     # stream (for optimizer). To ensure its memory is not reused for later
+     # RSs, we do not need extra synchronization since the sharded parameters
+     # hold refs through the end of backward.
+     return reduce_scatter_view_out_event
+
+
+ def foreach_reduce_scatter_copy_in(
+     unsharded_grads: List[torch.Tensor],
+     reduce_scatter_input: torch.Tensor,
+     world_size: int,
+ ) -> None:
+     grad_views: List[torch.Tensor] = []
+     grads_to_copy: List[torch.Tensor] = []
+     padded_grad_slices: List[torch.Tensor] = []
+     for grad in unsharded_grads:
+         grad_size = grad.size()
+         dim0_padded_size = _get_dim0_padded_size(grad_size, world_size)
+         if dim0_padded_size != grad_size:
+             padded_grad = grad.new_empty(dim0_padded_size)
+             padded_grad_slices.append(padded_grad[: grad.size(0)])
+             grads_to_copy.append(grad)
+             grad = padded_grad
+         grad_views.append(grad.view(world_size, -1))
+     if padded_grad_slices:
+         torch._foreach_copy_(padded_grad_slices, grads_to_copy)
+     torch.cat(grad_views, dim=-1, out=reduce_scatter_input.view(world_size, -1))
+
+
+ def _reduce_scatter(
+     output: torch.Tensor,
+     input: torch.Tensor,
+     group: dist.ProcessGroup,
+     divide_factors: Optional[Tuple[float, float]],
+ ) -> None:
+     if divide_factors:
+         predivide_factor, postdivide_factor = divide_factors
+         _div_if_needed(input, predivide_factor)
+         dist.reduce_scatter_tensor(output, input, group=group)
+         _div_if_needed(output, postdivide_factor)
+     else:
+         # Using NCCL's reduce-scatter to do the division by world size saves
+         # extra memory read/write from a separate division kernel
+         dist.reduce_scatter_tensor(output, input, op=ReduceOp.AVG, group=group)
+
+
+ def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None:
+     if div_factor > 1:
+         tensor.div_(div_factor)
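
A note on the `divide_factors` argument consumed by `_reduce_scatter` above: when it is provided, the usual division by the data-parallel world size N is split into a pre-division before the reduce-scatter and a post-division after it (the factors are computed elsewhere in this diff, in `_fsdp_param_group.py`, as roughly sqrt(N) each, to limit overflow/underflow for low-precision reduce dtypes). A small self-contained sketch of that split, using hypothetical world sizes:

def split_divide_factors(data_parallel_world_size: int) -> tuple[float, float]:
    # Mirrors the ~sqrt(N) split: divide by `pre` before the reduce-scatter and by
    # `post` after it, so pre * post == N while intermediate values keep a safer magnitude.
    factor = 1
    while (
        data_parallel_world_size % factor == 0
        and data_parallel_world_size / factor > factor
    ):
        factor *= 2
    return float(factor), data_parallel_world_size / float(factor)

for n in (2, 8, 64):
    pre, post = split_divide_factors(n)
    assert pre * post == n
    print(n, pre, post)  # 2 -> (2.0, 1.0), 8 -> (4.0, 2.0), 64 -> (8.0, 8.0)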
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py ADDED
@@ -0,0 +1,144 @@
+ import itertools
+ from typing import List, Optional, Set, Tuple, Union
+
+ import torch
+ import torch.distributed as dist
+ import torch.nn as nn
+
+ from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh
+ from torch.distributed.device_mesh import _get_device_handle
+ from ._fsdp_common import _is_composable_with_fsdp, FSDPMeshInfo, HSDPMeshInfo
+ from ._fsdp_state import _get_module_fsdp_state
+
+
+ def _get_post_forward_mesh_info(
+     reshard_after_forward: Union[bool, int], mesh_info: FSDPMeshInfo
+ ) -> Optional[FSDPMeshInfo]:
+     shard_mesh_size = mesh_info.shard_mesh_size
+     if not isinstance(reshard_after_forward, (bool, int)):
+         raise ValueError(
+             "reshard_after_forward should be a bool or an int representing the "
+             f"group size to reshard to, not {reshard_after_forward}"
+         )
+     # NOTE: `isinstance(False, int)` returns `True`.
+     if not isinstance(reshard_after_forward, bool) and isinstance(
+         reshard_after_forward, int
+     ):
+         if (
+             reshard_after_forward < 1
+             or reshard_after_forward > shard_mesh_size
+             or shard_mesh_size % reshard_after_forward != 0
+         ):
+             raise ValueError(
+                 "If passing reshard_after_forward as an int, it should be a "
+                 f"factor of {shard_mesh_size}, not {reshard_after_forward}"
+             )
+         elif reshard_after_forward == 1:
+             reshard_after_forward = False
+         elif reshard_after_forward == shard_mesh_size:
+             reshard_after_forward = True
+     post_forward_mesh_info = None
+     if reshard_after_forward is True:
+         post_forward_mesh_info = mesh_info
+     elif reshard_after_forward is not False:  # int case
+         # For HSDP, we can flatten the two replicate dims into the 0th dim
+         post_forward_mesh_tensor = mesh_info.mesh.mesh.view(-1, reshard_after_forward)
+         post_forward_mesh = DeviceMesh(
+             mesh_info.mesh.device_type, post_forward_mesh_tensor
+         )
+         post_forward_mesh_info = HSDPMeshInfo(
+             post_forward_mesh, shard_mesh_dim=1, replicate_mesh_dim=0
+         )
+     return post_forward_mesh_info
+
+
+ def _init_default_fully_shard_mesh() -> DeviceMesh:
+     """Default to global CUDA mesh if possible else global CPU mesh."""
+     if not dist.distributed_c10d.is_initialized():
+         dist.distributed_c10d.init_process_group()
+     default_pg = dist.distributed_c10d._get_default_group()
+     device_type = "cuda" if torch.cuda.is_available() else "cpu"
+     mesh = init_device_mesh(device_type, mesh_shape=(default_pg.size(),))
+     return mesh
+
+
+ def _get_device_from_mesh(mesh: DeviceMesh) -> torch.device:
+     if mesh.device_type == "cpu":
+         return torch.device("cpu")
+     device_handle = _get_device_handle(mesh.device_type)
+     return torch.device(mesh.device_type, device_handle.current_device())
+
+
+ def _get_managed_modules(root_module: nn.Module) -> List[nn.Module]:
+     modules: List[nn.Module] = []
+     # Track visited modules to avoid visiting shared modules multiple times
+     visited_modules: Set[nn.Module] = set()
+
+     def dfs(module: nn.Module) -> None:
+         """
+         Runs a DFS to collect managed modules, not recursing into modules with
+         a non-composable API or ``fully_shard`` already applied.
+         """
+         if not _is_composable_with_fsdp(module):
+             return
+         elif module is not root_module and _get_module_fsdp_state(module) is not None:
+             return  # nested `fully_shard` module
+         visited_modules.add(module)
+         for submodule in module.children():
+             if submodule not in visited_modules:
+                 dfs(submodule)
+         modules.append(module)
+
+     dfs(root_module)
+     return modules
+
+
+ def _get_managed_states(
+     modules: List[nn.Module],
+ ) -> Tuple[List[nn.Parameter], List[torch.Tensor]]:
+     params: List[nn.Parameter] = []
+     buffers: List[torch.Tensor] = []
+     # Track visited parameters/buffers to avoid visiting shared parameters and
+     # buffers multiple times
+     visited_params: Set[nn.Parameter] = set()
+     visited_buffers: Set[torch.Tensor] = set()
+     for module in modules:
+         for param in module.parameters(recurse=False):
+             if param not in visited_params:
+                 params.append(param)
+                 visited_params.add(param)
+         for buffer in module.buffers(recurse=False):
+             if buffer not in visited_buffers:
+                 buffers.append(buffer)
+                 visited_buffers.add(buffer)
+     return params, buffers
+
+
+ def _move_states_to_device(
+     params: List[nn.Parameter],
+     buffers: List[torch.Tensor],
+     device: torch.device,
+     mesh_info: FSDPMeshInfo,
+ ) -> None:
+     """
+     We have FSDP move states to device for simpler and faster initialization
+     since FSDP almost always uses CUDA for training. We move parameters/buffers
+     rather than modules to support ignoring parameters/buffers in the future.
+     """
+     # TODO: De-duplicate with `_apply` after `swap_tensors` path lands:
+     # https://github.com/pytorch/pytorch/issues/115792
+     for tensor in itertools.chain(params, buffers):
+         if tensor.device == device or tensor.device.type == "meta":
+             # Keep meta-device tensors on meta device for deferred init
+             continue
+         if isinstance(tensor, DTensor):
+             if (dtensor_mesh_type := tensor._spec.mesh.device_type) != device.type:
+                 raise ValueError(
+                     "Requires DTensor to have mesh of the same type as the FSDP mesh "
+                     f"but got {dtensor_mesh_type} for DTensor and {device.type} for FSDP"
+                 )
+             raise AssertionError(
+                 f"Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}"
+             )
+         tensor.data = tensor.to(device)
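
`_get_post_forward_mesh_info` above normalizes `reshard_after_forward` before deciding whether to build a post-forward mesh. A brief illustration of the accepted values, assuming a hypothetical shard mesh of size 8:

shard_mesh_size = 8  # hypothetical FSDP shard mesh size
# Per the validation above:
#   True       -> reshard back to the full shard mesh after forward
#   False or 1 -> do not reshard after forward (parameters stay unsharded until backward)
#   8          -> normalized to True
#   2 or 4     -> reshard to a smaller post-forward mesh of that group size
#   3, 5, 16   -> rejected: not a factor of shard_mesh_size in [1, 8]
valid_int_values = [n for n in range(1, shard_mesh_size + 1) if shard_mesh_size % n == 0]
print(valid_int_values)  # [1, 2, 4, 8]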
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py ADDED
@@ -0,0 +1,438 @@
+ from dataclasses import dataclass, field
+ from enum import auto, Enum
+ from typing import cast, List, Optional, Tuple
+
+ import torch
+ import torch.nn as nn
+
+ from torch._prims_common import make_contiguous_strides_for
+ from torch.distributed._functional_collectives import AsyncCollectiveTensor
+ from torch.distributed._tensor import DTensor, Placement, Replicate, Shard
+ from torch.distributed._tensor.device_mesh import _mesh_resources
+ from torch.distributed._tensor.placement_types import DTensorSpec
+ from ._fsdp_api import MixedPrecisionPolicy
+ from ._fsdp_common import (
+     _chunk_with_empty,
+     _from_local_no_grad,
+     _get_dim0_chunked_size,
+     _raise_assert_with_print,
+     _to_dtype_if_needed,
+     FSDPMeshInfo,
+     HSDPMeshInfo,
+ )
+
+ """
+ [Note: FSDP tensors]
+ FSDP considers the following tensors:
+ - Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one
+   on the module when applying FSDP
+ - Sharded parameter: sharding the original parameter on dim-0 as a DTensor
+   over the main mesh
+ - All-gather input: the ``torch.Tensor`` passed to all-gather, derived from the
+   sharded parameter
+ - All-gather output: the ``torch.Tensor`` resulting from all-gathering the
+   all-gather input
+ - Unsharded parameter: parameter used for forward/backward computation, derived
+   from the all-gather output; autograd leaf
+
+ We define these tensors to describe the general framework that can accommodate
+ extensions, where:
+ - all-gather-input = pre-all-gather-transform(sharded-parameter)
+ - unsharded-parameter = post-all-gather-transform(all-gather-output)
+
+ For the default ``torch.Tensor`` case, the sharded parameter and all-gather
+ input share the same underlying tensor data, meaning that they can be thought
+ of as the same tensors. The same applies for the all-gather output and
+ unsharded parameter. For non-``torch.Tensor`` extensions, these equivalences
+ may no longer hold due to the pre/post-all-gather transforms.
+
+ [Note: FSDP and autograd]
+ FSDP dynamically frees and allocates the unsharded parameter. Since autograd
+ can pack a reference to it or a view to save for backward, we use storage
+ resizing to implement the freeing/allocation since that preserves the aliasing.
+ This implies that we construct the unsharded parameter object once and write to
+ it in-place thereafter. For the default ``torch.Tensor`` original parameter
+ case, the all-gather output and unsharded parameter share the same
+ data, so we use storage resizing on the all-gather output.
+ """
+
+
+ class ShardedState(Enum):
+     """
+     - ``SHARDED``: The sharded parameter is registered to the module. It is the
+       only contributor to parameter memory.
+     - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a
+       smaller world size. Since this data should not be used for computation,
+       we do not register it to the module. Users should reshard the module
+       before any in-place modifications. Both it and the sharded parameter
+       contribute to parameter memory.
+     - ``UNSHARDED``: The unsharded parameter is registered to the module. Both
+       it and the sharded parameter contribute to parameter memory.
+     """
+
+     SHARDED = auto()
+     SHARDED_POST_FORWARD = auto()
+     UNSHARDED = auto()
+
+
+ @dataclass
+ class ParamModuleInfo:
+     """
+     For a parameter, this stores the module and the parameter name to be able
+     to do a parameter swap via ``setattr(module, param_name, ...)`` or to get
+     the parameter via ``getattr(module, param_name)``. We additionally save
+     shared modules and shared parameter names to update them accordingly.
+     """
+
+     # Parameter names are unprefixed, e.g. "weight", not "lin.weight"
+     module: nn.Module
+     param_name: str
+     shared_modules: List[nn.Module] = field(default_factory=list)
+     shared_param_names: List[str] = field(default_factory=list)
+
+
+ class FSDPParam:
+     """
+     This class manages a parameter with FSDP or FSDP variants applied,
+     implementing dim-0 per-parameter sharding.
+     """
+
+     orig_dtype: torch.dtype
+     param_dtype: Optional[torch.dtype]
+     reduce_dtype: Optional[torch.dtype]
+     _orig_size: torch.Size  # ND
+     _contiguous_orig_stride: Tuple[int, ...]
+     sharded_size: torch.Size  # ND
+     contiguous_sharded_stride: Tuple[int, ...]
+     padded_sharded_param_size: torch.Size  # ND
+     sharded_post_forward_size: torch.Size  # ND
+     contiguous_sharded_post_forward_stride: Tuple[int, ...]
+     _sharded_param_data: torch.Tensor  # 1D
+     sharded_param: nn.Parameter  # ND
+     _sharded_post_forward_param_data: Optional[torch.Tensor]  # 1D
+     _sharded_post_forward_param: Optional[nn.Parameter]  # ND
+     _unsharded_param: nn.Parameter  # ND
+     _global_placements: Tuple[Placement, ...]
+     _global_size: torch.Size
+     _global_stride: Tuple[int, ...]
+     # DTensor attributes (only defined for DTensor `param`):
+     _tp_spec: DTensorSpec
+
+     def __init__(
+         self,
+         param: nn.Parameter,
+         module_info: ParamModuleInfo,
+         mesh_info: FSDPMeshInfo,
+         post_forward_mesh_info: Optional[FSDPMeshInfo],
+         device: torch.device,
+         mp_policy: MixedPrecisionPolicy,
+     ):
+         self._module_info: ParamModuleInfo = module_info
+         self.mesh_info = mesh_info
+         self.post_forward_mesh_info = post_forward_mesh_info
+         self.device = device
+         self._init_sharded_param(param, device)
+         if self.post_forward_mesh_info:
+             self._init_sharded_post_forward_param_metadata(param)
+         self.all_gather_output = torch.empty(0)
+         self._param_fqn: Optional[str] = None  # prefixed from root module
+
+     @torch.no_grad()
+     def _init_sharded_param(self, param: nn.Parameter, device: torch.device):
+         if param.device != device and param.device.type != "meta":
+             raise AssertionError(
+                 f"Expects the parameter to already be moved to device {device} but got {param.device}"
+             )
+         # TODO: Replace the sharded DTensor parameter construction logic with
+         # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101
+         # TODO: Simplify the following sharded parameter padding logic after
+         # https://github.com/pytorch/pytorch/issues/113045
+         self.is_dtensor = isinstance(param, DTensor)
+         if self.is_dtensor:
+             self._tp_spec = cast(DTensor, param)._spec
+             if (
+                 self.mesh_info.shard_mesh_dim != 0
+                 or self.mesh_info.replicate_mesh_dim is not None
+             ):
+                 raise NotImplementedError("Using TP with HSDP is not supported")
+             dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh)
+             dp_global_mesh = _mesh_resources.get_parent_mesh(dp_mesh)
+             tp_global_mesh = _mesh_resources.get_parent_mesh(tp_mesh)
+             if dp_global_mesh != tp_global_mesh or (
+                 dp_global_mesh is None or tp_global_mesh is None
+             ):
+                 raise AssertionError(
+                     "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n"
+                     f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}"
+                 )
+             self._global_mesh = dp_global_mesh
+             if len(self._tp_spec.placements) != 1:
+                 raise NotImplementedError(
+                     f"FSDP only supports 1D TP, not {self._tp_spec.placements}"
+                 )
+             global_placements: List[Placement] = [Replicate(), Replicate()]
+             global_dp_mesh_dim = _mesh_resources.get_parent_mesh_dim(dp_mesh)
+             global_tp_mesh_dim = _mesh_resources.get_parent_mesh_dim(tp_mesh)
+             assert global_dp_mesh_dim is not None  # mypy
+             assert global_tp_mesh_dim is not None  # mypy
+             # TODO: Hard code FSDP + TP; need to support HSDP + TP
+             global_placements[global_dp_mesh_dim] = Shard(0)
+             global_placements[global_tp_mesh_dim] = self._tp_spec.placements[0]
+             self._global_placements = tuple(global_placements)
+             self._global_size = param.size()
+             self._global_stride = param.stride()
+             param_data = cast(DTensor, param)._local_tensor
+         else:
+             self._global_mesh = self.mesh_info.mesh
+             self._global_placements = (Shard(0),)
+             self._global_size = param.size()
+             self._global_stride = param.stride()
+             param_data = param
+         self._orig_size = param_data.size()
+         self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size)
+         shard_rank = self.mesh_info.shard_mesh_rank
+         shard_world_size = self.mesh_info.shard_mesh_size
+         chunks = _chunk_with_empty(param_data, shard_world_size, dim=0)
+         sharded_param = chunks[shard_rank]
+         self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size())
+         self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size)
+         padded_sharded_size = chunks[0].size()  # 0th always padded
+         padded_sharded_param = param_data.new_zeros(padded_sharded_size)
+         self.padded_sharded_param_size = padded_sharded_param.size()
+         if sharded_param.numel() > 0:
+             padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param)
+         self._sharded_param_data = padded_sharded_param.view(-1)
+         self.sharded_param = nn.Parameter(
+             self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)])
+         )
+         self.sharded_param.requires_grad_(param.requires_grad)
+         # Let `param_data` be freed normally when its ref count reaches 0 when
+         # the `fully_shard` call returns to allow provided parameters to alias
+         self._setattr_on_modules(self.sharded_param)
+         self.sharded_state = ShardedState.SHARDED
+
+     def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None:
+         mesh_info = self.post_forward_mesh_info
+         assert mesh_info is not None  # mypy
+         param_data = param._local_tensor if isinstance(param, DTensor) else param
+         chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0)
+         self.sharded_post_forward_size = _get_dim0_chunked_size(
+             chunks[mesh_info.shard_mesh_rank], param_data.size()
+         )
+         self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for(
+             self.sharded_post_forward_size
+         )
+
+     def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy):
+         param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype)
+         self.orig_dtype = self.sharded_param.dtype
+         # Clamp `param_dtype` to `None` if no casting is required
+         if param_dtype == self.orig_dtype:
+             param_dtype = None
+         self.param_dtype = param_dtype
+         self.reduce_dtype = reduce_dtype
+         # None indicates that the mixed precision is not enabled
+
+     def init_all_gather_output(
+         self,
+         all_gather_input_numel: int,
+         world_size: int,
+         dtype: torch.dtype,
+         device: torch.device,
+     ):
+         if self.all_gather_output.numel() > 0:
+             return  # already initialized
+         all_gather_output_size = torch.Size([all_gather_input_numel * world_size])
+         self.all_gather_output = torch.empty(
+             all_gather_output_size, dtype=dtype, device=device
+         )
+
+     def init_unsharded_param(self):
+         if hasattr(self, "_unsharded_param"):
+             return  # already initialized
+         # For the default path (no post-all-gather), the all-gather output
+         # gives the unsharded parameter data directly
+         unsharded_param = torch.as_strided(
+             self.all_gather_output,
+             self._orig_size,
+             self._contiguous_orig_stride,
+             storage_offset=0,
+         )
+         if self.is_dtensor:
+             unsharded_param = _from_local_no_grad(
+                 unsharded_param,
+                 self._tp_spec.mesh,
+                 self._tp_spec.placements,
+                 self._global_size,
+                 self._global_stride,
+             )
+         self._unsharded_param = nn.Parameter(unsharded_param)
+         self._unsharded_param.requires_grad_(self.sharded_param.requires_grad)
+
+     def to_sharded(self) -> None:
+         self._setattr_on_modules(self.sharded_param)
+         self.free_all_gather_output()
+         self.sharded_state = ShardedState.SHARDED
+
+     def to_sharded_post_forward(self) -> None:
+         if self.is_dtensor:
+             raise NotImplementedError(
+                 "Resharding to smaller mesh with TP is not supported yet"
+             )
+         self._assert_in_states(ShardedState.UNSHARDED)
+         assert self.post_forward_mesh_info is not None  # mypy
+         shard_world_size = self.post_forward_mesh_info.shard_mesh_size
+         if (numel := self.all_gather_output.numel()) % shard_world_size != 0:
+             _raise_assert_with_print(
+                 f"All-gather output size ({numel}) must be divisible by the shard "
+                 f"world size ({shard_world_size})"
+             )
+         shard_rank = self.post_forward_mesh_info.shard_mesh_rank
+         sharded_numel = numel // shard_world_size
+         self._sharded_post_forward_param_data = (
+             self.all_gather_output.narrow(0, sharded_numel * shard_rank, sharded_numel)
+         ).clone()  # clone to be able to free all-gather output
+         sharded_post_forward_tensor = torch.as_strided(
+             self._sharded_post_forward_param_data,
+             size=self.sharded_post_forward_size,
+             stride=self.contiguous_sharded_post_forward_stride,
+             storage_offset=0,
+         )
+         self._sharded_post_forward_param = nn.Parameter(
+             self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor)
+         )
+         self._setattr_on_modules(self._sharded_post_forward_param)
+         self.free_all_gather_output()
+         self.sharded_state = ShardedState.SHARDED_POST_FORWARD
+
+     def to_unsharded(self) -> None:
+         # Assume that the data has been allocated and all-gathered
+         set_requires_grad_if_needed(self.sharded_param, self._unsharded_param)
+         self._setattr_on_modules(self._unsharded_param)
+         if self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
+             # The data is allocated in the default stream via the post-forward
+             # reshard and must be kept alive for the next all-gather copy-in.
+             # Since we call this method after the copy-out, the data's lifetime
+             # is ensured without further synchronization.
+             self._sharded_post_forward_param = None
+             self._sharded_post_forward_param_data = None  # free
+         self.sharded_state = ShardedState.UNSHARDED
+
+     def _setattr_on_modules(self, param: nn.Parameter) -> None:
+         unsafe_setattr_param(
+             self._module_info.module, self._module_info.param_name, param
+         )
+         for shared_module, shared_param_name in zip(
+             self._module_info.shared_modules, self._module_info.shared_param_names
+         ):
+             unsafe_setattr_param(shared_module, shared_param_name, param)
+
+     def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor:
+         """
+         Converts a local tensor representing either the sharded parameter or
+         sharded gradient to DTensor.
+         """
+         if tensor.shape != self.sharded_size:
+             _raise_assert_with_print(
+                 f"Expects size {self.sharded_size} but got {tensor.shape}"
+             )
+         return _from_local_no_grad(
+             tensor,
+             self._global_mesh,
+             self._global_placements,
+             self._global_size,
+             self._global_stride,
+         )
+
+     def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor:
+         if tensor.shape != self.sharded_post_forward_size:
+             _raise_assert_with_print(
+                 f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}"
+             )
+         assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo)
+         # TODO: Prefer this DTensor to be read-only and generalize the
+         # placement once we support TP.
+         return _from_local_no_grad(
+             tensor,
+             self.post_forward_mesh_info.mesh,
+             (Replicate(), Shard(0)),
+             self._global_size,
+             self._global_stride,
+         )
+
+     def alloc_all_gather_output(self) -> None:
+         unsafe_alloc_storage(self.all_gather_output)
+
+     def free_all_gather_output(self) -> None:
+         unsafe_free_storage(self.all_gather_output)
+
+     @property
+     def all_gather_input(self) -> torch.Tensor:  # 1D
+         self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD)
+         if self.sharded_state == ShardedState.SHARDED:
+             return _to_dtype_if_needed(self._sharded_param_data, self.param_dtype)
+         elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
+             return _to_dtype_if_needed(
+                 cast(torch.Tensor, self._sharded_post_forward_param_data),
+                 self.param_dtype,
+             )
+         return torch.empty(0)  # mypy
+
+     @property
+     def unsharded_param(self) -> nn.Parameter:  # ND
+         self._assert_in_states(ShardedState.UNSHARDED)
+         return self._unsharded_param
+
+     @property
+     def unsharded_grad_data(self) -> torch.Tensor:
+         grad = self.unsharded_param.grad
+         assert grad is not None, "Expects unsharded_param.grad to not be None"
+         return self._get_grad_inner_tensor(grad)
+
+     def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor:
+         if self.is_dtensor:
+             if isinstance(grad, AsyncCollectiveTensor):
+                 grad = grad.wait()
+             grad = cast(DTensor, grad)._local_tensor
+         return grad
+
+     def _assert_in_states(self, *states: ShardedState) -> None:
+         if self.sharded_state not in states:
+             _raise_assert_with_print(
+                 f"Expects to be in one of {states}, not {self.sharded_state}"
+             )
+
+
+ # NOTE: Unsafe here refers to not checking whether the storage is already
+ # allocated or freed, respectively. We should be safe to use them since we
+ # explicitly manage the state transition.
+ def unsafe_alloc_storage(tensor: torch.Tensor) -> None:
+     # Skip the already-allocated check and assume that `tensor` is the base
+     # tensor to save CPU overhead
+     tensor.untyped_storage().resize_(tensor.numel() * tensor.itemsize)
+
+
+ def unsafe_free_storage(tensor: torch.Tensor) -> None:
+     # Skip the already-freed check to save CPU overhead
+     tensor.untyped_storage().resize_(0)
+
+
+ # NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial
+ # CPU overhead, if the module did not override it. For FSDP, we know we do not
+ # need those checks when transitioning between sharded/unsharded parameters.
+ def unsafe_setattr_param(
+     module: nn.Module, param_name: str, param: nn.Parameter
+ ) -> None:
+     if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__:
+         module._parameters[param_name] = param
+     else:  # slow path
+         setattr(module, param_name, param)
+
+
+ def set_requires_grad_if_needed(
+     src_tensor: torch.Tensor, dst_tensor: torch.Tensor
+ ) -> None:
+     # Only call `requires_grad_` if needed to avoid the Python <> C++ context
+     # switch overhead
+     if src_tensor.requires_grad != dst_tensor.requires_grad:
+         dst_tensor.requires_grad_(src_tensor.requires_grad)
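
The storage-resizing approach described in [Note: FSDP and autograd] above (and implemented by `unsafe_alloc_storage` / `unsafe_free_storage`) can be seen in isolation on a plain CPU tensor; a minimal sketch, independent of FSDP:

import torch

t = torch.empty(4, 4)
view = t.view(-1)  # a view that autograd could have saved for backward
t.untyped_storage().resize_(0)  # "free": the tensor object and its views stay alive
assert t.untyped_storage().size() == 0
t.untyped_storage().resize_(t.numel() * t.element_size())  # "allocate" again
t.copy_(torch.ones(4, 4))  # write in place; `view` still aliases the same storage
assert view[0].item() == 1.0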
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py ADDED
@@ -0,0 +1,506 @@
1
+ import contextlib
2
+
3
+ from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ import torch.nn as nn
8
+
9
+ from torch.autograd.graph import Node
10
+ from torch.distributed.fsdp._common_utils import _named_parameters_with_duplicates
11
+ from torch.utils._pytree import tree_flatten, tree_unflatten
12
+ from torch.utils.hooks import RemovableHandle
13
+ from ._fsdp_api import MixedPrecisionPolicy
14
+ from ._fsdp_collectives import (
15
+ AllGatherResult,
16
+ foreach_all_gather,
17
+ foreach_all_gather_copy_out,
18
+ foreach_reduce_scatter,
19
+ )
20
+ from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo, TrainingState
21
+ from ._fsdp_param import FSDPParam, ParamModuleInfo, ShardedState
22
+
23
+ _ModuleToHandleDict = Dict[nn.Module, RemovableHandle] # for state dict
24
+
25
+
26
+ """
27
+ [Note: Overlapping all-gather copy-in and all-gather]
28
+ For implicit forward prefetching, we want to overlap the next copy-in with the
29
+ current all-gather. We do so using a separate copy-in stream. However, since
30
+ we have the all-gather input as a view into the output, we must make sure to
31
+ copy into different memory from the current all-gather's output. Thus, we keep
32
+ a reference to the current all-gather's output and have the next FSDP parameter
33
+ group free it after its copy-in. Finally, we have the last FSDP state flush the
34
+ reference to avoid holding onto memory after forward.
35
+ """
36
+
37
+
38
+ class FSDPCommContext:
39
+ """This has the communication state shared across FSDP states/parameter groups."""
40
+
41
+ def init(self):
42
+ # Setting the all-gather/reduce-scatter streams to be higher priority
43
+ # can help avoid some issues where their copies in/out are delayed and
44
+ # block computation
45
+ high_priority = -1
46
+ # All-gather state and copy-in stream allow overlapping the next
47
+ # copy-in with the current all-gather in forward; copy-in overlaps with
48
+ # reduce-scatter in backward without the separate copy-in stream
49
+ self.all_gather_copy_in_stream = torch.cuda.Stream(priority=high_priority)
50
+ self.all_gather_state: Optional[AllGatherState] = None
51
+ # All-gather stream allows overlapping next all-gather with current
52
+ # forward compute
53
+ self.all_gather_stream = torch.cuda.Stream(priority=high_priority)
54
+ # Reduce-scatter stream gives separate execution "thread" for post-
55
+ # backward logic like pre/post-gradient division and reduce-scatter
56
+ self.reduce_scatter_stream = torch.cuda.Stream(priority=high_priority)
57
+ # Post-forward order for explicit backward prefetching
58
+ self.post_forward_order: List[FSDPParamGroup] = [] # will cause ref cycles
59
+
60
+ def get_all_gather_streams(
61
+ self, training_state: TrainingState
62
+ ) -> Tuple[torch.cuda.Stream, torch.cuda.Stream]:
63
+ if training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD):
64
+ # Use separate streams for implicit prefetching
65
+ return self.all_gather_copy_in_stream, self.all_gather_stream
66
+ current_stream = torch.cuda.current_stream()
67
+ return current_stream, current_stream
68
+
69
+
70
+ # See [Note: Overlapping all-gather copy-in and all-gather]
71
+ class AllGatherState(NamedTuple):
72
+ all_gather_result: AllGatherResult
73
+ event: torch.cuda.Event # all-gather copy-out
74
+
75
+
76
+ class FSDPParamGroup:
77
+ """This class represents a parameter group to communicate together."""
78
+
79
+ _orig_dtype: torch.dtype
80
+ _reduce_dtype: Optional[torch.dtype]
81
+
82
+ def __init__(
83
+ self,
84
+ params: List[nn.Parameter],
85
+ module: nn.Module,
86
+ mesh_info: FSDPMeshInfo,
87
+ post_forward_mesh_info: Optional[FSDPMeshInfo],
88
+ device: torch.device,
89
+ mp_policy: MixedPrecisionPolicy,
90
+ ):
91
+ self.module = module # permit ref cycle because 1:1 lifetime
92
+ param_module_infos = _get_param_module_infos(params, module)
93
+ self.fsdp_params = [
94
+ FSDPParam(
95
+ param, module_info, mesh_info, post_forward_mesh_info, device, mp_policy
96
+ )
97
+ for param, module_info in zip(params, param_module_infos)
98
+ ]
99
+ self.mesh_info = mesh_info
100
+ self.post_forward_mesh_info = post_forward_mesh_info
101
+ self.device = device
102
+ self.mp_policy = mp_policy
103
+ self._training_state = TrainingState.IDLE
104
+ # Group's sharded state always matches its parameters' sharded states
105
+ self._sharded_state = ShardedState.SHARDED
106
+ self._module_fqn: Optional[str] = None # prefixed from root module
107
+
108
+ # - Hook state
109
+ self._module_to_pre_save_state_dict_hook_handle: _ModuleToHandleDict = {}
110
+ self._module_to_pre_load_state_dict_hook_handle: _ModuleToHandleDict = {}
111
+
112
+ # - Communication and communication/computation overlap
113
+ self.comm_ctx = FSDPCommContext()
114
+ # Group's indices in the shared post-forward order
115
+ self._post_forward_indices: List[int] = []
116
+ # Used to avoid mistargeted backward prefetches when the module is used
117
+ # in forward but not in backward: for each forward, we record a tuple
118
+ # of the output's grad fns and later query the autograd engine whether
119
+ # any grad fn will execute in the current backward to know to prefetch.
120
+ self.all_forward_output_grad_fns: Set[Tuple[Node, ...]] = set()
121
+ # Whether to reduce-scatter or all-reduce gradients, respectively
122
+ # (can be set to false to save communication during gradient
123
+ # accumulation); all-reducing without reduce-scatter is disallowed
124
+ self.reduce_scatter_grads: bool = True
125
+ self.all_reduce_grads: bool = True
126
+
127
+ # - CUDA events for stream synchronization
128
+ # Holds the all-gather output buffer, sync objects, and metadata
129
+ self._all_gather_result: Optional[AllGatherResult] = None
130
+ # Holds the reduce-scatter view-out CUDA event that marks the end of
131
+ # the group's post-backward (e.g. reduce-scatter and div), which should
132
+ # be waited on at the end of backward
133
+ self._reduce_scatter_view_out_event: Optional[torch.cuda.Event] = None
134
+ # Holds the reshard-after-forward CUDA event when resharding to a
135
+ # different world size, which should be waited on in the next unshard
136
+ self._reshard_after_forward_event: Optional[torch.cuda.Event] = None
137
+
138
+ # Initialization #
139
+ def _init_mp_dtypes(self) -> None:
140
+ for fsdp_param in self.fsdp_params:
141
+ fsdp_param.init_dtype_attrs(self.mp_policy)
142
+ orig_dtypes = {fsdp_param.orig_dtype for fsdp_param in self.fsdp_params}
143
+ if len(orig_dtypes) != 1:
144
+ # This can be relaxed if we copy-out for the reduce-scatter
145
+ raise AssertionError(
146
+ f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
147
+ )
148
+ self._orig_dtype = next(iter(orig_dtypes))
149
+ reduce_dtypes = {fsdp_param.reduce_dtype for fsdp_param in self.fsdp_params}
150
+ if len(reduce_dtypes) != 1:
151
+ # This can be relaxed if we issue one reduce-scatter per reduce
152
+ # dtype (but we would need a way for users to specify multiple
153
+ # reduce dtypes)
154
+ raise AssertionError(
155
+ f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
156
+ )
157
+ self._reduce_dtype = next(iter(reduce_dtypes))
158
+
159
+ def _init_grad_divide_factors(self):
160
+ data_parallel_world_size = 1
161
+ data_parallel_world_size *= self.mesh_info.shard_mesh_size
162
+ if isinstance(self.mesh_info, HSDPMeshInfo):
163
+ data_parallel_world_size *= self.mesh_info.replicate_mesh_size
164
+ if self._reduce_dtype == torch.float32:
165
+ # Use NCCL's AVG op to divide after reduction since it is more
166
+ # performant and fp32 has sufficient precision
167
+ self._grad_divide_factors: Optional[Tuple[float, float]] = None
168
+ return
169
+ # For N data parallel workers, each worker computes g_i, and they
170
+ # collectively reduce (g_1 + ... + g_N) / N. To avoid overflow and
171
+ # underflow, we divide by ~sqrt(N) before and after the reduction.
172
+ factor: int = 1
173
+ while (
174
+ data_parallel_world_size % factor == 0
175
+ and data_parallel_world_size / factor > factor
176
+ ):
177
+ factor *= 2
178
+ factor = float(factor)
179
+ self._grad_divide_factors = (factor, data_parallel_world_size / factor)
180
+
181
+ def lazy_init(self):
182
+ param_names_on_meta = [
183
+ fsdp_param._param_fqn
184
+ for fsdp_param in self.fsdp_params
185
+ if fsdp_param.sharded_param.device.type == "meta"
186
+ ]
187
+ if param_names_on_meta:
188
+ raise RuntimeError(
189
+ "FSDP parameters should be materialized from meta device before training, "
190
+ f"but the following were still on meta device: {param_names_on_meta}\n"
191
+ "For example, call module.to_empty(device) to materialize to device and "
192
+ "call module.reset_parameters() on each module to initialize values."
193
+ )
194
+ # Initialize mixed precision attributes lazily in case the user changes
195
+ # the parameter dtypes after construction time but before forward
196
+ self._init_mp_dtypes()
197
+ self._init_grad_divide_factors()
198
+ self._register_state_dict_hooks()
199
+
200
+ # Runtime #
201
+ def unshard(self, async_op: bool = False):
202
+ if self._all_gather_result is not None: # already called, pending wait
203
+ return
204
+ if self.is_unsharded:
205
+ return # no-op
206
+ if self._reshard_after_forward_event is not None:
207
+ # Resharded parameter data is allocated in the default stream and
208
+ # used in the all-gather streams
209
+ self._wait_all_gather_streams_on_event(self._reshard_after_forward_event)
210
+ self._reshard_after_forward_event = None
211
+ self._all_gather_result = foreach_all_gather(
212
+ self.fsdp_params,
213
+ self._all_gather_process_group,
214
+ async_op,
215
+ *self.comm_ctx.get_all_gather_streams(self._training_state),
216
+ self.device,
217
+ )
218
+
219
+ def wait_for_unshard(self):
220
+ """
221
+ 1. In forward with implict prefetching, to overlap the current copy-out
222
+ with the next all-gather, we save a reference to the current all-gather
223
+ result to free after the next copy-out.
224
+ 2. Otherwise (explicit prefetching or in backward), we free the
225
+ all-gather result immediately after the current copy-out since we can
226
+ already overlap the current copy-out with the previous reduce-scatter.
227
+ """
228
+ if not self._all_gather_result:
229
+ return # no preceding unshard
230
+ if self._training_state == TrainingState.FORWARD: # implicit prefetch
231
+ if prev_all_gather_state := self.comm_ctx.all_gather_state:
232
+ self._wait_all_gather_streams_on_event(prev_all_gather_state.event)
233
+ self.comm_ctx.all_gather_state = None # free the all-gather result
234
+ foreach_all_gather_copy_out(
235
+ self._all_gather_result, self.fsdp_params, self._all_gather_process_group
236
+ )
237
+ for fsdp_param in self.fsdp_params:
238
+ fsdp_param.init_unsharded_param() # no-op after 1st call
239
+ self._to_unsharded()
240
+ all_gather_copy_out_event = torch.cuda.Event()
241
+ all_gather_copy_out_event.record()
242
+ if self._training_state == TrainingState.FORWARD:
243
+ self.comm_ctx.all_gather_state = AllGatherState(
244
+ self._all_gather_result, all_gather_copy_out_event
245
+ )
246
+ else:
247
+ self._wait_all_gather_streams_on_event(all_gather_copy_out_event)
248
+ self._all_gather_result = None # free unless saved in `all_gather_state`
249
+
250
+ def _wait_all_gather_streams_on_event(self, event: torch.cuda.Event):
251
+ self.comm_ctx.all_gather_copy_in_stream.wait_event(event)
252
+ self.comm_ctx.all_gather_stream.wait_event(event)
253
+
254
+ def reshard(self):
255
+ if self._training_state == TrainingState.FORWARD:
256
+ if not self._reshard_after_forward:
257
+ return
258
+ if self._use_post_forward_mesh:
259
+ self._to_sharded_post_forward()
260
+ self._reshard_after_forward_event = torch.cuda.Event()
261
+ self._reshard_after_forward_event.record()
262
+ return
263
+ self._to_sharded()
264
+
265
+ def pre_forward(
266
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
267
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
268
+ with torch.profiler.record_function("FSDP::pre_forward"):
269
+ self._training_state = TrainingState.FORWARD
270
+ self.unshard()
271
+ self.wait_for_unshard()
272
+ args, kwargs = self._register_post_backward_hook(args, kwargs)
273
+ return args, kwargs
274
+
275
+ def post_forward(self, module: nn.Module, input: Any, output: Any):
276
+ with torch.profiler.record_function("FSDP::post_forward"):
277
+ self.reshard()
278
+ self._record_post_forward()
279
+ self._training_state = TrainingState.IDLE
280
+ return output
281
+
282
+ def _record_post_forward(self) -> None:
283
+ # Since a group has one pre-backward unshard for each forward call
284
+ # before the backward, we record each usage (with multiplicity)
285
+ post_forward_index = len(self.comm_ctx.post_forward_order)
286
+ self.comm_ctx.post_forward_order.append(self)
287
+ self._post_forward_indices.append(post_forward_index)
288
+
289
+ def pre_backward(self, forward_grad_fns: Tuple[Any, ...], *unused: Any):
290
+ with torch.profiler.record_function("FSDP::pre_backward"):
291
+ self._training_state = TrainingState.PRE_BACKWARD
292
+ self.unshard() # no-op if prefetched
293
+ self.wait_for_unshard()
294
+ # May already be removed if running multiple `backward`s
295
+ self.all_forward_output_grad_fns.discard(forward_grad_fns)
296
+ self._prefetch_unshard()
297
+
298
+ def post_backward(self, *unused: Any):
299
+ self._training_state = TrainingState.POST_BACKWARD
300
+ with torch.profiler.record_function("FSDP::post_backward_reshard"):
301
+ if not self.reduce_scatter_grads:
302
+ self.reshard()
303
+ return
304
+ # Save the autograd-computed gradients before resharding to only
305
+ # access the unsharded parameters when their data is present
306
+ fsdp_params_with_grad: List[FSDPParam] = []
307
+ unsharded_grads: List[torch.Tensor] = []
308
+ for fsdp_param in self.fsdp_params:
309
+ if fsdp_param.unsharded_param.grad is not None:
310
+ fsdp_params_with_grad.append(fsdp_param)
311
+ unsharded_grads.append(fsdp_param.unsharded_grad_data)
312
+ fsdp_param.unsharded_param.grad = None
313
+ self.reshard()
314
+ if len(fsdp_params_with_grad) == 0:
315
+ return
316
+ with torch.profiler.record_function("FSDP::post_backward_reduce"):
317
+ self._reduce_scatter_view_out_event = foreach_reduce_scatter(
318
+ fsdp_params_with_grad,
319
+ unsharded_grads,
320
+ self._reduce_scatter_process_group,
321
+ self.comm_ctx.reduce_scatter_stream,
322
+ self._orig_dtype,
323
+ self._reduce_dtype,
324
+ self.device,
325
+ self._grad_divide_factors,
326
+ )
327
+
328
+ def finalize_backward(self):
329
+ if self._reduce_scatter_view_out_event is not None:
330
+ torch.cuda.current_stream().wait_event(self._reduce_scatter_view_out_event)
331
+ self._reduce_scatter_view_out_event = None
332
+ self._training_state = TrainingState.IDLE
333
+ self._post_forward_indices.clear()
334
+ self.all_forward_output_grad_fns.clear()
335
+
336
+ def _prefetch_unshard(self):
337
+ if self._training_state == TrainingState.PRE_BACKWARD:
338
+ if not self._post_forward_indices:
339
+ # Can be cleared if running multiple `backward`s
340
+ return
341
+ curr_index = self._post_forward_indices.pop()
342
+ if (target_index := curr_index - 1) < 0:
343
+ return
344
+ target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index]
345
+ if any(
346
+ torch._C._will_engine_execute_node(grad_fn) # type: ignore[attr-defined]
347
+ for grad_fns in target_fsdp_param_group.all_forward_output_grad_fns
348
+ for grad_fn in grad_fns
349
+ ):
350
+ with torch.profiler.record_function(
351
+ "FSDP::backward_prefetch"
352
+ ), target_fsdp_param_group.use_training_state(
353
+ TrainingState.PRE_BACKWARD
354
+ ):
355
+ target_fsdp_param_group.unshard()
356
+
357
+ # Utilities #
358
+ def _to_sharded(self):
359
+ if not self.is_sharded:
360
+ for fsdp_param in self.fsdp_params:
361
+ fsdp_param.to_sharded()
362
+ self._sharded_state = ShardedState.SHARDED
363
+
364
+ def _to_sharded_post_forward(self):
365
+ if not self.is_sharded_post_forward:
366
+ for fsdp_param in self.fsdp_params:
367
+ fsdp_param.to_sharded_post_forward()
368
+ self._sharded_state = ShardedState.SHARDED_POST_FORWARD
369
+
370
+ def _to_unsharded(self):
371
+ if not self.is_unsharded:
372
+ for fsdp_param in self.fsdp_params:
373
+ fsdp_param.to_unsharded()
374
+ self._sharded_state = ShardedState.UNSHARDED
375
+
376
+ @property
377
+ def is_sharded(self) -> bool:
378
+ return self._sharded_state == ShardedState.SHARDED
379
+
380
+ @property
381
+ def is_sharded_post_forward(self) -> bool:
382
+ return self._sharded_state == ShardedState.SHARDED_POST_FORWARD
383
+
384
+ @property
385
+ def is_unsharded(self) -> bool:
386
+ return self._sharded_state == ShardedState.UNSHARDED
387
+
388
+ @contextlib.contextmanager
389
+ def use_training_state(self, training_state: TrainingState):
390
+ old_training_state = self._training_state
391
+ self._training_state = training_state
392
+ try:
393
+ yield
394
+ finally:
395
+ self._training_state = old_training_state
396
+
397
+ # Hook Registration #
398
+ def _register_post_backward_hook(
399
+ self, args: Tuple[Any, ...], kwargs: Dict[str, Any]
400
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
401
+ if not torch.is_grad_enabled():
402
+ return args, kwargs
403
+ args_list, args_spec = tree_flatten(args)
404
+ kwargs_list, kwargs_spec = tree_flatten(kwargs)
405
+ args_kwargs_list = list(args_list) + list(kwargs_list)
406
+ inp_tensor_indices: List[int] = []
407
+ inp_tensors: List[torch.Tensor] = []
408
+ for i, obj in enumerate(args_kwargs_list):
409
+ if torch.is_tensor(obj) and obj.requires_grad:
410
+ inp_tensor_indices.append(i)
411
+ inp_tensors.append(obj)
412
+ if len(inp_tensors) == 0:
413
+ return args, kwargs # no tensors that require gradients
414
+ inp_tensors = RegisterPostBackwardFunction.apply(self, *inp_tensors)
415
+ for inp_tensor_idx, inp_tensor in zip(inp_tensor_indices, inp_tensors):
416
+ args_kwargs_list[inp_tensor_idx] = inp_tensor
417
+ args_list = args_kwargs_list[: len(args_list)]
418
+ kwargs_list = args_kwargs_list[len(args_list) :]
419
+ args = tree_unflatten(args_list, args_spec)
420
+ kwargs = tree_unflatten(kwargs_list, kwargs_spec)
421
+ return args, kwargs
422
+
423
+ def _register_state_dict_hooks(self) -> None:
424
+ assert len(self._module_to_pre_save_state_dict_hook_handle) == 0
425
+ assert len(self._module_to_pre_load_state_dict_hook_handle) == 0
426
+ modules_with_fsdp_params: Set[nn.Module] = {
427
+ fsdp_param._module_info.module for fsdp_param in self.fsdp_params
428
+ }
429
+
430
+ def to_sharded_hook(*args: Any, **kwargs: Any) -> None:
431
+ self._to_sharded()
432
+
433
+ for module in modules_with_fsdp_params:
434
+ self._module_to_pre_save_state_dict_hook_handle[
435
+ module
436
+ ] = module.register_state_dict_pre_hook(to_sharded_hook)
437
+ self._module_to_pre_load_state_dict_hook_handle[
438
+ module
439
+ ] = module._register_load_state_dict_pre_hook(to_sharded_hook)
440
+
441
+ # Properties #
442
+ @property
443
+ def _reshard_after_forward(self) -> bool:
444
+ return self.post_forward_mesh_info is not None
445
+
446
+ @property
447
+ def _use_post_forward_mesh(self) -> bool:
448
+ return (
449
+ self._reshard_after_forward
450
+ and self.mesh_info != self.post_forward_mesh_info
451
+ )
452
+
453
+ @property
454
+ def _all_gather_process_group(self) -> dist.ProcessGroup:
455
+ mesh_info = (
456
+ cast(FSDPMeshInfo, self.post_forward_mesh_info)
457
+ if self.is_sharded_post_forward
458
+ else self.mesh_info
459
+ )
460
+ assert isinstance(mesh_info, FSDPMeshInfo)
461
+ return mesh_info.shard_process_group
462
+
463
+ @property
464
+ def _reduce_scatter_process_group(self) -> dist.ProcessGroup:
465
+ mesh_info = self.mesh_info
466
+ assert isinstance(mesh_info, FSDPMeshInfo)
467
+ return mesh_info.shard_process_group
468
+
469
+
470
+ def _get_param_module_infos(
471
+ params: List[nn.Parameter], module: nn.Module
472
+ ) -> List[ParamModuleInfo]:
473
+ """
474
+ Shared parameter: lin1.weight = lin2.weight
475
+ Shared module: mlp.lin1 = mlp.lin2
476
+ We do not remove duplicates when traversing both modules and parameters to
477
+ find shared modules' parameters and shared parameters within a module.
478
+ """
479
+ params_set = set(params)
480
+ param_to_module_info: Dict[nn.Parameter, ParamModuleInfo] = {}
481
+ for _, submodule in module.named_modules(remove_duplicate=False):
482
+ for param_name, param in _named_parameters_with_duplicates(
483
+ submodule, recurse=False
484
+ ):
485
+ if param in params_set:
486
+ if param not in param_to_module_info:
487
+ param_to_module_info[param] = ParamModuleInfo(submodule, param_name)
488
+ else:
489
+ param_to_module_info[param].shared_modules.append(submodule)
490
+ param_to_module_info[param].shared_param_names.append(param_name)
491
+ if len(param_to_module_info) != len(params):
492
+ raise AssertionError(f"Some parameters are not in the module tree of {module}")
493
+ return [param_to_module_info[param] for param in params]
494
+
495
+
496
+ class RegisterPostBackwardFunction(torch.autograd.Function):
497
+ @staticmethod
498
+ def forward(ctx, param_group: FSDPParamGroup, *inputs: torch.Tensor):
499
+ # All tensors in `inputs` should require gradient
500
+ ctx.param_group = param_group
501
+ return inputs
502
+
503
+ @staticmethod
504
+ def backward(ctx, *grads: torch.Tensor):
505
+ ctx.param_group.post_backward()
506
+ return (None,) + grads
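The `RegisterPostBackwardFunction` above is what triggers `post_backward` (reshard plus gradient reduce-scatter): the group's forward inputs are routed through an identity `autograd.Function`, so its `backward` runs only once gradients for those inputs exist. Below is a minimal, self-contained sketch of the same pattern; the `RunAfterBackward` name and the callback are illustrative, not part of this file.

```python
import torch


class RunAfterBackward(torch.autograd.Function):
    """Identity on its tensor inputs; runs a callback when backward reaches it."""

    @staticmethod
    def forward(ctx, callback, *inputs):
        ctx.callback = callback
        return inputs  # returned unchanged, like RegisterPostBackwardFunction

    @staticmethod
    def backward(ctx, *grads):
        ctx.callback()  # e.g. reshard parameters / kick off gradient reduction
        return (None,) + grads  # no gradient for the callback argument


x = torch.randn(4, requires_grad=True)
(y,) = RunAfterBackward.apply(lambda: print("post-backward work"), x)
(y * 2).sum().backward()  # the callback prints once y's gradient is computed
```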
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py ADDED
@@ -0,0 +1,246 @@
1
+ import functools
2
+
3
+ from typing import Any, Dict, List, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch.autograd import Variable
8
+ from torch.autograd.graph import Node, register_multi_grad_hook
9
+ from torch.distributed._composable_state import (
10
+ _get_module_state,
11
+ _insert_module_state,
12
+ _State,
13
+ )
14
+ from torch.distributed.utils import _to_kwargs
15
+ from torch.utils._pytree import tree_flatten, tree_map
16
+ from torch.utils.hooks import RemovableHandle
17
+ from ._fsdp_api import MixedPrecisionPolicy
18
+ from ._fsdp_common import _cast_fp_tensor, TrainingState
19
+ from ._fsdp_param import FSDPParam
20
+ from ._fsdp_param_group import FSDPCommContext, FSDPParamGroup
21
+
22
+
23
+ class FSDPStateContext:
24
+ """This has state shared across FSDP states."""
25
+
26
+ def __init__(self):
27
+ # All FSDP states in the root state's module tree
28
+ self.all_states: List[FSDPState] = []
29
+ # Iteration's forward root runs the once-per-forward logic; this root
30
+ # may not be the overall root set by lazy initialization in cases where
31
+ # only a submodule runs forward (e.g. encoder-only for eval)
32
+ self.iter_forward_root: Optional[FSDPState] = None
33
+ # Final callback should only be queued once per backward
34
+ self.post_backward_final_callback_queued: bool = False
35
+ # Whether to finalize backward in this backward's final callback
36
+ self.is_last_backward: bool = True
37
+
38
+
39
+ class FSDPState(_State):
40
+ def __init__(self):
41
+ super().__init__()
42
+ self._fsdp_param_group: Optional[FSDPParamGroup] = None
43
+ self._is_root: Optional[bool] = None # root set during lazy init
44
+ self._state_ctx = FSDPStateContext()
45
+ self._comm_ctx = FSDPCommContext()
46
+ self._training_state: TrainingState = TrainingState.IDLE
47
+ self._pre_backward_hook_handles: List[RemovableHandle] = []
48
+
49
+ # Define a separate init since `__init__` is called in the contract
50
+ def init(
51
+ self, module: nn.Module, device: torch.device, mp_policy: MixedPrecisionPolicy
52
+ ) -> None:
53
+ _insert_module_state(module, self)
54
+ self._module = module
55
+ self._device = device
56
+ self._mp_policy = mp_policy
57
+ self._pre_forward_hook_handle = module.register_forward_pre_hook(
58
+ self._pre_forward, prepend=True, with_kwargs=True
59
+ )
60
+ self._post_forward_hook_handle = module.register_forward_hook(
61
+ self._post_forward, prepend=False
62
+ )
63
+
64
+ def _root_pre_forward(
65
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
66
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
67
+ self._lazy_init()
68
+ if self._state_ctx.iter_forward_root is not None:
69
+ return args, kwargs
70
+ self._state_ctx.iter_forward_root = self
71
+ with torch.profiler.record_function("FSDP::root_pre_forward"):
72
+ # Wait for optimizer before implicitly prefetched all-gathers
73
+ current_stream = torch.cuda.current_stream()
74
+ self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream)
75
+ self._comm_ctx.all_gather_stream.wait_stream(current_stream)
76
+ if self._device.type == "cuda":
77
+ with torch.profiler.record_function("FSDP::inputs_to_device"):
78
+ args_tuple, kwargs_tuple = _to_kwargs(
79
+ args, kwargs, self._device, False
80
+ ) # same as DDP
81
+ args, kwargs = args_tuple[0], kwargs_tuple[0]
82
+ return args, kwargs
83
+
84
+ def _lazy_init(self) -> None:
85
+ """
86
+ Lazy initialization represents when all modules' parallelisms have
87
+ finalized (e.g. FSDP has been applied to all desired modules). This
88
+ means that we can determine which state is the root, and we do so by
89
+ the 1st state to run forward.
90
+ """
91
+ if self._is_root is not None:
92
+ return # no-op: already initialized
93
+ self._is_root = True
94
+ root_module = self._module
95
+ for module_name, module in root_module.named_modules():
96
+ if (state := _get_module_fsdp_state(module)) is None:
97
+ continue
98
+ if module is not root_module:
99
+ if state._is_root is not None:
100
+ raise RuntimeError(
101
+ "FSDP state has already been lazily initialized for "
102
+ f"{module_name}\nFSDP requires running forward through "
103
+ "the root module first"
104
+ )
105
+ state._is_root = False
106
+ self._state_ctx.all_states.append(state)
107
+ if self._fsdp_param_group:
108
+ # For the root, do not reshard after forward since for training,
109
+ # the parameters would be freed and all-gathered immediately
110
+ self._fsdp_param_group.post_forward_mesh_info = None
111
+ self._init_fqns()
112
+ self._init_shared_state()
113
+ # Run parameter group lazy inits after initializing FQNs for improved
114
+ # error messages
115
+ for state in self._state_ctx.all_states:
116
+ if state._fsdp_param_group:
117
+ state._fsdp_param_group.lazy_init()
118
+
119
+ def _init_shared_state(self) -> None:
120
+ self._comm_ctx.init()
121
+ for state in self._state_ctx.all_states:
122
+ state._state_ctx = self._state_ctx
123
+ state._comm_ctx = self._comm_ctx
124
+ if fsdp_param_group := state._fsdp_param_group:
125
+ fsdp_param_group.comm_ctx = self._comm_ctx
126
+
127
+ def _init_fqns(self) -> None:
128
+ """Sets module and parameter FQN attributes for debugging."""
129
+ assert self._is_root
130
+ root_module = self._module
131
+ param_to_fsdp_param: Dict[nn.Parameter, FSDPParam] = {}
132
+ module_to_fsdp_param_group: Dict[nn.Module, FSDPParamGroup] = {}
133
+ for state in self._state_ctx.all_states:
134
+ if fsdp_param_group := state._fsdp_param_group:
135
+ for fsdp_param in fsdp_param_group.fsdp_params:
136
+ param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param
137
+ module_to_fsdp_param_group[fsdp_param_group.module] = fsdp_param_group
138
+ for param_name, param in root_module.named_parameters():
139
+ if param in param_to_fsdp_param:
140
+ param_to_fsdp_param[param]._param_fqn = param_name
141
+ for module_name, module in root_module.named_modules():
142
+ if module in module_to_fsdp_param_group:
143
+ module_to_fsdp_param_group[module]._module_fqn = module_name
144
+
145
+ def _pre_forward(
146
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
147
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
148
+ # When composing with module-hook-based activation checkpointing, the
149
+ # pre-backward hook is responsible for the unshard
150
+ if self._training_state == TrainingState.PRE_BACKWARD:
151
+ return args, kwargs
152
+ self._training_state = TrainingState.FORWARD
153
+ args, kwargs = self._root_pre_forward(module, args, kwargs)
154
+ if self._mp_policy.cast_forward_inputs and self._mp_policy.param_dtype:
155
+ with torch.profiler.record_function("FSDP::cast_forward_inputs"):
156
+ cast_fn = functools.partial(
157
+ _cast_fp_tensor, self._mp_policy.param_dtype
158
+ )
159
+ args, kwargs = tree_map(cast_fn, args), tree_map(cast_fn, kwargs)
160
+ if self._fsdp_param_group:
161
+ args, kwargs = self._fsdp_param_group.pre_forward(module, args, kwargs)
162
+ return args, kwargs
163
+
164
+ def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any:
165
+ # When composing with module-hook-based activation checkpointing, the
166
+ # post-backward hook is responsible for the reshard
167
+ if self._training_state == TrainingState.PRE_BACKWARD:
168
+ return output
169
+ if self._fsdp_param_group:
170
+ output = self._fsdp_param_group.post_forward(module, input, output)
171
+ output = self._register_pre_backward_hook(output)
172
+ self._training_state = TrainingState.IDLE
173
+ if self._state_ctx.iter_forward_root is self:
174
+ if all_gather_state := self._comm_ctx.all_gather_state:
175
+ # Free the last all-gather result if needed; refer to
176
+ # [Note: Overlapping all-gather copy-in and all-gather]
177
+ self._comm_ctx.all_gather_copy_in_stream.wait_event(
178
+ all_gather_state.event
179
+ )
180
+ self._comm_ctx.all_gather_stream.wait_event(all_gather_state.event)
181
+ self._comm_ctx.all_gather_state = None # free the all-gather result
182
+ self._state_ctx.iter_forward_root = None
183
+ if self._mp_policy.output_dtype is not None:
184
+ with torch.profiler.record_function("FSDP::cast_forward_outputs"):
185
+ output = tree_map(
186
+ functools.partial(_cast_fp_tensor, self._mp_policy.output_dtype),
187
+ output,
188
+ )
189
+ return output
190
+
191
+ def _pre_backward(self, forward_grad_fns: Tuple[Node, ...], *unused: Any) -> None:
192
+ self._training_state = TrainingState.PRE_BACKWARD
193
+ self._register_root_post_backward_final_callback()
194
+ if self._fsdp_param_group:
195
+ self._fsdp_param_group.pre_backward(forward_grad_fns, *unused)
196
+
197
+ def _root_post_backward_final_callback(self) -> None:
198
+ with torch.profiler.record_function("FSDP::root_post_backward_callback"):
199
+ for state in self._state_ctx.all_states:
200
+ if state._fsdp_param_group and state._fsdp_param_group.is_unsharded:
201
+ # Run post-backward in case forward inputs did not require
202
+ # gradient so the autograd backward did not run
203
+ state._fsdp_param_group.post_backward()
204
+ if self._state_ctx.is_last_backward:
205
+ state._finalize_backward()
206
+ if self._state_ctx.is_last_backward:
207
+ self._comm_ctx.post_forward_order.clear()
208
+ self._state_ctx.post_backward_final_callback_queued = False
209
+
210
+ def _finalize_backward(self) -> None:
211
+ self._training_state = TrainingState.IDLE
212
+ for handle in self._pre_backward_hook_handles:
213
+ handle.remove()
214
+ self._pre_backward_hook_handles.clear()
215
+ if self._fsdp_param_group:
216
+ self._fsdp_param_group.finalize_backward()
217
+
218
+ def _register_pre_backward_hook(self, output: Any) -> Any:
219
+ if not torch.is_grad_enabled():
220
+ return output
221
+
222
+ flat_outputs, _ = tree_flatten(output)
223
+ tensors = tuple(t for t in flat_outputs if t.requires_grad)
224
+ if tensors:
225
+ grad_fns = tuple(t.grad_fn for t in tensors if t.grad_fn is not None)
226
+ pre_backward = functools.partial(self._pre_backward, grad_fns)
227
+ handle = register_multi_grad_hook(tensors, pre_backward, mode="any")
228
+ self._pre_backward_hook_handles.append(handle)
229
+ if self._fsdp_param_group:
230
+ self._fsdp_param_group.all_forward_output_grad_fns.add(grad_fns)
231
+ return output
232
+
233
+ def _register_root_post_backward_final_callback(self):
234
+ if self._state_ctx.post_backward_final_callback_queued:
235
+ return
236
+ self._state_ctx.post_backward_final_callback_queued = True
237
+ Variable._execution_engine.queue_callback(
238
+ self._root_post_backward_final_callback
239
+ )
240
+
241
+
242
+ def _get_module_fsdp_state(module: nn.Module) -> Optional[FSDPState]:
243
+ state = _get_module_state(module)
244
+ if isinstance(state, FSDPState):
245
+ return state
246
+ return None
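`_register_pre_backward_hook` above relies on `torch.autograd.graph.register_multi_grad_hook(..., mode="any")` so that the pre-backward unshard fires as soon as the gradient of any forward output is computed. A small standalone example of that hook API on plain tensors (the tensors and the callback here are illustrative):

```python
import torch
from torch.autograd.graph import register_multi_grad_hook

a = torch.randn(3, requires_grad=True)
b = torch.randn(3, requires_grad=True)
out1, out2 = a * 2, b * 3

# mode="any": the hook runs once, with the first of the watched gradients that
# autograd produces, which is the moment FSDP would unshard the group.
handle = register_multi_grad_hook(
    (out1, out2), lambda grad: print("first output grad arrived"), mode="any"
)
(out1.sum() + out2.sum()).backward()
handle.remove()
```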
venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py ADDED
@@ -0,0 +1,246 @@
1
+ from typing import Any, cast, Optional, Union
2
+
3
+ import typing_extensions
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from torch.distributed._composable import contract
9
+ from torch.distributed._tensor import DeviceMesh, DTensor
10
+
11
+ from ._fsdp_api import MixedPrecisionPolicy
12
+ from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo
13
+ from ._fsdp_init import (
14
+ _get_device_from_mesh,
15
+ _get_managed_modules,
16
+ _get_managed_states,
17
+ _get_post_forward_mesh_info,
18
+ _init_default_fully_shard_mesh,
19
+ _move_states_to_device,
20
+ )
21
+ from ._fsdp_param_group import FSDPParamGroup
22
+ from ._fsdp_state import _get_module_fsdp_state, FSDPState
23
+
24
+
25
+ # The decorator adds a state object to `module` that can be accessed via
26
+ # `fully_shard.state(module)`. The state object and module are 1:1.
27
+ @contract(state_cls=FSDPState)
28
+ def fully_shard(
29
+ module: nn.Module,
30
+ *,
31
+ mesh: Optional[DeviceMesh] = None,
32
+ reshard_after_forward: Union[bool, int] = True,
33
+ mp_policy: MixedPrecisionPolicy = MixedPrecisionPolicy(),
34
+ ):
35
+ """
36
+ Shard module parameters across data parallel workers.
37
+
38
+ This function applies fully sharded data parallelism (FSDP) or a variant to
39
+ ``module``, a technique for memory savings at the cost of communication.
40
+ Parameters are sharded across ``mesh``, and in turn, so are their gradients
41
+ and optimizer states.
42
+
43
+ The sharded parameters are all-gathered to construct the unsharded
44
+ parameters for forward or backward computation. The unsharded parameters
45
+ are freed after computation to save memory. The gradients are reduced
46
+ across the mesh and divided by the mesh size for data parallelism. The
47
+ optimizer step runs on the sharded parameters.
48
+
49
+ Each call to ``fully_shard`` constructs one communication group that
50
+ includes the parameters in ``module.parameters()`` except those already
51
+ assigned to a group from a nested call. Each group's parameters and its
52
+ gradients are communicated together in one collective, respectively.
53
+ Constructing multiple groups across the model (e.g. "layer by layer")
54
+ allows for peak memory savings and communication/computation overlap.
55
+
56
+ Implementation-wise, the sharded parameters are represented as
57
+ :class:`DTensor` s, sharded on dim-0, and the unsharded parameters are
58
+ represented as :class:`Tensor` s. A module forward pre-hook all-gathers the
59
+ parameters, and a module forward hook frees them. Similar backward hooks
60
+ gather parameters and later free parameters/reduce gradients.
61
+
62
+ Args:
63
+ mesh (Optional[DeviceMesh]): This data parallel mesh defines the
64
+ sharding and device. If 1D, then parameters are fully sharded
65
+ across the 1D mesh (FSDP). If 2D, then parameters are sharded
66
+ across the 0th dim and replicated across the 1st dim (HSDP). The
67
+ mesh's device type gives the device type used for communication;
68
+ if a CUDA or CUDA-like device type, then we use the current device.
69
+ reshard_after_forward (Union[bool, int]): This controls the parameter
70
+ behavior after forward and can trade off memory and communication:
71
+ - If ``True``, then this reshards parameters after forward and
72
+ all-gathers in backward.
73
+ - If ``False``, then this keeps the unsharded parameters in memory
74
+ after forward and avoids the all-gather in backward.
75
+ - If an ``int``, then this represents the world size to reshard to
76
+ after forward. It should be a non-trivial divisor of the ``mesh``
77
+ shard dim size (i.e. excluding 1 and the dim size itself). A choice
78
+ may be the intra-node size (e.g. ``torch.cuda.device_count()``).
79
+ This allows the all-gather in backward to be over a smaller world
80
+ size at the cost of higher memory usage than setting to ``True``.
81
+ - The root FSDP state has its value specially set to ``False`` as a
82
+ heuristic since its parameters would typically be immediately
83
+ all-gathered for backward.
84
+ - After forward, the parameters registered to the module depend on
85
+ this: The registered parameters are the sharded parameters if
86
+ ``True``; unsharded parameters if ``False``; and the parameters
87
+ resharded to the smaller mesh otherwise. To modify the parameters
88
+ between forward and backward, the registered parameters must be the
89
+ sharded parameters. For ``False`` or an ``int``, this can be done
90
+ by manually resharding via :meth:`reshard`.
91
+ mp_policy (MixedPrecisionPolicy): This controls the mixed precision
92
+ policy, which offers parameter/reduction mixed precision for this
93
+ module. See :class:`MixedPrecisionPolicy` for details.
94
+ """
95
+ if isinstance(module, (nn.ModuleList, nn.ModuleDict)):
96
+ raise ValueError(
97
+ f"fully_shard does not support containers that do not implement forward: {module}"
98
+ )
99
+ mesh = mesh or _init_default_fully_shard_mesh()
100
+ if mesh.ndim not in (1, 2):
101
+ raise ValueError(f"fully_shard expects a 1D or 2D DeviceMesh but got {mesh}")
102
+ elif mesh.ndim == 1:
103
+ mesh_info = FSDPMeshInfo(mesh, shard_mesh_dim=0)
104
+ else:
105
+ mesh_info = HSDPMeshInfo(mesh, shard_mesh_dim=1, replicate_mesh_dim=0)
106
+ device = _get_device_from_mesh(mesh)
107
+ post_forward_mesh_info = _get_post_forward_mesh_info(
108
+ reshard_after_forward, mesh_info
109
+ )
110
+
111
+ state = fully_shard.state(module)
112
+ state.init(module, device, mp_policy)
113
+
114
+ managed_modules = _get_managed_modules(module)
115
+ params, buffers = _get_managed_states(managed_modules)
116
+ _move_states_to_device(params, buffers, device, mesh_info)
117
+ if params:
118
+ state._fsdp_param_group = FSDPParamGroup(
119
+ params, module, mesh_info, post_forward_mesh_info, device, mp_policy
120
+ )
121
+
122
+ # for dynamo
123
+ for module in managed_modules:
124
+ module._is_fsdp_managed_module = True # type: ignore[assignment]
125
+ module._fsdp_use_orig_params = True # type: ignore[assignment]
126
+
127
+ # Place FSDP leftmost for highest priority in the method resolution order
128
+ cls = module.__class__
129
+ dct = {"__deepcopy__": unimplemented_deepcopy}
130
+ new_cls = type(f"FSDP{cls.__name__}", (FSDP, cls), dct)
131
+ module.__class__ = new_cls
132
+ return module
133
+
134
+
135
+ def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> typing_extensions.Never:
136
+ raise AssertionError(
137
+ "FSDP does not support deepcopy. Please use state dict for serialization."
138
+ )
139
+
140
+
141
+ class FSDP:
142
+ def __new__(cls, *args, **kwargs):
143
+ """
144
+ Override ``__new__`` to remove the FSDP class and directly construct
145
+ the original class for cases like indexing into a container module.
146
+ """
147
+ # Use index 2 since 0 is the dynamically constructed `FSDP<...>` class
148
+ # and index 1 is the `FSDP` class itself
149
+ orig_cls = cls.__mro__[2]
150
+ self = orig_cls.__new__(orig_cls, *args, **kwargs)
151
+ self.__init__(*args, **kwargs)
152
+ return self
153
+
154
+ def reshard(self) -> None:
155
+ """
156
+ Reshards the module's parameters, registering the sharded parameters
157
+ to the module and freeing the unsharded parameters if needed. This
158
+ method is *not* recursive.
159
+ """
160
+ state = self._get_fsdp_state()
161
+ if fsdp_param_group := state._fsdp_param_group:
162
+ fsdp_param_group.reshard()
163
+
164
+ def set_is_last_backward(self, is_last_backward: bool) -> None:
165
+ """
166
+ Sets whether the next backward is the last one, meaning that FSDP
167
+ should wait for gradient reduction to finish and clear internal data
168
+ structures used for explicit prefetching.
169
+ """
170
+ state = self._get_fsdp_state()
171
+ state._state_ctx.is_last_backward = is_last_backward
172
+
173
+ def set_requires_gradient_sync(
174
+ self, requires_gradient_sync: bool, recurse: bool = True
175
+ ) -> None:
176
+ """
177
+ Sets if the module should sync gradients. This can be used to implement
178
+ gradient accumulation without communication. For HSDP, this controls
179
+ both reduce-scatter and all-reduce together.
180
+
181
+ Args:
182
+ requires_gradient_sync (bool): Whether to reduce gradients for the
183
+ module's parameters.
184
+ recurse (bool): Whether to set for all submodules or just the
185
+ passed-in module.
186
+ """
187
+ for module in cast(nn.Module, self).modules():
188
+ if isinstance(module, FSDP):
189
+ state = module._get_fsdp_state()
190
+ if fsdp_param_group := state._fsdp_param_group:
191
+ fsdp_param_group.reduce_scatter_grads = requires_gradient_sync
192
+ fsdp_param_group.all_reduce_grads = requires_gradient_sync
193
+
194
+ def set_requires_all_reduce(self, requires_all_reduce: bool, recurse: bool = True):
195
+ """
196
+ Sets if the module should all-reduce gradients. This can be used to
197
+ implement gradient accumulation with only reduce-scatter but not
198
+ all-reduce for HSDP.
199
+ """
200
+ for module in cast(nn.Module, self).modules():
201
+ if isinstance(module, FSDP):
202
+ state = module._get_fsdp_state()
203
+ if fsdp_param_group := state._fsdp_param_group:
204
+ fsdp_param_group.all_reduce_grads = requires_all_reduce
205
+
206
+ def _get_fsdp_state(self) -> FSDPState:
207
+ if (state := _get_module_fsdp_state(cast(nn.Module, self))) is None:
208
+ raise AssertionError(f"No FSDP state found on {self}")
209
+ return state
210
+
211
+ def _apply(self, *args: Any, **kwargs: Any) -> Any:
212
+ # Reshard to ensure that sharded parameters are registered
213
+ self.reshard()
214
+ ret = super()._apply(*args, **kwargs) # type: ignore[misc]
215
+ state = self._get_fsdp_state()
216
+ if not (fsdp_param_group := state._fsdp_param_group):
217
+ return ret
218
+ # TODO: Remove this padding logic once DTensor pads the local tensor:
219
+ # https://github.com/pytorch/pytorch/issues/113045
220
+ with torch.no_grad():
221
+ for fsdp_param in fsdp_param_group.fsdp_params:
222
+ module_info = fsdp_param._module_info
223
+ new_param = getattr(module_info.module, module_info.param_name)
224
+ if new_param is not fsdp_param.sharded_param:
225
+ if torch.__future__.get_swap_module_params_on_conversion():
226
+ raise AssertionError(
227
+ "Expects swap_tensors to preserve object but got "
228
+ f"{new_param} instead of {fsdp_param.sharded_param}"
229
+ )
230
+ else:
231
+ raise AssertionError(
232
+ "Please set torch.__future__.set_swap_module_params_on_conversion(True) "
233
+ "to use _apply methods with FSDP"
234
+ )
235
+ local_tensor = new_param._local_tensor
236
+ padded_sharded_size = fsdp_param.padded_sharded_param_size
237
+ if local_tensor.size() != padded_sharded_size:
238
+ padded_local_tensor = local_tensor.new_zeros(padded_sharded_size)
239
+ padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor)
240
+ local_tensor = padded_local_tensor
241
+ fsdp_param._sharded_param_data = local_tensor.view(-1)
242
+ assert isinstance(fsdp_param.sharded_param, DTensor) # mypy
243
+ fsdp_param.sharded_param._local_tensor = local_tensor[
244
+ : fsdp_param.sharded_size[0]
245
+ ]
246
+ return ret
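A rough usage sketch of the API defined above, applying `fully_shard` per layer and then to the root. This is an assumption-laden example: it expects a `torchrun` launch on NCCL-capable GPUs, and it imports from the private submodules added in this commit, which may move in later releases.

```python
import os

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable.fsdp._fsdp_api import MixedPrecisionPolicy
from torch.distributed._composable.fsdp.fully_shard import fully_shard

dist.init_process_group("nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))

model = nn.Sequential(*[nn.Linear(1024, 1024) for _ in range(4)]).cuda()
mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)

for layer in model:
    fully_shard(layer, mp_policy=mp)  # one all-gather group per layer
fully_shard(model, mp_policy=mp)      # root group; params stay unsharded after forward

out = model(torch.randn(8, 1024, device="cuda"))
out.sum().backward()                  # layer-by-layer all-gather, then reduce-scatter
dist.destroy_process_group()
```

Grouping layer by layer is what lets each layer's all-gather overlap with the compute of the previous one, as the docstring describes.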
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ from .api import CheckpointException
2
+ from .default_planner import DefaultLoadPlanner, DefaultSavePlanner
3
+ from .filesystem import FileSystemReader, FileSystemWriter
4
+ from .fsspec import FsspecReader, FsspecWriter
5
+ from .metadata import (
6
+ BytesStorageMetadata,
7
+ ChunkStorageMetadata,
8
+ Metadata,
9
+ TensorStorageMetadata,
10
+ )
11
+ from .optimizer import load_sharded_optimizer_state_dict
12
+ from .planner import LoadPlan, LoadPlanner, ReadItem, SavePlan, SavePlanner, WriteItem
13
+ from .state_dict_loader import load, load_state_dict
14
+ from .state_dict_saver import async_save, save, save_state_dict
15
+ from .storage import StorageReader, StorageWriter
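The names re-exported here are enough for a short save/load round trip. A minimal sketch using the filesystem storage layer; it should also run in a single process, though the usual setting is one call per rank under `torchrun` (paths here are made up):

```python
import torch
import torch.distributed.checkpoint as dcp

state_dict = {"model": {"weight": torch.randn(4, 4)}}
dcp.save(state_dict, storage_writer=dcp.FileSystemWriter("/tmp/ckpt"))

# Loading is in-place: pre-allocate tensors with the right shapes, then load.
restored = {"model": {"weight": torch.empty(4, 4)}}
dcp.load(restored, storage_reader=dcp.FileSystemReader("/tmp/ckpt"))
```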
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc ADDED
Binary file (482 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc ADDED
Binary file (1.75 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc ADDED
Binary file (2.73 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc ADDED
Binary file (1.25 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc ADDED
Binary file (4.72 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc ADDED
Binary file (2.03 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc ADDED
Binary file (20.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/fsspec.cpython-310.pyc ADDED
Binary file (4.87 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (4.33 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc ADDED
Binary file (8.44 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc ADDED
Binary file (29.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc ADDED
Binary file (7.36 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_save_plans.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import dataclasses
3
+ from collections import defaultdict
4
+ from typing import Dict, List, Set
5
+
6
+ from torch.distributed.checkpoint.metadata import MetadataIndex
7
+ from torch.distributed.checkpoint.planner import SavePlan, WriteItem
8
+
9
+ __all__ = ["dedup_save_plans"]
10
+
11
+
12
+ def dedup_save_plans(all_plans: List[SavePlan]) -> List[SavePlan]:
13
+ """
14
+ Removes entries that are duplicated across multiple SavePlans. For each duplicate across
15
+ a set of SavePlans, only the smallest SavePlan in terms of planned storage keeps the entry.
16
+ """
17
+
18
+ write_item_to_plan_indices: Dict[MetadataIndex, Set[int]] = defaultdict(set)
19
+ write_item_idx_to_write_item: Dict[MetadataIndex, WriteItem] = {}
20
+ for plan_idx, plan in enumerate(all_plans):
21
+ for write_item in plan.items:
22
+ # map each write item to its plan
23
+ write_item_to_plan_indices[write_item.index].add(plan_idx)
24
+ write_item_idx_to_write_item[write_item.index] = write_item
25
+
26
+ # put item in the plan with the smallest size and remove it from the other plan_indices
27
+ to_remove: List[Set] = [set() for _ in range(len(all_plans))]
28
+ plan_to_size = [0] * len(all_plans)
29
+ for write_item_idx, plan_indices in write_item_to_plan_indices.items():
30
+ select_plan_idx = min(plan_indices, key=lambda plan_idx: plan_to_size[plan_idx])
31
+
32
+ write_item = write_item_idx_to_write_item[write_item_idx]
33
+ # essentially ignores the storage size of anything that is not a tensor, since
34
+ # we don't know how much storage they represent
35
+ plan_to_size[select_plan_idx] += write_item.tensor_storage_size() or 1
36
+
37
+ plan_indices.remove(select_plan_idx)
38
+ for plan_idx in plan_indices:
39
+ to_remove[plan_idx].add(write_item_idx)
40
+
41
+ for plan_idx, remove_set in enumerate(to_remove):
42
+ new_items = [
43
+ write_item
44
+ for write_item in all_plans[plan_idx].items
45
+ if write_item.index not in remove_set
46
+ ]
47
+ all_plans[plan_idx] = dataclasses.replace(all_plans[plan_idx], items=new_items)
48
+
49
+ return all_plans
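The loop above is a greedy balance: every write item that appears in several plans is kept only by the plan whose accumulated storage is currently smallest, and removed from the rest. The same rule on plain Python data (toy item sizes, no DCP types), just to make the assignment visible:

```python
# Toy illustration of the greedy rule used above (hypothetical sizes in bytes).
item_sizes = {"w1": 400, "w2": 100, "w3": 100}
item_to_plans = {"w1": [0, 1], "w2": [0, 1], "w3": [0, 1]}  # duplicated everywhere

plan_to_size = [0, 0]
assignment = {}
for item, plans in item_to_plans.items():
    target = min(plans, key=lambda p: plan_to_size[p])  # smallest plan so far
    assignment[item] = target
    plan_to_size[target] += item_sizes[item]

print(assignment)    # {'w1': 0, 'w2': 1, 'w3': 1}
print(plan_to_size)  # [400, 200]
```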
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_tensors.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import dataclasses
3
+ import logging
4
+ from typing import Dict, List
5
+
6
+ from torch.distributed.checkpoint.metadata import MetadataIndex
7
+ from torch.distributed.checkpoint.planner import SavePlan
8
+
9
+ __all__ = ["dedup_tensors"]
10
+
11
+
12
+ def init_logger() -> logging.Logger:
13
+ logger = logging.getLogger(__name__)
14
+ level = logging.INFO
15
+ logger.setLevel(level)
16
+ console = logging.StreamHandler()
17
+ formatter = logging.Formatter(
18
+ "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
19
+ )
20
+ console.setFormatter(formatter)
21
+ console.setLevel(level)
22
+ logger.addHandler(console)
23
+ logger.propagate = False
24
+ return logger
25
+
26
+
27
+ logger = init_logger()
28
+
29
+
30
+ # TODO add docstring for dedup_tensors
31
+ def dedup_tensors(all_plans: List[SavePlan]) -> List[SavePlan]:
32
+ all_plans = list(all_plans)
33
+ key_to_plan: Dict[MetadataIndex, List[int]] = {}
34
+ for plan_idx, plan in enumerate(all_plans):
35
+ for write_item in plan.items:
36
+ key_to_plan.setdefault(write_item.index, []).append(plan_idx)
37
+
38
+ replicated_items = {k: v for k, v in key_to_plan.items() if len(v) > 1}
39
+
40
+ # Remove duplicates by always keeping the first entry.
41
+ # Compute the per-rank remove set.
42
+ plan_to_keys: Dict[int, List[MetadataIndex]] = {}
43
+ for key, plans in replicated_items.items():
44
+ for plan_idx in plans[1:]:
45
+ plan_to_keys.setdefault(plan_idx, []).append(key)
46
+ if len(plan_to_keys) > 0:
47
+ logger.info("Duplicate keys to remove: %s", plan_to_keys)
48
+
49
+ for plan_idx, keys in plan_to_keys.items():
50
+ key_set = set(keys)
51
+ # rewrite items and remove elements
52
+ new_items = [
53
+ write_item
54
+ for write_item in all_plans[plan_idx].items
55
+ if write_item.index not in key_set
56
+ ]
57
+ all_plans[plan_idx] = dataclasses.replace(all_plans[plan_idx], items=new_items)
58
+
59
+ return all_plans
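`dedup_tensors` uses a simpler policy than `dedup_save_plans`: the first plan that contains a replicated key keeps it, and every later plan has that key stripped. A toy illustration of the keep-first rule on plain data (hypothetical keys and plan indices):

```python
key_to_plan = {"w1": [0, 1, 2], "w2": [1, 2]}  # plans holding each replicated key

plan_to_keys = {}
for key, plans in key_to_plan.items():
    for plan_idx in plans[1:]:  # every plan except the first drops the key
        plan_to_keys.setdefault(plan_idx, []).append(key)

print(plan_to_keys)  # {1: ['w1'], 2: ['w1', 'w2']}
```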
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py ADDED
@@ -0,0 +1,15 @@
1
+ # Mypy will not try inferring the types of any 3rd party libraries installed.
2
+ # mypy: ignore-errors
3
+
4
+ import logging
5
+
6
+ from torch.distributed.checkpoint.fsspec import ( # noqa: F401
7
+ FsspecReader,
8
+ FsspecWriter,
9
+ )
10
+
11
+ log = logging.getLogger(__name__)
12
+ log.warning(
13
+ "FSSpec Filesystem has been made public, please update your "
14
+ "import to torch.distributed.checkpoint"
15
+ )
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py ADDED
@@ -0,0 +1,53 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import Dict, Tuple
3
+
4
+ from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
5
+
6
+ from ._traverse import OBJ_PATH, set_element, STATE_DICT_ITEM, traverse_state_dict
7
+
8
+ """
9
+ TODO:
10
+ Need to add ability to handle tuple, OrderedDict, NamedTuple.
11
+ Update mappings from dict to a class.
12
+ Change set_element to recreate the right type for tuple, OrderedDict, and NamedTuple.
13
+ """
14
+
15
+
16
+ FLATTEN_MAPPING = Dict[str, OBJ_PATH]
17
+
18
+
19
+ # TODO: Update Docstring for nested_dict.py
20
+ def flatten_state_dict(
21
+ state_dict: STATE_DICT_TYPE,
22
+ ) -> Tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]:
23
+ """
24
+ Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary.
25
+
26
+ Use ``unflatten_state_dict`` to revert this process.
27
+ Returns:
28
+ A tuple with the flattened state_dict and a mapping from original to new state_dict.
29
+ N.B. The new keys are derived from the object paths, joined by dot.
30
+ For example: ``{ 'a': {'b':...}}`` results in the key `a.b`.
31
+ """
32
+ flattened: STATE_DICT_TYPE = {}
33
+ mappings: FLATTEN_MAPPING = {}
34
+
35
+ def flat_copy(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:
36
+ new_fqn = ".".join(map(str, path))
37
+ if new_fqn in flattened:
38
+ raise ValueError(f"duplicated flatten key {new_fqn}")
39
+ flattened[new_fqn] = value
40
+ mappings[new_fqn] = path
41
+
42
+ traverse_state_dict(state_dict, flat_copy)
43
+ return flattened, mappings
44
+
45
+
46
+ def unflatten_state_dict(
47
+ state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING
48
+ ) -> STATE_DICT_TYPE:
49
+ """Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``."""
50
+ nested: STATE_DICT_TYPE = {}
51
+ for key, value in state_dict.items():
52
+ set_element(nested, mapping[key], value)
53
+ return nested
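`flatten_state_dict` walks the nested structure via `traverse_state_dict` and records, for each leaf, a dotted key plus the object path needed to put the value back. A quick round trip, importing from the private module added here:

```python
import torch
from torch.distributed.checkpoint._nested_dict import (
    flatten_state_dict,
    unflatten_state_dict,
)

nested = {"model": {"layers": [{"weight": torch.ones(2)}]}}
flat, mapping = flatten_state_dict(nested)
print(list(flat.keys()))  # ['model.layers.0.weight']
print(mapping)            # {'model.layers.0.weight': ('model', 'layers', 0, 'weight')}

restored = unflatten_state_dict(flat, mapping)
print(restored["model"]["layers"][0]["weight"])  # tensor([1., 1.])
```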
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py ADDED
@@ -0,0 +1,103 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+
3
+ import copy
4
+
5
+ import torch.distributed as dist
6
+ from torch.distributed._shard.sharded_tensor import Shard, ShardedTensor, ShardMetadata
7
+ from torch.distributed._shard.sharded_tensor.metadata import ShardedTensorMetadata
8
+ from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
9
+ from torch.distributed.remote_device import _remote_device
10
+
11
+ from ._traverse import OBJ_PATH, set_element, STATE_DICT_ITEM, traverse_state_dict
12
+ from .utils import _element_wise_add, _normalize_device_info
13
+
14
+
15
+ # TODO: We need to refactor this code.
16
+ def _flatten_sharded_tensors(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
17
+ r"""
18
+ Transform ``state_dict`` by flattening all nested ShardedTensor instances found.
19
+
20
+ The resulting ShardedTensor instances are only correct regarding the local shard and
21
+ MUST not be used for any other purpose but checkpointing, as no operator will work with them.
22
+
23
+ This function should be used in conjunction with a state_dict produced by FSDP's
24
+ StateDictType.SHARDED_STATE_DICT methods.
25
+ """
26
+ new_state_dict: STATE_DICT_TYPE = {}
27
+
28
+ def rewrite_dict(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:
29
+ if not isinstance(value, ShardedTensor):
30
+ set_element(new_state_dict, path, value)
31
+ return
32
+ shards = value.local_shards()
33
+
34
+ if len(shards) == 0:
35
+ return
36
+ if len(shards) != 1:
37
+ set_element(new_state_dict, path, value)
38
+ return
39
+
40
+ outer_shard = shards[0]
41
+
42
+ inner_st = outer_shard.tensor
43
+ if not isinstance(inner_st, ShardedTensor):
44
+ set_element(new_state_dict, path, value)
45
+ return
46
+
47
+ if len(inner_st.local_shards()) != 1:
48
+ raise ValueError("Cannot handle inner tensor with more than 1 shard")
49
+ inner_shard = inner_st.local_shards()[0]
50
+
51
+ local_shards = [
52
+ Shard(
53
+ tensor=inner_shard.tensor,
54
+ metadata=ShardMetadata(
55
+ shard_offsets=_element_wise_add(
56
+ outer_shard.metadata.shard_offsets,
57
+ inner_shard.metadata.shard_offsets,
58
+ ),
59
+ shard_sizes=inner_shard.metadata.shard_sizes,
60
+ placement=f"rank:{dist.get_rank()}/{inner_shard.tensor.device}",
61
+ ),
62
+ )
63
+ ]
64
+
65
+ st_meta: ShardedTensorMetadata = copy.deepcopy(value.metadata())
66
+ other_rank = 0 if dist.get_rank() > 0 else 1
67
+ device_info = _normalize_device_info(inner_shard.tensor.device.type, 0)
68
+
69
+ # Remove the outer ST shard the inner ST covers
70
+ for i, shard_md in enumerate(st_meta.shards_metadata):
71
+ if shard_md.shard_offsets == outer_shard.metadata.shard_offsets:
72
+ st_meta.shards_metadata.pop(i)
73
+ break
74
+
75
+ # Attribute other rank for the other shards
76
+ for shard_md in st_meta.shards_metadata:
77
+ shard_md.placement = _remote_device(f"rank:{other_rank}/{device_info}")
78
+
79
+ # Add other inner shards from the inner tensor
80
+ for inner_md in inner_st.metadata().shards_metadata:
81
+ if inner_md.shard_offsets != inner_shard.metadata.shard_offsets:
82
+ st_meta.shards_metadata.append(
83
+ ShardMetadata(
84
+ shard_offsets=_element_wise_add(
85
+ outer_shard.metadata.shard_offsets,
86
+ inner_md.shard_offsets,
87
+ ),
88
+ shard_sizes=inner_md.shard_sizes,
89
+ placement=f"rank:{other_rank}/{device_info}",
90
+ )
91
+ )
92
+
93
+ # Finally add this shard
94
+ st_meta.shards_metadata.append(local_shards[0].metadata)
95
+
96
+ st = ShardedTensor._init_from_local_shards_and_global_metadata(
97
+ local_shards=local_shards,
98
+ sharded_tensor_metadata=st_meta,
99
+ )
100
+ set_element(new_state_dict, path, st)
101
+
102
+ traverse_state_dict(state_dict, rewrite_dict)
103
+ return new_state_dict
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_storage_utils.py ADDED
@@ -0,0 +1,50 @@
1
+ import os
2
+ from typing import List, Type, Union
3
+
4
+ from .filesystem import FileSystemReader, FileSystemWriter
5
+
6
+ from .storage import StorageReader, StorageWriter
7
+
8
+
9
+ def _storage_setup(
10
+ storage: Union[StorageReader, StorageWriter, None],
11
+ checkpoint_id: Union[str, os.PathLike, None],
12
+ reader: bool = False,
13
+ ) -> Union[None, StorageReader, StorageWriter]:
14
+ if storage:
15
+ if checkpoint_id is not None:
16
+ storage.reset(checkpoint_id)
17
+ return storage
18
+
19
+ if not checkpoint_id:
20
+ raise RuntimeError(
21
+ "`checkpoint_id` must be specified if "
22
+ "storage_reader/storage_writer is None."
23
+ )
24
+
25
+ targets: List[Type[Union[StorageReader, StorageWriter]]] = []
26
+ if reader:
27
+ targets = [
28
+ FileSystemReader,
29
+ ]
30
+ else:
31
+ targets = [
32
+ FileSystemWriter,
33
+ ]
34
+ try:
35
+ from .fsspec import FsspecReader, FsspecWriter
36
+
37
+ targets.append(FsspecReader if reader else FsspecWriter)
38
+ except Exception:
39
+ pass
40
+
41
+ for target in targets:
42
+ if target.validate_checkpoint_id(checkpoint_id):
43
+ storage = target(checkpoint_id) # type: ignore[call-arg]
44
+ storage.reset(checkpoint_id)
45
+ return storage
46
+
47
+ raise RuntimeError(
48
+ "Cannot detect which StorageReader or StorageWriter to use. "
49
+ "Please specify the storage_reader/storage_writer."
50
+ )
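When only a `checkpoint_id` is given, `_storage_setup` probes each candidate class's `validate_checkpoint_id`, so a plain filesystem path resolves to the filesystem reader/writer. In user code this is what allows the shorter form below (a sketch; the path is made up):

```python
import torch
import torch.distributed.checkpoint as dcp

state = {"w": torch.randn(2, 2)}
# No storage_writer/storage_reader passed: _storage_setup picks
# FileSystemWriter/FileSystemReader because the path validates as a
# filesystem checkpoint_id.
dcp.save(state, checkpoint_id="/tmp/ckpt_auto")
dcp.load(state, checkpoint_id="/tmp/ckpt_auto")
```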
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_traverse.py ADDED
@@ -0,0 +1,167 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import (
3
+ Callable,
4
+ cast,
5
+ Collection,
6
+ List,
7
+ Mapping,
8
+ MutableMapping,
9
+ Optional,
10
+ Tuple,
11
+ TypeVar,
12
+ Union,
13
+ )
14
+
15
+ import torch
16
+ from torch.distributed._shard.sharded_tensor.api import ShardedTensor
17
+ from torch.distributed._tensor import DTensor
18
+ from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
19
+
20
+ PATH_ITEM = Union[str, int]
21
+ OBJ_PATH = Tuple[PATH_ITEM, ...]
22
+ T = TypeVar("T")
23
+
24
+ STATE_DICT_ITEM = object
25
+ CONTAINER_TYPE = MutableMapping[PATH_ITEM, STATE_DICT_ITEM]
26
+
27
+ __all__ = ["traverse_state_dict", "set_element", "get_element", "print_tensor"]
28
+
29
+
30
+ def _keep_visiting_tensors(value: STATE_DICT_ITEM) -> bool:
31
+ return isinstance(value, torch.Tensor)
32
+
33
+
34
+ # TODO: update docstring for traverse.py
35
+ def traverse_state_dict(
36
+ state_dict: STATE_DICT_TYPE,
37
+ visitor: Callable[[OBJ_PATH, STATE_DICT_ITEM], None],
38
+ keep_traversing: Callable[[STATE_DICT_ITEM], bool] = _keep_visiting_tensors,
39
+ ) -> None:
40
+ """
41
+ Invoke ``visitor`` for each value recursively in ``state_dict``.
42
+
43
+ Traversal is short-circuited when it finds a collection for which ``keep_traversing`` evaluates
44
+ to false for all elements.
45
+ By default, all collections with at least one ``torch.Tensor`` element are traversed.
46
+ Visitor takes a path argument that is a tuple of the keys used to reach it.
47
+ """
48
+
49
+ # a value is terminal if it has no other containers values inside it
50
+ def _is_terminal(value: STATE_DICT_ITEM) -> bool:
51
+ values: Collection[STATE_DICT_ITEM]
52
+ if isinstance(value, Mapping):
53
+ values = value.values()
54
+ elif isinstance(value, list):
55
+ values = value
56
+ else:
57
+ return True
58
+
59
+ for entry in values:
60
+ if isinstance(entry, (Mapping, list)) and not _is_terminal(entry):
61
+ return False
62
+ if keep_traversing is not None and keep_traversing(entry):
63
+ return False
64
+ return True
65
+
66
+ def _traverse_obj(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:
67
+ if _is_terminal(value):
68
+ visitor(path, value)
69
+ elif isinstance(value, Mapping):
70
+ for k, v in value.items():
71
+ _traverse_obj(path + (str(k),), v)
72
+ elif isinstance(value, list):
73
+ for i, v in enumerate(value):
74
+ _traverse_obj(path + (i,), v)
75
+
76
+ for key, value in state_dict.items():
77
+ _traverse_obj((str(key),), value)
78
+
79
+
80
+ def set_element(
81
+ root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: STATE_DICT_ITEM
82
+ ) -> None:
83
+ """Set ``value`` in ``root_dict`` along the ``path`` object path."""
84
+ cur_container = cast(CONTAINER_TYPE, root_dict)
85
+
86
+ def extend_list(lst: List[STATE_DICT_ITEM], idx: int) -> None:
87
+ while len(lst) <= idx:
88
+ lst.append(None)
89
+
90
+ for i in range(1, len(path)):
91
+ prev_key = path[i - 1]
92
+ key = path[i]
93
+ def_val = cast(STATE_DICT_ITEM, {} if type(key) == str else [])
94
+
95
+ if isinstance(cur_container, Mapping):
96
+ cur_container = cast(
97
+ CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val)
98
+ )
99
+ else:
100
+ extend_list(cur_container, prev_key)
101
+ if cur_container[prev_key] is None:
102
+ cur_container[prev_key] = def_val
103
+ cur_container = cur_container[prev_key]
104
+
105
+ key = path[-1]
106
+ if type(key) == int:
107
+ extend_list(cast(List[STATE_DICT_ITEM], cur_container), key)
108
+
109
+ cur_container[key] = value
110
+
111
+
112
+ def get_element(
113
+ root_dict: STATE_DICT_TYPE,
114
+ path: OBJ_PATH,
115
+ default_value: Optional[T] = None,
116
+ ) -> Optional[T]:
117
+ """Retrieve the value at ``path`` from ``root_dict``, returning ``default_value`` if not found."""
118
+ cur_value = cast(CONTAINER_TYPE, root_dict)
119
+ for part in path:
120
+ if type(part) is int:
121
+ if not isinstance(cur_value, list) or len(cur_value) < part:
122
+ return default_value
123
+ elif not isinstance(cur_value, Mapping) or part not in cur_value:
124
+ return default_value
125
+
126
+ cur_value = cast(CONTAINER_TYPE, cur_value[part])
127
+ return cast(Optional[T], cur_value)
128
+
129
+
130
+ def _print_nested(
131
+ value: STATE_DICT_ITEM,
132
+ prefix: str = "",
133
+ print_fun: Callable[[str], None] = print,
134
+ ) -> None:
135
+ if type(value) is ShardedTensor:
136
+ print_fun(f"{prefix} ShardedTensor size: {value.size()}")
137
+ for shard in value.local_shards():
138
+ _print_nested(
139
+ shard.tensor,
140
+ f"{shard.metadata.shard_offsets} ",
141
+ print_fun=print_fun,
142
+ )
143
+ elif type(value) is DTensor:
144
+ print_fun(f"{prefix} DistributedTensor size: {value.size()}")
145
+ # TODO: add local offset for _local_tensor in print_nested.
146
+ _print_nested(
147
+ value._local_tensor,
148
+ print_fun=print_fun,
149
+ )
150
+ elif isinstance(value, torch.Tensor):
151
+ print_fun(f"{prefix} Tensor size: {value.size()}")
152
+ else:
153
+ print_fun(f"{prefix} Type: {type(value)}")
154
+
155
+
156
+ def print_tensor(
157
+ path: OBJ_PATH,
158
+ value: STATE_DICT_ITEM,
159
+ print_fun: Callable[[str], None] = print,
160
+ ) -> None:
161
+ """
162
+ Use this callback with traverse_state_dict to print its content.
163
+
164
+ By default the content is printed using the builtin ``print`` but this can
165
+ be changed by passing a different ``print_fun`` callable.
166
+ """
167
+ _print_nested(value, prefix=str(path), print_fun=print_fun)
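The helpers above are easiest to see on a small nested state dict: `traverse_state_dict` visits leaves with their object paths, and `set_element`/`get_element` read and write through those paths. For example:

```python
import torch
from torch.distributed.checkpoint._traverse import (
    get_element,
    print_tensor,
    set_element,
    traverse_state_dict,
)

state_dict = {"opt": {"step": 3}, "model": [torch.zeros(2), torch.ones(3)]}

# Visit each leaf; print_tensor shows tensor sizes and the type of other leaves.
traverse_state_dict(state_dict, print_tensor)

print(get_element(state_dict, ("model", 1)))   # tensor([1., 1., 1.])
set_element(state_dict, ("opt", "lr"), 0.1)    # creates the nested slot if needed
print(get_element(state_dict, ("opt", "lr")))  # 0.1
```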
venv/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py ADDED
@@ -0,0 +1,41 @@
 
1
+ import traceback as tb
2
+ from typing import Any, Dict, Tuple
3
+
4
+ WRAPPED_EXCEPTION = Tuple[BaseException, tb.StackSummary]
5
+
6
+ __all__ = ["CheckpointException"]
7
+
8
+
9
+ def _wrap_exception(exc: BaseException) -> WRAPPED_EXCEPTION:
10
+ return (exc, tb.extract_tb(exc.__traceback__))
11
+
12
+
13
+ def _is_wrapped_exception(obj: Any) -> bool:
14
+ if not isinstance(obj, tuple):
15
+ return False
16
+ if len(obj) != 2:
17
+ return False
18
+ return isinstance(obj[0], BaseException) and isinstance(obj[1], tb.StackSummary)
19
+
20
+
21
+ class CheckpointException(BaseException):
22
+ """Exception raised if failure was detected as part of a checkpoint load or save."""
23
+
24
+ def __init__(self, msg: str, failures: Dict[int, WRAPPED_EXCEPTION]):
25
+ super().__init__(msg, failures)
26
+ self._failures = failures
27
+
28
+ @property
29
+ def failures(self) -> Dict[int, WRAPPED_EXCEPTION]:
30
+ """Return a dictionary mapping node ranks to their associated exceptions in case of failure."""
31
+ return self._failures
32
+
33
+ def __str__(self):
34
+ str = f"CheckpointException ranks:{self._failures.keys()}\n"
35
+ for rank, exc_pair in self._failures.items():
36
+ exc, trace = exc_pair
37
+ str += f"Traceback (most recent call last): (RANK {rank})\n"
38
+ if trace is not None:
39
+ str += "".join(tb.format_list(trace))
40
+ str += "".join(tb.format_exception_only(type(exc), value=exc))
41
+ return str