applied-ai-018 committed on
Commit 709d16e · verified · 1 parent: b054cff

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py +52 -0
  5. venv/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__init__.py +38 -0
  7. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_dynamo_utils.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_wrap_utils.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/fully_sharded_data_parallel.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/wrap.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py +563 -0
  29. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_debug_utils.py +155 -0
  30. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py +45 -0
  31. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_exec_order_utils.py +364 -0
  32. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py +0 -0
  33. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_fsdp_extensions.py +179 -0
  34. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py +1182 -0
  35. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_limiter_utils.py +33 -0
  36. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py +2086 -0
  37. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py +1630 -0
  38. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py +127 -0
  39. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py +928 -0
  40. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py +237 -0
  41. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py +113 -0
  42. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py +357 -0
  43. venv/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py +262 -0
  44. venv/lib/python3.10/site-packages/torch/distributed/fsdp/api.py +410 -0
  45. venv/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py +2075 -0
  46. venv/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py +388 -0
  47. venv/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py +606 -0
  48. venv/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py +7 -0
  49. venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:168239d5c9986edf1477aea746d7a585af7fcb44ef2ba21b9a4af0fb3deb77c3
+ size 33555627
ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d136286fc913d4d5c781d50ac52854c98d16f8eb91aa58e5c6fbf22ea3c989ac
+ size 33555533
ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f038fd7638035e27e4e966f0411a18d6a76603f60ad41884bb77024e3d4d123e
+ size 415237276
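Note on the three checkpoint entries above: these are Git LFS pointer files, so the repository stores only the spec version, the SHA-256 object id, and the byte size, while the actual .pt payloads live in LFS storage. A minimal sketch of verifying a downloaded shard against its pointer (the local filename is a hypothetical placeholder):

    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        # Stream in chunks so multi-hundred-MB checkpoints need not fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Compare against the oid recorded in the pointer file above.
    expected = "168239d5c9986edf1477aea746d7a585af7fcb44ef2ba21b9a4af0fb3deb77c3"
    assert sha256_of("exp_avg_sq.pt") == expected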
venv/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py ADDED
@@ -0,0 +1,52 @@
+
+ import sys
+ import torch
+
+
+ def is_available():
+     return hasattr(torch._C, "_dist_autograd_init")
+
+
+ if is_available() and not torch._C._dist_autograd_init():
+     raise RuntimeError("Failed to initialize torch.distributed.autograd")
+
+ if is_available():
+     from torch._C._distributed_autograd import (
+         get_gradients,
+         backward,
+         _init,
+         _new_context,
+         _release_context,
+         _get_max_id,
+         _is_valid_context,
+         _retrieve_context,
+         _current_context,
+         _get_debug_info,
+         DistAutogradContext,
+     )
+
+
+ class context:
+     '''
+     Context object to wrap forward and backward passes when using
+     distributed autograd. The ``context_id`` generated in the ``with``
+     statement is required to uniquely identify a distributed backward pass
+     on all workers. Each worker stores metadata associated with this
+     ``context_id``, which is required to correctly execute a distributed
+     autograd pass.
+
+     Example::
+         >>> # xdoctest: +SKIP
+         >>> import torch.distributed.autograd as dist_autograd
+         >>> with dist_autograd.context() as context_id:
+         >>>     t1 = torch.rand((3, 3), requires_grad=True)
+         >>>     t2 = torch.rand((3, 3), requires_grad=True)
+         >>>     loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
+         >>>     dist_autograd.backward(context_id, [loss])
+     '''
+     def __enter__(self):
+         self.autograd_context = _new_context()
+         return self.autograd_context._context_id()
+
+     def __exit__(self, type, value, traceback):
+         _release_context(self.autograd_context._context_id())
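As a usage note for the module above: beyond the docstring example, the re-exported get_gradients lets callers read the gradients accumulated inside a context. A minimal sketch, assuming the RPC framework has been initialized and a peer named "worker1" exists (as in the docstring example):

    import torch
    import torch.distributed.autograd as dist_autograd
    import torch.distributed.rpc as rpc

    with dist_autograd.context() as context_id:
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
        dist_autograd.backward(context_id, [loss])
        # Distributed autograd accumulates gradients per context, not in .grad.
        grads = dist_autograd.get_gradients(context_id)
        print(grads[t1], grads[t2])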
venv/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.12 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__init__.py ADDED
@@ -0,0 +1,38 @@
+ from ._flat_param import FlatParameter as FlatParameter
+ from .fully_sharded_data_parallel import (
+     BackwardPrefetch,
+     CPUOffload,
+     FullOptimStateDictConfig,
+     FullStateDictConfig,
+     FullyShardedDataParallel,
+     LocalOptimStateDictConfig,
+     LocalStateDictConfig,
+     MixedPrecision,
+     OptimStateDictConfig,
+     OptimStateKeyType,
+     ShardedOptimStateDictConfig,
+     ShardedStateDictConfig,
+     ShardingStrategy,
+     StateDictConfig,
+     StateDictSettings,
+     StateDictType,
+ )
+
+ __all__ = [
+     "BackwardPrefetch",
+     "CPUOffload",
+     "FullOptimStateDictConfig",
+     "FullStateDictConfig",
+     "FullyShardedDataParallel",
+     "LocalOptimStateDictConfig",
+     "LocalStateDictConfig",
+     "MixedPrecision",
+     "OptimStateDictConfig",
+     "OptimStateKeyType",
+     "ShardedOptimStateDictConfig",
+     "ShardedStateDictConfig",
+     "ShardingStrategy",
+     "StateDictConfig",
+     "StateDictSettings",
+     "StateDictType",
+ ]
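This __init__.py is what makes the public FSDP surface importable as torch.distributed.fsdp. A minimal wrapping sketch using the names exported above, assuming a torchrun launch with NCCL available; the toy model and dtype choices are placeholders:

    import torch
    import torch.distributed as dist
    import torch.nn as nn
    from torch.distributed.fsdp import (
        CPUOffload,
        FullyShardedDataParallel as FSDP,
        MixedPrecision,
        ShardingStrategy,
    )

    # Assumes RANK/WORLD_SIZE/LOCAL_RANK are set by the launcher (e.g. torchrun).
    dist.init_process_group("nccl")
    torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())

    model = nn.Sequential(nn.Linear(1024, 4096), nn.ReLU(), nn.Linear(4096, 1024)).cuda()
    model = FSDP(
        model,
        sharding_strategy=ShardingStrategy.FULL_SHARD,  # shard params, grads, optimizer state
        cpu_offload=CPUOffload(offload_params=False),
        mixed_precision=MixedPrecision(param_dtype=torch.bfloat16),
    )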
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (774 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc ADDED
Binary file (5.89 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_dynamo_utils.cpython-310.pyc ADDED
Binary file (823 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc ADDED
Binary file (71.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc ADDED
Binary file (5.01 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc ADDED
Binary file (31.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc ADDED
Binary file (55.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc ADDED
Binary file (39.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc ADDED
Binary file (4.33 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc ADDED
Binary file (9.54 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc ADDED
Binary file (3.05 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc ADDED
Binary file (8.53 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_wrap_utils.cpython-310.pyc ADDED
Binary file (7.32 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/fully_sharded_data_parallel.cpython-310.pyc ADDED
Binary file (79.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/wrap.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py ADDED
@@ -0,0 +1,563 @@
1
+ """
2
+ This file includes private common utilities for FSDP.
3
+ """
4
+ import logging
5
+ import traceback
6
+ import warnings
7
+ import weakref
8
+ from enum import auto, Enum
9
+ from functools import partial
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ cast,
14
+ Dict,
15
+ Generator,
16
+ Iterable,
17
+ List,
18
+ no_type_check,
19
+ Optional,
20
+ Set,
21
+ Tuple,
22
+ Type,
23
+ TYPE_CHECKING,
24
+ )
25
+
26
+ import torch
27
+ import torch.distributed as dist
28
+ import torch.distributed.fsdp._flat_param as flat_param_file
29
+ import torch.nn as nn
30
+ from torch.distributed._composable_state import _get_module_state, _State
31
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
32
+ _CHECKPOINT_PREFIX,
33
+ )
34
+ from torch.distributed.device_mesh import DeviceMesh
35
+ from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions
36
+ from torch.distributed.utils import _apply_to_tensors
37
+ from torch.utils._mode_utils import no_dispatch
38
+
39
+ from .api import (
40
+ FullOptimStateDictConfig,
41
+ FullStateDictConfig,
42
+ OptimStateDictConfig,
43
+ ShardingStrategy,
44
+ StateDictConfig,
45
+ StateDictType,
46
+ )
47
+
48
+ if TYPE_CHECKING:
49
+ from ._flat_param import FlatParamHandle
50
+
51
+ FSDP_WRAPPED_MODULE = "_fsdp_wrapped_module"
52
+ FSDP_PREFIX = FSDP_WRAPPED_MODULE + "."
53
+ FSDP_FLATTENED = "_fsdp_flattened"
54
+
55
+ # Save a global mapping from module to its input tensor dtype to be populated
56
+ # during the forward pre-hook and consumed in the forward post-hook when
57
+ # overriding a module's mixed precision
58
+ # NOTE: We currently take the last input tensor's dtype in the case of multiple
59
+ # floating-point input tensors, which may be incorrect. However, since there is
60
+ # not a 1:1 correspondence between input and output tensors, we must use *some*
61
+ # heuristic like this to predict the desired output dtype.
62
+ _MODULE_TO_INP_DTYPE: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
63
+
64
+
65
+ class _FSDPDeviceHandle:
66
+ """
67
+ This is a simple abstraction for FSDP computing devices,
68
+ which enables custom backends that implement CUDA-like
69
+ semantics to be integrated with FSDP.
70
+ """
71
+
72
+ def __init__(self, device: torch.device, backend: Any = None):
73
+ if backend is None:
74
+ try:
75
+ self.__backend = getattr(torch, device.type)
76
+ self.__device = device
77
+ except AttributeError as exc:
78
+ raise AttributeError(
79
+ f"Device '{device}' does not have a corresponding backend registered as 'torch.{device.type}'."
80
+ ) from exc
81
+ else:
82
+ self.__backend = backend
83
+
84
+ @classmethod
85
+ def from_device(cls, device: torch.device) -> "_FSDPDeviceHandle":
86
+ """
87
+ Return an device handle corresponding to the device, and through this handle,
88
+ operations with the same semantics as CUDA can be performed on the device.
89
+ Just return torch.cuda if the device is cuda to make attribute-access faster.
90
+ Custom backend must first register a module with the same name with {device.type} on torch.
91
+ """
92
+ if device.type == "cuda":
93
+ return cast(_FSDPDeviceHandle, torch.cuda)
94
+ return cls(device)
95
+
96
+ def __getattr__(self, __name: str) -> Any:
97
+ try:
98
+ return getattr(self.__backend, __name)
99
+ except AttributeError as exc:
100
+ raise AttributeError(
101
+ f"Custom backend '{self.__device.type}' not implement 'torch.{self.__device.type}.{__name}'"
102
+ ) from exc
103
+
104
+
105
+ class _UninitializedDeviceHandle(_FSDPDeviceHandle):
106
+ def __init__(self):
107
+ pass
108
+
109
+ def __getattribute__(self, __name: str) -> Any:
110
+ raise RuntimeError("Trying to use an uninitialized device handle.")
111
+
112
+
113
+ class _FSDPState(_State):
114
+ def __init__(self) -> None:
115
+ # TODO: Move all the attributes to this class to enable typing for
116
+ # FSDP/fully_shard.
117
+ self._ignored_modules: Set[nn.Module] = set()
118
+ self._ignored_params: Set[nn.Parameter] = set()
119
+ # Buffer names are cleaned (without wrapper prefixes)
120
+ self._ignored_buffer_names: Set[str] = set()
121
+ self.process_group: Optional[dist.ProcessGroup] = None
122
+ self.rank: int = -1
123
+ self.world_size: int = -1
124
+ self._device_mesh: Optional[DeviceMesh] = None
125
+ self.sharding_strategy = ShardingStrategy.FULL_SHARD
126
+ self._use_orig_params: bool = False
127
+ self.training_state = TrainingState.IDLE
128
+ self._unshard_params_ctx: Dict[nn.Module, Generator] = {}
129
+ self._state_dict_type: StateDictType = StateDictType.FULL_STATE_DICT
130
+ self._state_dict_config: StateDictConfig = FullStateDictConfig()
131
+ self._optim_state_dict_config: OptimStateDictConfig = FullOptimStateDictConfig()
132
+ self._is_root: Optional[bool] = None
133
+ self._handle: Optional[flat_param_file.FlatParamHandle] = None
134
+ self._fully_sharded_module_to_handle: Dict[
135
+ nn.Module, Optional[flat_param_file.FlatParamHandle]
136
+ ] = {}
137
+ self.compute_device: Optional[torch.device] = None
138
+ self._gradient_predivide_factor: int = 0
139
+ self._gradient_postdivide_factor: int = 0
140
+ self._comm_hook: Optional[Callable] = None
141
+ self._comm_hook_state: Optional[Any] = None
142
+ # Abstract device handle for fsdp compute device. For now,
143
+ # the compute device must implement cuda semantics used by fsdp
144
+ self._device_handle: _FSDPDeviceHandle = _UninitializedDeviceHandle()
145
+ # All following attributes should only be used for root states:
146
+ # Save these static lists to avoid the repeated tree traversals
147
+ self._all_fsdp_states: List[_FSDPState] = []
148
+ self._all_handles: List[flat_param_file.FlatParamHandle] = []
149
+ self._fsdp_extension: Optional[FSDPExtensions] = None
150
+
151
+
152
+ def _get_module_fsdp_state(module: nn.Module) -> Optional[_FSDPState]:
153
+ state = _get_module_state(module)
154
+ if state is None or not isinstance(state, _FSDPState):
155
+ return None
156
+ return state
157
+
158
+
159
+ def _get_module_fsdp_state_if_fully_sharded_module(
160
+ module: nn.Module,
161
+ ) -> Optional[_FSDPState]:
162
+ state = _get_module_fsdp_state(module)
163
+ if state is None:
164
+ return None
165
+ if state == module: # FullyShardedDataParallel module case.
166
+ return state
167
+ if module in state._fully_sharded_module_to_handle: # fully_shard case.
168
+ return state
169
+ return None
170
+
171
+
172
+ class TrainingState(Enum):
173
+ """
174
+ An enum that indicates the state of a ``FullyShardedDataParallel` instance.
175
+ """
176
+
177
+ IDLE = auto()
178
+ FORWARD_BACKWARD = auto()
179
+ SUMMON_FULL_PARAMS = auto()
180
+
181
+
182
+ class HandleTrainingState(Enum):
183
+ """
184
+ An enum that indicates the state of a ``FlatParamHandle`.
185
+ """
186
+
187
+ IDLE = auto()
188
+ FORWARD = auto()
189
+ BACKWARD_PRE = auto()
190
+ BACKWARD_POST = auto()
191
+ SUMMON_FULL_PARAMS = auto()
192
+
193
+
194
+ def _is_composable(state: _FSDPState):
195
+ # TODO: This is a temporary hack for differentiate between code paths.
196
+ return not isinstance(state, nn.Module)
197
+
198
+
199
+ @no_type_check
200
+ def _module_handle(state: _FSDPState, module: nn.Module) -> Optional["FlatParamHandle"]:
201
+ """
202
+ Returns the ``FlatParamHandle`` s corresponding to ``module``. This is
203
+ the handle that contains some parameter in ``module``.
204
+ """
205
+ if _is_composable(state):
206
+ # A valid FSDP state may have no managed parameters and hence no
207
+ # handles, meaning no entry in `_fully_sharded_module_to_handles`
208
+ if state._handle is None:
209
+ return None
210
+ assert (
211
+ module in state._fully_sharded_module_to_handle
212
+ ), f"Expects a fully sharded module but got {module} on rank {state.rank}"
213
+ return state._fully_sharded_module_to_handle[module]
214
+ else:
215
+ # NOTE: This assumes `module` is a `FullyShardedDataParallel` instance.
216
+ return module._handle
217
+
218
+
219
+ @no_type_check
220
+ def _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool:
221
+ """Returns if ``module`` has parameters managed by FSDP."""
222
+ return _module_handle(state, module) is not None
223
+
224
+
225
+ def _get_sharding_strategy(handle):
226
+ """
227
+ Returns the sharding strategy of the handle.
228
+ """
229
+ return handle._sharding_strategy if handle else None
230
+
231
+
232
+ def clean_tensor_name(tensor_name: str) -> str:
233
+ """
234
+ Cleans the parameter or buffer name by removing any module wrapper
235
+ prefixes.
236
+ """
237
+ tensor_name = tensor_name.replace(FSDP_PREFIX, "")
238
+ # TODO: Explicitly replacing the checkpoint wrapper prefix is not ideal as
239
+ # it couples `CheckpointWrapper` and FSDP and also does not scale for more
240
+ # module wrappers.
241
+ tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX, "")
242
+ return tensor_name
243
+
244
+
245
+ def _set_fsdp_flattened(tensor: torch.Tensor) -> None:
246
+ """
247
+ Sets an attribute on ``tensor`` to mark it as flattened by FSDP. This is to
248
+ avoid re-flattening it during nested construction.
249
+ """
250
+ setattr(tensor, FSDP_FLATTENED, True)
251
+
252
+
253
+ def _is_fsdp_flattened(tensor: torch.Tensor) -> bool:
254
+ """Returns if ``tensor`` has been marked as flattened by FSDP."""
255
+ return getattr(tensor, FSDP_FLATTENED, False)
256
+
257
+
258
+ def _named_parameters_with_duplicates(
259
+ module: nn.Module, **kwargs: Any
260
+ ) -> List[Tuple[str, nn.Parameter]]:
261
+ """
262
+ This API is required as some modules overwrite `named_parameters()` but do not support
263
+ `remove_duplicate`.
264
+ """
265
+ assert (
266
+ "remove_duplicate" not in kwargs
267
+ ), "_named_parameters_with_duplicates cannot be used with `remove_duplicate` argument."
268
+ kwargs["remove_duplicate"] = False
269
+ try:
270
+ ret = list(module.named_parameters(**kwargs))
271
+ except AssertionError as e:
272
+ kwargs.pop("remove_duplicate")
273
+ ret = list(module.named_parameters(**kwargs))
274
+ return ret
275
+
276
+
277
+ def _get_param_to_fqns(
278
+ model: torch.nn.Module,
279
+ dedup_shared_params: bool = True,
280
+ ) -> Dict[nn.Parameter, List[str]]:
281
+ """
282
+ Constructs a mapping from parameter to a list of its \"canonical\" FQNs. Here,
283
+ we use canonical to mean the fully-qualified name assigned to the parameter
284
+ based on its position in the original nn.Module hierarchy before any wrapper
285
+ or parallelism has been applied to it. This is in contrast to FQNs that may be
286
+ generated after parallelisms or wrappers have been applied to the model.
287
+
288
+ Each normal parameter maps to a singleton list containing its FQN, while each
289
+ ``FlatParameter`` maps to a list of its original parameter FQNs, which may
290
+ have length greater than one. All FQNs are prefixed starting from ``model``.
291
+
292
+ In the case where FSDP was applied with ``use_orig_params=True``, there should be no
293
+ ``FlatParameter`` s registered to the model's modules and this mapping will only
294
+ contain mappings from ``nn.Parameter`` s to singleton FQN lists.
295
+
296
+ It is only in the case where FSDP was applied with ``use_orig_params=False`` where
297
+ a ``FlatParameter`` will be registered in place of the original parameters and there
298
+ will be mappings from each ``FlatParameter`` to lists of FQNs corresponding to the
299
+ original parameters.
300
+
301
+ Args:
302
+ model (torch.nn.Module): Root module (which may or may not be a
303
+ :class:`FullyShardedDataParallel` instance).
304
+ dedup_shared_params (bool): For shared parameters, if ``True``, only
305
+ includes the FQNs corresponding to the first encounter of the
306
+ shared parameter in the module traversal; if ``False``, then
307
+ includes the FQNs across all encounters. (Default: ``True``)
308
+ """
309
+
310
+ def module_fn(module, prefix, tree_level, param_to_fqns):
311
+ for param_name, param in _named_parameters_with_duplicates(
312
+ module, recurse=False
313
+ ):
314
+ local_fqns = (
315
+ param._fqns
316
+ if isinstance(param, flat_param_file.FlatParameter)
317
+ else [param_name]
318
+ ) # prefixed from `module`
319
+ global_fqns = [
320
+ clean_tensor_name(prefix + name) for name in local_fqns
321
+ ] # prefixed from the top level `model` (i.e. including `prefix`)
322
+ is_shared_param = param in param_to_fqns
323
+ if not is_shared_param:
324
+ param_to_fqns[param] = global_fqns
325
+ else:
326
+ if isinstance(param, flat_param_file.FlatParameter):
327
+ # DMP overwrites `named_parameters` and skip (advance to
328
+ # the next child module) the wrapped_module (e.g.,
329
+ # _dmp_wrapped_module and _fsdp_wrapped_module). When a user
330
+ # calls `named_child` to traverse the module recursively and
331
+ # calls `named_parameters` with `recurse=False`, parameters
332
+ # will be traversed more than once.
333
+ # This hack is specifically designed for DMP + FSDP. We
334
+ # overwrite the flat_parameters traversal result to only obtain
335
+ # the last one, which happens to be the correct one.
336
+ #
337
+ # TODO: Remove this hack once DMP + FSDP is not supported.
338
+ warnings.warn(
339
+ "FlatParameter is being traversed more than once. "
340
+ "This case should only happen when using "
341
+ "DistributedModelParallel with FullyShardedDataParallel."
342
+ )
343
+ param_to_fqns[param] = global_fqns
344
+ elif not dedup_shared_params:
345
+ param_to_fqns[param].extend(global_fqns)
346
+
347
+ def return_fn(param_to_fqns):
348
+ return param_to_fqns
349
+
350
+ param_to_unflat_param_names: Dict[torch.nn.Parameter, List[str]] = {}
351
+ return _apply_to_modules(
352
+ model,
353
+ module_fn,
354
+ return_fn,
355
+ [key for key, _ in _named_parameters_with_duplicates(model)],
356
+ param_to_unflat_param_names,
357
+ )
358
+
359
+
360
+ @no_type_check
361
+ def _log_post_backward_hook(
362
+ state: _FSDPState, handle: "FlatParamHandle", log: logging.Logger
363
+ ) -> None:
364
+ # Under TORCH_DISTRIBUTED_DEBUG=INFO, log the module names this hook fires for.
365
+ # Below logging of module names this post-bwd hook fires for can help debug certain
366
+ # cases where hooks don't fire, such as under certain activation checkpoint configs.
367
+ if state._use_orig_params and handle._debug_level == dist.DebugLevel.INFO:
368
+ param_fqns = _get_handle_fqns_from_root(state, handle)
369
+ log.warning("FSDP firing post-backward hooks for parameters %s", param_fqns)
370
+
371
+
372
+ @no_type_check
373
+ def _get_handle_fqns_from_root(
374
+ state: _FSDPState, handle: "FlatParamHandle"
375
+ ) -> Optional[List[str]]:
376
+ if handle is None:
377
+ return None
378
+ param_to_fqn = state._exec_order_data.param_to_fqn
379
+ handle_params = handle.flat_param._params # only populated for use_orig_params
380
+ param_fqns = [
381
+ fqn for fqn_list in [param_to_fqn[p] for p in handle_params] for fqn in fqn_list
382
+ ]
383
+ return param_fqns
384
+
385
+
386
+ def _apply_to_modules(
387
+ root_module: torch.nn.Module,
388
+ module_fn: Callable,
389
+ return_fn: Callable,
390
+ filter_fqns: Optional[List[str]] = None,
391
+ *args,
392
+ **kwargs,
393
+ ):
394
+ """
395
+ Performs a pre-order traversal of the modules in the hierarchy rooted at
396
+ ``root_module``, applying ``module_fn`` at each module and finally
397
+ returning a value using ``return_fn``. The traversal constructs the full
398
+ module prefix name (e.g. "module.submodule." just like in model state dict)
399
+ and makes that available to ``module_fn``.
400
+
401
+ ``filter_fqns`` is used because some module may have its own prefix similar
402
+ to ``FullyShardedDataParallel`` and the ``named_parameters()`` is overwritten
403
+ to remove the prefix.
404
+ """
405
+
406
+ def f(module: torch.nn.Module, prefix: str, tree_level: int, *args, **kwargs):
407
+ # Call the module function before recursing over children (pre-order)
408
+ module_fn(module, prefix, tree_level, *args, **kwargs)
409
+ for submodule_name, submodule in module.named_children():
410
+ if submodule is None:
411
+ continue
412
+ new_prefix = prefix + submodule_name + "."
413
+ new_tree_level = tree_level + 1
414
+ if filter_fqns is not None:
415
+ for fqn in filter_fqns:
416
+ if fqn.startswith(new_prefix):
417
+ break
418
+ else:
419
+ # DMP's named_parameter() will mess up the traversal with
420
+ # ``named_children`` + `named_parameter(recurse=False)``.
421
+ # This hack is a must to make the traversal work.
422
+ # TODO: Remove this hack once DMP + FSDP is not supported.
423
+ if (
424
+ submodule_name == "_fsdp_wrapped_module"
425
+ or submodule_name == "_dmp_wrapped_module"
426
+ ):
427
+ if (
428
+ not torch.distributed._functional_collectives.is_torchdynamo_compiling()
429
+ ):
430
+ # TODO(voz): Don't graph break on this
431
+ warnings.warn(
432
+ "An unexpected prefix is detected. This case "
433
+ " should only happen when using DMP with FSDP. "
434
+ f"prefix = {prefix}, "
435
+ f"submodule_name = {submodule_name}"
436
+ )
437
+ new_prefix = prefix
438
+ elif submodule_name == "module":
439
+ warnings.warn(
440
+ "An unexpected prefix is detected. This case "
441
+ " should only happen when DDP wraps the outer "
442
+ " modules while FSDP wraps the inner ones."
443
+ f"prefix = {prefix}, "
444
+ f"submodule_name = {submodule_name}"
445
+ )
446
+ new_prefix = prefix
447
+ f(submodule, new_prefix, new_tree_level, *args, **kwargs)
448
+
449
+ f(root_module, "", 0, *args, **kwargs)
450
+ return return_fn(*args, **kwargs)
451
+
452
+
453
+ @no_type_check
454
+ def _assert_in_training_states(
455
+ state: _FSDPState,
456
+ training_states: List[TrainingState],
457
+ ) -> None:
458
+ """Asserts that FSDP is in the states ``_training_states``."""
459
+ # Raise a `ValueError` instead of using `assert` to ensure that these
460
+ # logical assertions run even if `assert`s are disabled
461
+ if state.training_state not in training_states:
462
+ msg = (
463
+ f"expected to be in states {training_states} but current state is "
464
+ f"{state.training_state}"
465
+ )
466
+ # Print the error on rank 0 in case this is called in the backward pass
467
+ if state.rank == 0:
468
+ if isinstance(state, nn.Module):
469
+ print(f"Asserting FSDP instance is: {state}")
470
+ print(f"ERROR: {msg}")
471
+ traceback.print_stack()
472
+ raise ValueError(msg)
473
+
474
+
475
+ def _get_root_modules(modules: Set[nn.Module]) -> Set[nn.Module]:
476
+ """
477
+ Returns:
478
+ Set[nn.Module]: The subset of ``modules`` that are root modules (i.e.
479
+ parent-less) with respect to the modules in the set itself. In other
480
+ words, these are the modules in ``modules`` that are not the child of
481
+ any other module in ``modules``.
482
+ """
483
+ root_modules: Set[nn.Module] = set()
484
+ module_to_submodules = {module: set(module.modules()) for module in modules}
485
+ for candidate_module in modules:
486
+ is_root_module = True
487
+ for module, submodules in module_to_submodules.items():
488
+ is_child_module = (
489
+ candidate_module is not module and candidate_module in submodules
490
+ )
491
+ if is_child_module:
492
+ is_root_module = False
493
+ break
494
+ if is_root_module:
495
+ root_modules.add(candidate_module)
496
+ return root_modules
497
+
498
+
499
+ def _override_module_mixed_precision(
500
+ root: torch.nn.Module,
501
+ module_classes_to_override: Iterable[Type[nn.Module]],
502
+ wrap_override_dict: Dict[str, Any] = {"mixed_precision": None}, # noqa: B006
503
+ ) -> Set[Type[nn.Module]]:
504
+ module_classes_to_override = tuple(set(module_classes_to_override))
505
+ # Return a set of the actually overridden module classes
506
+ overridden_module_classes: Set[Type[nn.Module]] = set()
507
+ for mod in root.modules():
508
+ if isinstance(mod, module_classes_to_override):
509
+ overridden_module_classes.add(type(mod))
510
+ mod._wrap_overrides = wrap_override_dict # type: ignore[assignment]
511
+ # TODO: We need to run this mixed precision ignored module in fp32,
512
+ # but ensure subsequent modules, that may possibly be running with
513
+ # mixed precision, still receive the appropriate precision inputs
514
+ # without user having to adjust mixed precision config too much.
515
+ # As a result, we attach pre and post forward hooks to up / down
516
+ # cast. We should revisit this design.
517
+
518
+ def cast_fn(
519
+ dtype: torch.dtype, module: nn.Module, x: torch.Tensor
520
+ ) -> torch.Tensor:
521
+ if not torch.is_floating_point(x) or x.dtype == dtype:
522
+ return x
523
+ _MODULE_TO_INP_DTYPE[module] = x.dtype
524
+ return x.to(dtype)
525
+
526
+ def forward_pre_hook(module, args):
527
+ return _apply_to_tensors(partial(cast_fn, torch.float32, module), args)
528
+
529
+ def forward_post_hook(module, args, output):
530
+ # NOTE: If the forward did not have any floating-point tensors,
531
+ # then the dtype will not be set for this module, and we do not
532
+ # upcast the dtype.
533
+ if module in _MODULE_TO_INP_DTYPE:
534
+ old_dtype = _MODULE_TO_INP_DTYPE[module]
535
+ return _apply_to_tensors(
536
+ partial(cast_fn, old_dtype, module), output
537
+ )
538
+
539
+ # We intentionally append both of these hooks so that they run after
540
+ # all other hooks.
541
+ mod.register_forward_pre_hook(forward_pre_hook, prepend=False)
542
+ mod.register_forward_hook(forward_post_hook, prepend=False)
543
+ return overridden_module_classes
544
+
545
+
546
+ def _no_dispatch_record_stream(tensor: torch.Tensor, stream: torch.Stream) -> None:
547
+ # FIXME record_stream doesn't work with non-cuda tensors
548
+ if tensor.device.type not in ["cuda", torch._C._get_privateuse1_backend_name()]:
549
+ return
550
+
551
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
552
+ return
553
+ # from @ezyang:
554
+ # The no_dispatch was added in https://github.com/pytorch/pytorch/pull/88014 cc @fegin
555
+ # Looking over the PR, it looks like this is because we don't actually support Stream arguments
556
+ # in torch dispatch, so it just chokes.
557
+ # If Dynamo is able to answer "are there any torch dispatch modes" active (it should answer False),
558
+ # a better version of this would just be to check if there are any modes before disabling dispatch.
559
+ # TODO(voz): Extend a dynamo util to answer the above, unify the codepaths here.
560
+ tensor.record_stream(stream)
561
+ else:
562
+ with no_dispatch():
563
+ tensor.record_stream(stream)
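Two of the helpers above are easy to sanity-check in isolation. A small sketch (these are private utilities, so the import path may change between releases):

    import torch.nn as nn
    from torch.distributed.fsdp._common_utils import _get_root_modules, clean_tensor_name

    # clean_tensor_name strips the FSDP (and checkpoint) wrapper prefixes from an FQN.
    fqn = "_fsdp_wrapped_module.layer.0._fsdp_wrapped_module.weight"
    assert clean_tensor_name(fqn) == "layer.0.weight"

    # _get_root_modules keeps only the modules that are not children of another
    # module in the given set.
    outer = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    inner = outer[0]
    assert _get_root_modules({outer, inner}) == {outer}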
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_debug_utils.py ADDED
@@ -0,0 +1,155 @@
1
+ import logging
2
+ import time
3
+ from collections import defaultdict
4
+ from contextlib import contextmanager
5
+ from enum import Enum
6
+ from typing import Dict, Iterator, List, Set, Tuple
7
+
8
+ import torch
9
+ import torch.distributed as dist
10
+ import torch.distributed.fsdp._flat_param as flat_param_file
11
+ from torch.distributed.fsdp._common_utils import (
12
+ _apply_to_modules,
13
+ _get_module_fsdp_state,
14
+ clean_tensor_name,
15
+ )
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ class SimpleProfiler:
21
+ class Type(str, Enum):
22
+ ALL = "all"
23
+ ALLGATHER = "all_gather"
24
+ ALLGATHER_OBJ = "all_gather_object"
25
+ RESHARDING = "resharding"
26
+ H2D = "H2D"
27
+ D2H = "D2H"
28
+
29
+ results: Dict[str, float] = defaultdict(float)
30
+ profiling: Set[str] = set()
31
+
32
+ @classmethod
33
+ def reset(cls) -> None:
34
+ cls.results.clear()
35
+ cls.profiling.clear()
36
+
37
+ @classmethod
38
+ @contextmanager
39
+ def profile(cls, profile_type: str) -> Iterator[None]:
40
+ assert profile_type not in cls.profiling, (
41
+ f"{profile_type} is already being profiled. "
42
+ "SimpleProfiler does not support profiling multiple instances at "
43
+ "the same time. "
44
+ )
45
+
46
+ cls.profiling.add(profile_type)
47
+ begin = time.monotonic()
48
+ try:
49
+ yield
50
+ finally:
51
+ end = time.monotonic()
52
+ cls.results[profile_type] += end - begin
53
+ cls.profiling.remove(profile_type)
54
+
55
+ @classmethod
56
+ def dump_and_reset(cls, msg: str) -> None:
57
+ # This cannot be combined with DETAIL distributed log
58
+ # as the profiling will be very incorrect.
59
+ if dist.get_rank() == 0 and dist.get_debug_level() == dist.DebugLevel.INFO:
60
+ logger.warning("%s %s", msg, cls.results)
61
+ cls.reset()
62
+
63
+
64
+ def _get_sharded_module_tree_with_module_name_to_fqns(
65
+ model: torch.nn.Module,
66
+ ) -> Tuple[str, Dict[str, List[str]]]:
67
+ """
68
+ It is used for composable fully_shard() code path, it returns
69
+ 1. sharded module tree info: each line reprents a submodule name that contats the
70
+ submodule's FQN and its submodule class name, if the submodule is sharded by `fully_shard`,
71
+ the submodule name will add a postfix with ' FULLY SHARDED'. Each increased tree
72
+ level adds 4 spaces before the printed name. A printed sharded module tree info for a toy model
73
+ is like this:
74
+ [CompositeModel] FULLY SHARDED
75
+ l1[Linear]
76
+ u1[UnitModule] FULLY SHARDED
77
+ u1.l1[Linear]
78
+ u1.seq[Sequential]
79
+ u1.seq.0[ReLU]
80
+ u1.seq.1[Linear]
81
+ u1.seq.2[ReLU]
82
+ u1.l2[Linear]
83
+ u2[UnitModule] FULLY SHARDED
84
+ u2.l1[Linear]
85
+ u2.seq[Sequential]
86
+ u2.seq.0[ReLU]
87
+ u2.seq.1[Linear]
88
+ u2.seq.2[ReLU]
89
+ u2.l2[Linear]
90
+ l2[Linear]
91
+ 2. a dict mapping from the concated module FQN and class name to a list of its managed
92
+ original parameters' FQNs. An example of the dict for the above toy sharded model is like this:
93
+ {'[CompositeModel]': ['l1.weight', 'l1.bias', 'l2.weight', 'l2.bias'],
94
+ 'u1[UnitModule]': ['u1.l1.weight', 'u1.l1.bias', 'u1.seq.1.weight', 'u1.seq.1.bias', 'u1.l2.weight', 'u1.l2.bias'],
95
+ 'u2[UnitModule]': ['u2.l1.weight', 'u2.l1.bias', 'u2.seq.1.weight', 'u2.seq.1.bias', 'u2.l2.weight', 'u2.l2.bias']
96
+ }
97
+ All FQNs are prefixed starting from ``model``.
98
+
99
+ Args:
100
+ model (torch.nn.Module): Root module (which may or may not be passed to
101
+ composable `fully_shard()`).
102
+ """
103
+
104
+ def module_fn(
105
+ module, prefix, tree_level, sharded_tree_info, sharded_module_name_to_fqns
106
+ ):
107
+ num_spaces = tree_level * 4
108
+ trimed_prefix = (
109
+ prefix[:-1] if (len(prefix) > 0 and prefix[-1] == ".") else prefix
110
+ )
111
+ prefixed_module_name = trimed_prefix + "[" + module.__class__.__name__ + "]"
112
+ printed_prefixed_module_name = " " * num_spaces + prefixed_module_name
113
+
114
+ state = _get_module_fsdp_state(module)
115
+ if state is None:
116
+ sharded_tree_info[0] += printed_prefixed_module_name + "\n"
117
+ return
118
+
119
+ handle = state._fully_sharded_module_to_handle.get(module, None)
120
+
121
+ if handle:
122
+ sharded_tree_info[0] += (
123
+ printed_prefixed_module_name + " FULLY SHARDED" + "\n"
124
+ )
125
+ else:
126
+ sharded_tree_info[0] += printed_prefixed_module_name + "\n"
127
+
128
+ if handle:
129
+ param = handle.flat_param
130
+ assert isinstance(param, flat_param_file.FlatParameter)
131
+ global_fqns = [
132
+ clean_tensor_name(prefix + name) for name in param._fqns
133
+ ] # prefixed from the top level `model` (i.e. including `prefix`)
134
+
135
+ if prefixed_module_name in sharded_module_name_to_fqns:
136
+ sharded_module_name_to_fqns[prefixed_module_name].extend(global_fqns)
137
+ else:
138
+ sharded_module_name_to_fqns[prefixed_module_name] = global_fqns
139
+
140
+ def return_fn(sharded_tree_info, sharded_module_name_to_fqns):
141
+ return sharded_tree_info[0], sharded_module_name_to_fqns
142
+
143
+ # Use List to mutate its value in place while running the recursive functions
144
+ sharded_tree_info: List[str] = [
145
+ "",
146
+ ]
147
+ sharded_module_name_to_fqns: Dict[str, List[str]] = {}
148
+ return _apply_to_modules(
149
+ model,
150
+ module_fn,
151
+ return_fn,
152
+ [key for key, _ in model.named_parameters()],
153
+ sharded_tree_info,
154
+ sharded_module_name_to_fqns,
155
+ )
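The SimpleProfiler defined above accumulates wall-clock seconds per label in class-level state. A minimal single-process sketch that reads the results directly instead of calling dump_and_reset (which expects an initialized process group):

    import time
    from torch.distributed.fsdp._debug_utils import SimpleProfiler

    with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
        time.sleep(0.01)  # stand-in for an all-gather

    print(dict(SimpleProfiler.results))  # accumulated seconds keyed by label
    SimpleProfiler.reset()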
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py ADDED
@@ -0,0 +1,45 @@
+ from typing import Set
+
+ import torch.nn as nn
+
+
+ def _annotate_modules_for_dynamo(
+     module: nn.Module,
+     ignored_modules: Set[nn.Module],
+     use_orig_params: bool,
+ ):
+     """
+     Annotates the submodules in ``module`` 's tree, except those in
+     ``ignored_modules``, indicating that the submodules are FSDP-managed and
+     saving the ``use_orig_params`` setting passed to the FSDP constructor.
+     """
+     for submodule in module.modules():
+         if submodule not in ignored_modules:
+             """[note: Dynamo treats FSDP wrapped modules as UnspecializedNNModule]
+
+             Dynamo doesn't get to see this instance (FullyShardedDataParallel) during tracing, since
+             it skips tracing all the torch.distributed.fsdp code.
+             - Why? Running the FSDP code eagerly avoids lots of issues trying to trace complex hooks, and also
+               gets us graph-breaks on FSDP module boundaries which we want anyway for comm ops.
+             - However, we _also_ want dynamo to treat the wrapped module inside FSDP 'unspecially' (*),
+               and we need a way to indicate to dynamo which modules are wrapped by FSDP.
+
+             (*) UnspecializedNNModules in dynamo are traced-through without any assumptions, and with thorough
+             guards. NNModules otherwise are 'specialized', meaning there is less overhead due to assuming
+             their code is well-behaved.
+
+             One particular issue with specialized NNModules for FSDP is that the
+             views created for orig_params are captured into the compiled graph on the first iteration, and while
+             they are always going to point to the correct flatparameter and give correct results, their order
+             of creation influences the order of backward execution, preventing overlap of comm and computation
+             during backward. We need to _use_ the new parameter views created on each forward iteration, in
+             order for backward to interleave hooks with compute per layer. UnspecializedNNModule lets us achieve
+             this by capturing the module code more 'functionally' and passing parameters in as inputs each time.
+             """
+             submodule._is_fsdp_managed_module = True  # type: ignore[assignment]
+
+             # Dynamo only supports FSDP with use_orig_params=True.
+             # This is hacky, but I could not think of another way to add an assertion to dynamo
+             # for this, since Dynamo skips all the FSDP code frames and thus can't inspect the
+             # FSDP module directly
+             submodule._fsdp_use_orig_params = use_orig_params  # type: ignore[assignment]
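The annotation above is just two attributes stamped onto every non-ignored submodule. A quick sketch of the effect (private helper, toy module):

    import torch.nn as nn
    from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo

    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
    _annotate_modules_for_dynamo(model, ignored_modules={model[1]}, use_orig_params=True)

    assert model[0]._is_fsdp_managed_module and model[0]._fsdp_use_orig_params
    assert not hasattr(model[1], "_is_fsdp_managed_module")  # ignored module untouched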
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_exec_order_utils.py ADDED
@@ -0,0 +1,364 @@
1
+ import itertools
2
+ import warnings
3
+ from enum import auto, Enum
4
+ from typing import Dict, List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
9
+ import torch.nn as nn
10
+ from torch.distributed.fsdp._common_utils import _FSDPState, _get_param_to_fqns
11
+ from torch.distributed.fsdp._flat_param import FlatParamHandle
12
+
13
+
14
+ class _ExecOrderWarnStatus(Enum):
15
+ """Used internally for execution order validation."""
16
+
17
+ NONE = auto() # no deviation yet
18
+ WARNING = auto() # deviated this iteration; currently issuing warnings
19
+ WARNED = auto() # deviated in a previous iteration
20
+
21
+
22
+ class _ExecOrderData:
23
+ """
24
+ This contains the data structures to track the execution order. We track
25
+ the pre-forward order on the *first* iteration for forward prefetching
26
+ (which thus assumes static graph) and the post-forward order on *every*
27
+ iteration for backward prefetching (which thus does not assume static
28
+ graph but may be provide an incorrect order).
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ debug_level: dist.DebugLevel,
34
+ backward_prefetch_limit: int,
35
+ forward_prefetch_limit: int,
36
+ ) -> None:
37
+ # Tracks the (static) pre-forward order for execution order validation
38
+ # and forward prefetching
39
+ self.handles_pre_forward_order: List[FlatParamHandle] = []
40
+ # Tracks the post-forward order for pre-backward prefetching
41
+ self.handles_post_forward_order: List[Optional[FlatParamHandle]] = []
42
+ self._iter = 0
43
+
44
+ # Gives the max number of backward/forward prefetched all-gathers by a
45
+ # single module
46
+ self._backward_prefetch_limit = backward_prefetch_limit
47
+ self._forward_prefetch_limit = forward_prefetch_limit
48
+
49
+ # Data structures for execution order validation
50
+ self._checking_order: bool = debug_level == dist.DebugLevel.DETAIL
51
+ self.process_group: Optional[dist.ProcessGroup] = None
52
+ self.world_size: Optional[int] = None
53
+ self.all_handles: List[FlatParamHandle] = []
54
+ # Names are prefixed from the root module
55
+ self.param_to_fqn: Dict[nn.Parameter, List[str]] = {}
56
+ # Current index in the pre-forward execution order
57
+ self.current_order_index = 0
58
+ self.warn_status = _ExecOrderWarnStatus.NONE
59
+
60
+ def init(
61
+ self,
62
+ state: _FSDPState,
63
+ root_module: nn.Module,
64
+ process_group: dist.ProcessGroup,
65
+ ) -> None:
66
+ """
67
+ Initializes the data structures needed for checking the forward order.
68
+ This should be called after a root FSDP instance has been set during
69
+ lazy initialization.
70
+ """
71
+ self.process_group = process_group
72
+ self.rank = process_group.rank()
73
+ self.world_size = process_group.size()
74
+ # Fix an order over the handles, which should be the same across ranks
75
+ for handle in traversal_utils._get_fsdp_handles(root_module):
76
+ index = len(self.all_handles)
77
+ self.all_handles.append(handle)
78
+ handle._handle_index = index
79
+ self.param_to_fqn = _get_param_to_fqns(root_module)
80
+ # TODO (awgu): We can broadcast the metadata of rank 0's `all_handles`
81
+ # to check that all ranks have the same handles in the same order.
82
+ # https://github.com/pytorch/pytorch/issues/79620
83
+
84
+ @property
85
+ def is_first_iter(self) -> bool:
86
+ return self._iter == 0
87
+
88
+ def get_handle_to_backward_prefetch(
89
+ self,
90
+ current_handle: FlatParamHandle,
91
+ ) -> Optional[FlatParamHandle]:
92
+ """
93
+ Returns a :class:`list` of the handles keys of the handles to backward
94
+ prefetch given the current handles key. If there are no valid handles
95
+ keys to prefetch, then this returns an empty :class:`list`.
96
+ """
97
+ current_index = current_handle._post_forward_index
98
+ if current_index is None:
99
+ return None
100
+ target_index = current_index - 1
101
+ target_handle: Optional[FlatParamHandle] = None
102
+ for _ in range(self._backward_prefetch_limit):
103
+ if target_index < 0:
104
+ break
105
+ target_handle = self.handles_post_forward_order[target_index]
106
+ target_index -= 1
107
+ return target_handle
108
+
109
+ def get_handle_to_forward_prefetch(
110
+ self,
111
+ current_handle: FlatParamHandle,
112
+ ) -> Optional[FlatParamHandle]:
113
+ """
114
+ Returns a :class:`list` of the handles keys of the handles to forward
115
+ prefetch given the current handles key. If there are no valid handles
116
+ keys to prefetch, then this returns an empty :class:`list`.
117
+ """
118
+ current_index = current_handle._pre_forward_order_index
119
+ if current_index is None:
120
+ return None
121
+ target_index = current_index + 1
122
+ target_handle: Optional[FlatParamHandle] = None
123
+ for _ in range(self._forward_prefetch_limit):
124
+ if target_index >= len(self.handles_pre_forward_order):
125
+ break
126
+ target_handle = self.handles_pre_forward_order[target_index]
127
+ target_index += 1
128
+ return target_handle
129
+
130
+ def record_post_forward(self, handle: Optional[FlatParamHandle]) -> None:
131
+ """
132
+ Records ``handles`` in the post-forward order, where ``handles`` should
133
+ be a group of handles used in the same module's forward. If ``handles``
134
+ is empty, then it is omitted.
135
+
136
+ Unlike :meth:`record_pre_forward`, this records the order *every*
137
+ iteration with the expectation that the recorded order is reset in
138
+ :meth:`next_iter`.
139
+ """
140
+ if not handle:
141
+ return
142
+ # Only record the first usage of a handles key
143
+ if handle._post_forward_index:
144
+ self.handles_post_forward_order.append(handle)
145
+ return
146
+ index = len(self.handles_post_forward_order)
147
+ handle._post_forward_index = index
148
+ self.handles_post_forward_order.append(handle)
149
+
150
+ def record_pre_forward(
151
+ self, handle: Optional[FlatParamHandle], is_training: bool
152
+ ) -> None:
153
+ """
154
+ Records ``handles`` in the pre-forward order, where ``handles`` should
155
+ be a group of handles used in the same module's forward. If ``handles``
156
+ is empty, then it is omitted.
157
+
158
+ On the first iteration, this checks the execution order across ranks.
159
+ See :meth:`_check_order` for details.
160
+ """
161
+ if not handle:
162
+ return
163
+ self._check_order(handle, is_training)
164
+ # Fix the order after the first iteration and only record the first
165
+ # usage of a handles key
166
+ if not self.is_first_iter or handle._pre_forward_order_index is not None:
167
+ return
168
+ index = len(self.handles_pre_forward_order)
169
+ handle._pre_forward_order_index = index
170
+ self.handles_pre_forward_order.append(handle)
171
+
172
+ def _check_order(self, handle: FlatParamHandle, is_training: bool) -> None:
173
+ """
174
+ Checks the forward execution order as long as ``is_training`` is
175
+ ``True`` since checking in eval mode is not supported. This only checks
176
+ if the distributed debug level is DETAIL.
177
+
178
+ - On the first iteration, this uses all-gathers to check that all ranks
179
+ are all-gathering the same handles and hence ``FlatParameter`` s,
180
+ raising an error if not.
181
+ - On subsequent iterations, this checks that each rank is locally
182
+ consistent with its own forward order from the first iteration, issuing
183
+ a warning if not. This issues a warning on the first deviating
184
+ iteration and stops warning thereafter.
185
+ """
186
+ # Do not check order in eval mode since the post-backward callback does
187
+ # not run so it cannot be used to mark the end of an iteration
188
+ if not is_training or not self._checking_order:
189
+ return
190
+ if self.is_first_iter:
191
+ msg_prefix = "Forward order differs across ranks:"
192
+ optional_local_indices: Tuple[
193
+ Optional[int], ...
194
+ ] = self._get_handle_indices(handle)
195
+ device = handle.device # guaranteed to be non-CPU
196
+ num_valid_indices = sum(
197
+ (index is not None) for index in optional_local_indices
198
+ )
199
+ tensor_kwargs: Dict[str, Union[torch.dtype, torch.device]] = {
200
+ "dtype": torch.int32,
201
+ "device": device,
202
+ }
203
+ world_num_valid_indices = torch.zeros(self.world_size, **tensor_kwargs) # type: ignore[arg-type, call-overload]
204
+ local_num_valid_indices = torch.tensor([num_valid_indices], **tensor_kwargs) # type: ignore[arg-type, call-overload]
205
+ dist.all_gather_into_tensor(
206
+ world_num_valid_indices,
207
+ local_num_valid_indices,
208
+ group=self.process_group,
209
+ )
210
+ # Copy entire tensor from D2H once to avoid per element D2H copies
211
+ world_num_valid_indices = world_num_valid_indices.cpu()
212
+ # Check that all ranks plan to all-gather the same number of
213
+ # parameters
214
+ # TODO (awgu): Since every module has at most one handle in the
215
+ # current implementation, this should never raise the error.
216
+ assert self.world_size is not None # mypy
217
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
218
+ # TODO(voz): Don't graph break on this - dynamo hates the n1 != n2
219
+ # tensor comparison control flow.
220
+ # https://github.com/pytorch/pytorch/issues/107055
221
+ for (r1, n1), (r2, n2) in itertools.combinations(
222
+ (
223
+ (rank, world_num_valid_indices[rank])
224
+ for rank in range(self.world_size)
225
+ ),
226
+ 2,
227
+ ):
228
+ if n1 != n2:
229
+ raise RuntimeError(
230
+ f"{msg_prefix} rank {r1} is all-gathering {n1} parameters "
231
+ f"while rank {r2} is all-gathering {n2} parameters"
232
+ )
233
+ world_indices = torch.zeros( # type: ignore[call-overload]
234
+ self.world_size * num_valid_indices, **tensor_kwargs
235
+ )
236
+ local_indices = torch.tensor(optional_local_indices, **tensor_kwargs) # type: ignore[arg-type]
237
+ dist.all_gather_into_tensor(
238
+ world_indices, local_indices, group=self.process_group
239
+ )
240
+ # Copy entire tensor from D2H once to avoid per element D2H copies
241
+ world_indices = world_indices.cpu()
242
+ # Check that all ranks plan to all-gather the same index parameters
243
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
244
+ # TODO(voz): Don't graph break on this - dynamo hates the i1 != i2
245
+ # tensor comparison control flow.
246
+ # https://github.com/pytorch/pytorch/issues/107055
247
+ for (r1, i1), (r2, i2) in itertools.combinations(
248
+ (
249
+ (
250
+ rank,
251
+ world_indices[
252
+ rank
253
+ * num_valid_indices : (rank + 1)
254
+ * num_valid_indices
255
+ ],
256
+ )
257
+ for rank in range(self.world_size)
258
+ ),
259
+ 2,
260
+ ):
261
+ if i1 != i2:
262
+ r1_param_names = self._get_names_from_handle_indices(i1)
263
+ r2_param_names = self._get_names_from_handle_indices(i2)
264
+ raise RuntimeError(
265
+ f"{msg_prefix} rank {r1} is all-gathering parameters "
266
+ f"for {r1_param_names} while rank {r2} is all-gathering "
267
+ f"parameters for {r2_param_names}"
268
+ )
269
+ else:
270
+ # Only issue warnings on the first deviating iteration and stop
271
+ # checking thereafter to avoid flooding the console
272
+ if self.warn_status == _ExecOrderWarnStatus.WARNED:
273
+ return
274
+ msg_prefix = None # non-`None` means we should warn
275
+ if self.current_order_index >= len(self.handles_pre_forward_order):
276
+ # This iteration sees extra all-gather(s) compared to the first
277
+ msg_prefix = (
278
+ "Expected to not all-gather any more parameters in the "
279
+ "forward but trying to all-gather parameters for "
280
+ )
281
+ else:
282
+ expected_handle = self.handles_pre_forward_order[
283
+ self.current_order_index
284
+ ]
285
+ if expected_handle != handle:
286
+ expected_param_names = self._get_names_from_handles(expected_handle)
287
+ msg_prefix = (
288
+ f"Expected to all-gather for {expected_param_names} "
289
+ "but trying to all-gather parameters for "
290
+ )
291
+ if msg_prefix is not None:
292
+ param_names = self._get_names_from_handles(handle)
293
+ msg_suffix = (
294
+ f"{param_names}"
295
+ if param_names
296
+ else "a newly-added parameter since construction time"
297
+ )
298
+ warnings.warn(
299
+ "Forward order differs from that of the first iteration "
300
+ f"on rank {self.rank}. Collectives are unchecked and may "
301
+ f"give incorrect results or hang.\n{msg_prefix}{msg_suffix}"
302
+ )
303
+ self.warn_status = _ExecOrderWarnStatus.WARNING
304
+ self.current_order_index += 1
305
+
306
+ def _get_handle_indices(
307
+ self,
308
+ handle: FlatParamHandle,
309
+ ) -> Tuple[Optional[int], ...]:
310
+ """
311
+ Returns the handle indices (i.e. indices into ``self.all_handles``)
312
+ corresponding to the handles in ``handle``. An entry in the
313
+ returned tuple is ``None`` if the handle is invalid.
314
+ """
315
+ indices: List[Optional[int]] = []
316
+ if handle:
317
+ indices.append(handle._handle_index)
318
+ return tuple(indices)
319
+
320
+ def _get_names_from_handle_indices(
321
+ self,
322
+ handle_indices: Tuple[int, ...],
323
+ ) -> List[List[str]]:
324
+ """
325
+ Returns a list of FQNs for each handle in ``handle_indices``. If a
326
+ handle index is invalid, then its FQNs are omitted from the returned
327
+ list.
328
+ """
329
+ fqns: List[List[str]] = []
330
+ for index in handle_indices:
331
+ if index is None or index < 0 or index >= len(self.all_handles):
332
+ continue
333
+ handle = self.all_handles[index]
334
+ flat_param = handle.flat_param
335
+ fqns.append(self.param_to_fqn[flat_param])
336
+ return fqns
337
+
338
+ def _get_names_from_handles(
339
+ self,
340
+ handle: FlatParamHandle,
341
+ ) -> List[List[str]]:
342
+ """
343
+ Returns a list of FQNs for the given ``handle``. If the handle
344
+ is invalid, then its FQNs are omitted from the returned list.
345
+ """
346
+ fqns: List[List[str]] = []
347
+ if handle:
348
+ flat_param = handle.flat_param
349
+ if flat_param in self.param_to_fqn:
350
+ fqns.append(self.param_to_fqn[flat_param])
351
+ return fqns
352
+
353
+ def next_iter(self):
354
+ """
355
+ Advances the internal data structures per iteration. This should be
356
+ called in the post-backward callback since that marks the true end of
357
+ an iteration.
358
+ """
359
+ self._iter += 1
360
+ self.handles_post_forward_order.clear()
361
+ if self._checking_order:
362
+ self.current_order_index = 0
363
+ if self.warn_status == _ExecOrderWarnStatus.WARNING:
364
+ self.warn_status = _ExecOrderWarnStatus.WARNED
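
Note on the pairwise check above: the validator all-gathers each rank's parameter count (and then its handle indices) and compares every pair of ranks. The following standalone sketch uses hypothetical counts and no collectives; it only illustrates the same `itertools.combinations` pattern used above.

# Sketch of the pairwise consistency check: every pair of ranks must agree on
# how many parameters it plans to all-gather. The counts below are made up;
# the real code obtains them via dist.all_gather_into_tensor.
import itertools

world_num_valid_indices = [3, 3, 3, 3]  # change one entry to trigger the error
for (r1, n1), (r2, n2) in itertools.combinations(
    enumerate(world_num_valid_indices), 2
):
    if n1 != n2:
        raise RuntimeError(
            f"rank {r1} is all-gathering {n1} parameters "
            f"while rank {r2} is all-gathering {n2} parameters"
        )
print("all ranks agree on the all-gather count")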
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_fsdp_extensions.py ADDED
@@ -0,0 +1,179 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any, List, Optional, Tuple
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ from torch.distributed._shard.sharded_tensor.api import ShardedTensor
7
+ from torch.distributed._shard.sharded_tensor.shard import Shard
8
+ from torch.distributed._tensor import DeviceMesh, DTensor
9
+ from torch.distributed.fsdp._shard_utils import (
10
+ _all_gather_dtensor,
11
+ _create_chunk_dtensor,
12
+ _create_chunk_sharded_tensor,
13
+ )
14
+
15
+
16
+ class FSDPExtensions(ABC):
17
+ """
18
+ This enables some customizable hooks to enable composability with tensor
19
+ parallelism. To activate these hooks, use :func:`_set_fsdp_extensions` to
20
+ set a custom :class:`FSDPExtensions` that implements the hooks.
21
+ """
22
+
23
+ @abstractmethod
24
+ def pre_flatten_transform(
25
+ self,
26
+ tensor: torch.Tensor,
27
+ ) -> Tuple[torch.Tensor, Optional[Any]]:
28
+ """E.g. converting ``DistributedTensor`` to local tensor."""
29
+ ...
30
+
31
+ @abstractmethod
32
+ def post_unflatten_transform(
33
+ self,
34
+ tensor: torch.Tensor,
35
+ param_extension: Any,
36
+ ) -> torch.Tensor:
37
+ """E.g. converting local tensor to ``DistributedTensor``."""
38
+ ...
39
+
40
+ @abstractmethod
41
+ def chunk_tensor(
42
+ self,
43
+ tensor: torch.Tensor,
44
+ rank: int,
45
+ world_size: int,
46
+ num_devices_per_node: int,
47
+ pg: dist.ProcessGroup,
48
+ device: Optional[torch.device] = None,
49
+ ) -> torch.Tensor:
50
+ """Shards a tensor to chunks and returns the local chunk."""
51
+ ...
52
+
53
+ @abstractmethod
54
+ def chunk_dtensor(
55
+ self,
56
+ tensor: torch.Tensor,
57
+ rank: int,
58
+ device_mesh: DeviceMesh,
59
+ ) -> torch.Tensor:
60
+ """Shards a tensor/DTensor to DTensor and returns the local DTensor."""
61
+ ...
62
+
63
+ @abstractmethod
64
+ def pre_load_state_dict_transform(
65
+ self,
66
+ tensor: torch.Tensor,
67
+ ) -> Tuple[torch.Tensor, List[Shard]]:
68
+ """
69
+ This is to be called before loading a *sharded* model state dict and
70
+ should return the tensor and list of shards from which to load data.
71
+ """
72
+ ...
73
+
74
+ @abstractmethod
75
+ def all_gather_dtensor(
76
+ self,
77
+ tensor: DTensor,
78
+ parent_mesh: Optional[DeviceMesh],
79
+ ) -> torch.Tensor:
80
+ """
81
+ This is to be called before loading a *sharded* DTensor state dict.
82
+ This gathers tensor in FSDP dimension and returns local tensor of
83
+ TP DTensor.
84
+ """
85
+ ...
86
+
87
+
88
+ _extensions: Optional[FSDPExtensions] = None
89
+
90
+
91
+ def _set_fsdp_extensions(flattener: FSDPExtensions) -> None:
92
+ global _extensions
93
+ _extensions = flattener
94
+
95
+
96
+ def _ext_pre_flatten_transform(
97
+ tensor: torch.Tensor,
98
+ fsdp_extension: Optional[FSDPExtensions] = None,
99
+ ) -> Tuple[torch.Tensor, Optional[Any]]:
100
+ if fsdp_extension is not None:
101
+ new_tensor, param_extension = fsdp_extension.pre_flatten_transform(tensor)
102
+ if param_extension is not None:
103
+ return new_tensor, param_extension
104
+ return tensor, None
105
+
106
+
107
+ def _ext_post_unflatten_transform(
108
+ tensor: torch.Tensor,
109
+ param_extension: Any,
110
+ fsdp_extension: Optional[FSDPExtensions] = None,
111
+ ) -> torch.Tensor:
112
+ if fsdp_extension is not None and param_extension is not None:
113
+ return fsdp_extension.post_unflatten_transform(tensor, param_extension)
114
+ return tensor
115
+
116
+
117
+ def _ext_chunk_tensor(
118
+ tensor: torch.Tensor,
119
+ rank: int,
120
+ world_size: int,
121
+ num_devices_per_node: int,
122
+ pg: dist.ProcessGroup,
123
+ fsdp_extension: Optional[FSDPExtensions] = None,
124
+ ) -> torch.Tensor:
125
+ chunk_tensor_fn = (
126
+ fsdp_extension.chunk_tensor
127
+ if fsdp_extension is not None
128
+ else _create_chunk_sharded_tensor
129
+ )
130
+ return chunk_tensor_fn(
131
+ tensor,
132
+ rank,
133
+ world_size,
134
+ num_devices_per_node,
135
+ pg,
136
+ )
137
+
138
+
139
+ def _ext_chunk_dtensor(
140
+ tensor: torch.Tensor,
141
+ rank: int,
142
+ device_mesh: DeviceMesh,
143
+ fsdp_extension: Optional[FSDPExtensions] = None,
144
+ ) -> torch.Tensor:
145
+ chunk_dtensor_fn = (
146
+ fsdp_extension.chunk_dtensor
147
+ if fsdp_extension is not None
148
+ else _create_chunk_dtensor
149
+ )
150
+ return chunk_dtensor_fn(
151
+ tensor,
152
+ rank,
153
+ device_mesh,
154
+ )
155
+
156
+
157
+ def _ext_pre_load_state_dict_transform(
158
+ tensor: torch.Tensor,
159
+ fsdp_extension: Optional[FSDPExtensions] = None,
160
+ ) -> Tuple[torch.Tensor, List[Shard]]:
161
+ if fsdp_extension is not None:
162
+ return fsdp_extension.pre_load_state_dict_transform(tensor)
163
+
164
+ assert type(tensor) is ShardedTensor
165
+ shards = tensor.local_shards()
166
+ return (tensor, shards)
167
+
168
+
169
+ def _ext_all_gather_dtensor(
170
+ tensor: DTensor,
171
+ parent_mesh: Optional[DeviceMesh],
172
+ fsdp_extension: Optional[FSDPExtensions] = None,
173
+ ) -> torch.Tensor:
174
+ all_gather_dtensor_fn = (
175
+ fsdp_extension.all_gather_dtensor
176
+ if fsdp_extension is not None
177
+ else _all_gather_dtensor
178
+ )
179
+ return all_gather_dtensor_fn(tensor, parent_mesh)
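
For orientation, below is a minimal sketch of wiring a custom extension through the hook points defined above. The `NoOpExtensions` class and its pass-through bodies are made up for illustration; only the registration mechanism and the hook signatures come from the file above, and a real integration would implement actual shard/gather logic.

# Hypothetical pass-through extension; the bodies are illustrative no-ops.
from typing import Any, List, Optional, Tuple

import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_tensor.shard import Shard
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed.fsdp._fsdp_extensions import (
    _set_fsdp_extensions,
    FSDPExtensions,
)


class NoOpExtensions(FSDPExtensions):
    def pre_flatten_transform(
        self, tensor: torch.Tensor
    ) -> Tuple[torch.Tensor, Optional[Any]]:
        return tensor, None  # no transform, so no param extension to remember

    def post_unflatten_transform(
        self, tensor: torch.Tensor, param_extension: Any
    ) -> torch.Tensor:
        return tensor

    def chunk_tensor(
        self,
        tensor: torch.Tensor,
        rank: int,
        world_size: int,
        num_devices_per_node: int,
        pg: dist.ProcessGroup,
        device: Optional[torch.device] = None,
    ) -> torch.Tensor:
        return tensor  # a real implementation returns the rank-local chunk

    def chunk_dtensor(
        self, tensor: torch.Tensor, rank: int, device_mesh: DeviceMesh
    ) -> torch.Tensor:
        return tensor

    def pre_load_state_dict_transform(
        self, tensor: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Shard]]:
        return tensor, []

    def all_gather_dtensor(
        self, tensor: DTensor, parent_mesh: Optional[DeviceMesh]
    ) -> torch.Tensor:
        return tensor.to_local()  # a real implementation gathers the FSDP dim first


_set_fsdp_extensions(NoOpExtensions())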
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py ADDED
@@ -0,0 +1,1182 @@
1
+ import collections
2
+ import itertools
3
+ import os
4
+ import warnings
5
+ from typing import (
6
+ Any,
7
+ Callable,
8
+ Deque,
9
+ Dict,
10
+ Generator,
11
+ Iterable,
12
+ Iterator,
13
+ List,
14
+ no_type_check,
15
+ Optional,
16
+ Set,
17
+ Tuple,
18
+ Union,
19
+ )
20
+
21
+ import torch
22
+ import torch.distributed as dist
23
+ import torch.distributed.fsdp._exec_order_utils as exec_order_utils
24
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
25
+ import torch.distributed.fsdp.fully_sharded_data_parallel as fsdp_file
26
+ import torch.nn as nn
27
+ from torch.distributed.algorithms._comm_hooks import default_hooks
28
+ from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
29
+ from torch.distributed.distributed_c10d import _get_default_group
30
+ from torch.distributed.fsdp._common_utils import (
31
+ _FSDPDeviceHandle,
32
+ _FSDPState,
33
+ _get_module_fsdp_state,
34
+ _is_fsdp_flattened,
35
+ _named_parameters_with_duplicates,
36
+ clean_tensor_name,
37
+ TrainingState,
38
+ )
39
+ from torch.distributed.fsdp._flat_param import (
40
+ _FSDP_USE_FULL_PREC_IN_EVAL,
41
+ FlatParameter,
42
+ FlatParamHandle,
43
+ HandleShardingStrategy,
44
+ )
45
+ from torch.distributed.fsdp._limiter_utils import _FreeEventQueue
46
+ from torch.distributed.fsdp.api import (
47
+ BackwardPrefetch,
48
+ CPUOffload,
49
+ FullOptimStateDictConfig,
50
+ FullStateDictConfig,
51
+ MixedPrecision,
52
+ ShardingStrategy,
53
+ StateDictConfig,
54
+ StateDictType,
55
+ )
56
+ from torch.distributed.fsdp.wrap import _Policy
57
+ from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
58
+ from torch.distributed.utils import _sync_params_and_buffers
59
+
60
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
61
+ from torch.utils.hooks import RemovableHandle
62
+
63
+ _TORCHDISTX_AVAIL = True
64
+ try:
65
+ from torchdistx import deferred_init, fake # type: ignore[import]
66
+ except ImportError:
67
+ _TORCHDISTX_AVAIL = False
68
+
69
+ PARAM_BROADCAST_BUCKET_SIZE = int(250 * 1024 * 1024)
70
+ FSDP_SYNCED = "_fsdp_synced"
71
+ # Specification of process groups for hybrid sharding strategies.
72
+ HybridShardProcessGroupType = Tuple[dist.ProcessGroup, dist.ProcessGroup]
73
+ # Overall specification of process group.
74
+ ProcessGroupType = Optional[Union[dist.ProcessGroup, HybridShardProcessGroupType]]
75
+
76
+
77
+ # TODO (awgu): Refactor this later
78
+ SHARDING_STRATEGY_MAP = {
79
+ ShardingStrategy.NO_SHARD: HandleShardingStrategy.NO_SHARD,
80
+ ShardingStrategy.FULL_SHARD: HandleShardingStrategy.FULL_SHARD,
81
+ ShardingStrategy.SHARD_GRAD_OP: HandleShardingStrategy.SHARD_GRAD_OP,
82
+ ShardingStrategy.HYBRID_SHARD: HandleShardingStrategy.HYBRID_SHARD,
83
+ ShardingStrategy._HYBRID_SHARD_ZERO2: HandleShardingStrategy._HYBRID_SHARD_ZERO2,
84
+ }
85
+ HYBRID_SHARDING_STRATEGIES = [
86
+ ShardingStrategy.HYBRID_SHARD,
87
+ ShardingStrategy._HYBRID_SHARD_ZERO2,
88
+ ]
89
+ NO_RESHARD_AFTER_FORWARD_STRATEGIES = (
90
+ ShardingStrategy.SHARD_GRAD_OP,
91
+ ShardingStrategy._HYBRID_SHARD_ZERO2,
92
+ )
93
+
94
+
95
+ # NOTE: Since non-self attributes cannot be type annotated, several attributes
96
+ # on `state` are defined first as local variables before being assigned.
97
+
98
+
99
+ @no_type_check
100
+ def _init_process_group_state(
101
+ state: _FSDPState,
102
+ process_group: ProcessGroupType,
103
+ sharding_strategy: ShardingStrategy,
104
+ policy: Optional[_Policy],
105
+ device_mesh: Optional[DeviceMesh] = None,
106
+ ) -> _FSDPState:
107
+ if process_group is not None and device_mesh is not None:
108
+ raise ValueError(
109
+ "Cannot pass both process_group and device_mesh at the "
110
+ "same time. Please just pass only one of them."
111
+ )
112
+ is_hybrid_strategy = sharding_strategy in HYBRID_SHARDING_STRATEGIES
113
+ if is_hybrid_strategy:
114
+ if process_group is None and policy is None and device_mesh is None:
115
+ # Raise an error here, since this is manual wrapping with no process group
116
+ # passed in, there is no way to ensure all wrapped FSDP instances use the same
117
+ # process groups.
118
+ raise ValueError(
119
+ f"Manual wrapping with {sharding_strategy}",
120
+ "requires explicit specification of process group or device_mesh.",
121
+ )
122
+ else:
123
+ state = _init_process_group_state_for_hybrid_shard(
124
+ state, process_group, device_mesh
125
+ )
126
+ else:
127
+ if device_mesh:
128
+ state._device_mesh = device_mesh
129
+ state.process_group = device_mesh.get_group(mesh_dim=0)
130
+ else:
131
+ state.process_group = (
132
+ process_group if process_group is not None else _get_default_group()
133
+ )
134
+
135
+ state.rank = state.process_group.rank()
136
+ state.world_size = state.process_group.size()
137
+ data_parallel_world_size = state.world_size
138
+ if is_hybrid_strategy:
139
+ data_parallel_world_size *= state._inter_node_pg.size()
140
+ state._gradient_predivide_factor = (
141
+ default_hooks.DefaultState._get_gradient_predivide_factor(
142
+ data_parallel_world_size
143
+ )
144
+ )
145
+ state._gradient_postdivide_factor = (
146
+ data_parallel_world_size / state._gradient_predivide_factor
147
+ )
148
+ return state
149
+
150
+
151
+ @no_type_check
152
+ def _init_process_group_state_for_hybrid_shard(
153
+ state: _FSDPState,
154
+ process_group: ProcessGroupType,
155
+ device_mesh: DeviceMesh,
156
+ ) -> _FSDPState:
157
+ if device_mesh:
158
+ if _is_valid_hybrid_shard_device_mesh(device_mesh):
159
+ state._device_mesh = device_mesh
160
+ # We currently only allow _inter_node_pg to be the outermost dimension, and the
161
+ # process_group(intra_node) to be the innermost dimension.
162
+ state._inter_node_pg = device_mesh.get_group(mesh_dim=0)
163
+ state.process_group = device_mesh.get_group(mesh_dim=1)
164
+ else:
165
+ raise ValueError(
166
+ "Expected device_mesh to have ndim=2 "
167
+ f"but got {len(device_mesh.get_group())}"
168
+ )
169
+ elif process_group is None:
170
+ default_group = _get_default_group()
171
+ intra_node_group, inter_node_group = _init_intra_and_inter_node_groups(
172
+ default_group, state._device_handle.device_count()
173
+ )
174
+ # we shard across intra-node
175
+ state.process_group = intra_node_group
176
+ # save _inter_node_pg to allreduce across.
177
+ state._inter_node_pg = inter_node_group
178
+ else:
179
+ # Check type and assign state.process_group and state._inter_node_pg.
180
+ if _is_valid_hybrid_shard_pg_type(process_group):
181
+ # Assuming that user passed in as intra node group and inter node group
182
+ # as documented.
183
+ state.process_group, state._inter_node_pg = process_group
184
+ else:
185
+ raise ValueError(
186
+ "Expected process_group to be passed in as either None or "
187
+ f"Tuple[dist.ProcessGroup, dist.ProcessGroup] but got {type(process_group)}"
188
+ )
189
+ # Create state for allreduce
190
+ state._inter_node_state = _get_default_comm_hook_state(
191
+ process_group=state._inter_node_pg,
192
+ )
193
+ return state
194
+
195
+
196
+ @no_type_check
197
+ def _is_valid_hybrid_shard_pg_type(process_group: Any) -> bool:
198
+ return (
199
+ isinstance(process_group, tuple)
200
+ and len(process_group) == 2
201
+ and all(isinstance(pg, dist.ProcessGroup) for pg in process_group)
202
+ )
203
+
204
+
205
+ @no_type_check
206
+ def _is_valid_hybrid_shard_device_mesh(device_mesh: DeviceMesh) -> bool:
207
+ return isinstance(device_mesh, DeviceMesh) and device_mesh.ndim == 2
208
+
209
+
210
+ @no_type_check
211
+ def _init_intra_node_process_group(num_devices_per_node: int) -> dist.ProcessGroup:
212
+ """
213
+ Return a process group across the current node.
214
+
215
+ For example, given each row is a distinct node:
216
+ 0 1 2 3 4 5 6 7
217
+ 8 9 10 11 12 13 14 15
218
+ This API would return an intra-node subgroup across
219
+ [0, 7] or [8, 15] depending on the process's rank.
220
+ For example, rank 3 would get [0, 7].
221
+ """
222
+ intra_node_subgroup, _ = dist.new_subgroups(num_devices_per_node)
223
+ return intra_node_subgroup
224
+
225
+
226
+ @no_type_check
227
+ def _init_inter_node_process_group(
228
+ global_process_group: dist.ProcessGroup,
229
+ num_devices_per_node: int,
230
+ ) -> dist.ProcessGroup:
231
+ """
232
+ Return an inter-node process group where each contained rank has the same local rank.
233
+
234
+ For example, given each row is a distinct node:
235
+ 0 1 2 3 4 5 6 7
236
+ 8 9 10 11 12 13 14 15
237
+ This API would return inter-node process group {0, 8}, {1, 9}, {2, 10}, and so forth
238
+ depending on the process's rank. For example, rank 1 would get {1, 9}, rank 5
239
+ would get {5, 13}.
240
+ """
241
+ # the inter-node pg that is returned
242
+ inter_node_pg = None
243
+ sharding_backend = dist.get_backend(global_process_group)
244
+ world_size = dist.get_world_size(global_process_group)
245
+ # Assuming fully homogeneous setup
246
+ num_nodes = world_size // num_devices_per_node
247
+ my_local_rank = dist.get_rank(global_process_group) % num_devices_per_node
248
+ for local_rank in range(num_devices_per_node):
249
+ ranks_for_inter_group = [
250
+ local_rank + (i * num_devices_per_node) for i in range(num_nodes)
251
+ ]
252
+ # every rank always needs to call dist.new_group
253
+ grp = dist.new_group(ranks=ranks_for_inter_group, backend=sharding_backend)
254
+ if local_rank == my_local_rank:
255
+ inter_node_pg = grp
256
+
257
+ assert (
258
+ inter_node_pg is not None
259
+ ), f"{my_local_rank} expected to assign inter-node pg, but did not"
260
+ return inter_node_pg
261
+
262
+
263
+ def _init_intra_and_inter_node_groups(
264
+ global_process_group: dist.ProcessGroup,
265
+ num_devices_per_node: int,
266
+ ) -> Tuple[dist.ProcessGroup, dist.ProcessGroup]:
267
+ """
268
+ Initialize intra and inter-node process groups and return the ones corresponding to this process's rank.
269
+
270
+ This function can be used to initialize process groups for ``HYBRID_SHARD`` or
271
+ ``_HYBRID_SHARD_ZERO2`` in FSDP.
272
+ This function assumes each node has an equal number of CUDA-enabled devices.
273
+ Returns:
274
+ Tuple[dist.ProcessGroup, dist.ProcessGroup]: Intra and inter-node process group.
275
+ """
276
+ return (
277
+ _init_intra_node_process_group(num_devices_per_node),
278
+ _init_inter_node_process_group(global_process_group, num_devices_per_node),
279
+ )
280
+
281
+
282
+ @no_type_check
283
+ def _init_ignored_module_states(
284
+ state: _FSDPState,
285
+ module: nn.Module,
286
+ ignored_modules: Optional[Iterable[torch.nn.Module]],
287
+ ignored_states: Union[
288
+ Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
289
+ ] = None,
290
+ ) -> _FSDPState:
291
+ if ignored_modules is not None and ignored_states is not None:
292
+ raise ValueError(
293
+ "Cannot pass both ignored_modules and ignored_states at the "
294
+ "same time. Please just pass ignored_states."
295
+ )
296
+ ignored_parameters = None
297
+ passed_as_ignored_states = ignored_states is not None
298
+ if passed_as_ignored_states:
299
+ ignored_states_list = list(ignored_states)
300
+ _check_ignored_states(ignored_states_list, True)
301
+ else:
302
+ ignored_states_list = []
303
+ _check_ignored_states(
304
+ list(ignored_modules) if ignored_modules is not None else [], False
305
+ )
306
+ if len(ignored_states_list) > 0:
307
+ if isinstance(ignored_states_list[0], nn.Parameter):
308
+ ignored_parameters = ignored_states_list
309
+ else:
310
+ ignored_modules = ignored_states_list
311
+ state._ignored_modules = _get_ignored_modules(module, ignored_modules)
312
+ state._ignored_params = _get_ignored_params(
313
+ module,
314
+ state._ignored_modules,
315
+ ignored_parameters,
316
+ )
317
+ state._ignored_buffer_names = _get_ignored_buffer_names(
318
+ module,
319
+ state._ignored_modules,
320
+ )
321
+ # TODO: FSDP's contract for buffers is not well-defined. They are
322
+ # implicitly ignored for most functionality since they are not sharded;
323
+ # however, FSDP still imposes some semantics on buffers (e.g. buffer mixed
324
+ # precision). We should formalize this contract and decide if we need to
325
+ # compute and store `_ignored_buffers`.
326
+ return state
327
+
328
+
329
+ def _check_ignored_states(
330
+ ignored_states: List[Any], passed_as_ignored_states: bool
331
+ ) -> None:
332
+ """
333
+ Check that the ignored states are uniformly parameters or uniformly modules.
334
+
335
+ We may remove this check in the future if we permit mixing.
336
+ """
337
+ if len(ignored_states) == 0:
338
+ return
339
+ if passed_as_ignored_states:
340
+ all_params = all(isinstance(state, nn.Parameter) for state in ignored_states)
341
+ all_modules = all(isinstance(state, nn.Module) for state in ignored_states)
342
+ if not all_params and not all_modules:
343
+ # Sort for consistent ordering for unit test regex matching
344
+ sorted_types = sorted({type(state) for state in ignored_states}, key=repr)
345
+ raise ValueError(
346
+ "ignored_states expects all nn.Parameter or all nn.Module list "
347
+ f"elements but got types {sorted_types}"
348
+ )
349
+ else:
350
+ if not all(isinstance(state, nn.Module) for state in ignored_states):
351
+ sorted_types = sorted({type(state) for state in ignored_states}, key=repr)
352
+ raise ValueError(
353
+ "ignored_modules expects nn.Module list elements but got "
354
+ f"types {sorted_types}"
355
+ )
356
+
357
+
358
+ @no_type_check
359
+ def _init_device_handle(
360
+ state: _FSDPState,
361
+ module: nn.Module,
362
+ ignored_params: Set[nn.Parameter],
363
+ device_id: Optional[Union[int, torch.device]],
364
+ ) -> _FSDPState:
365
+ """
366
+ Determine device handle used for initializing FSDP.
367
+
368
+ If a device is specified by ``device_id``,
369
+ then this returns the device handle corresponding to that device type. Otherwise, if the
370
+ module is already on a non-CPU device, then the device type is that non-CPU device type.
371
+ If the module is on CPU or meta, then the device type is the current cuda device.
372
+
373
+ This method should be called after the ignored parameters have been determined, as the device handle may be needed
374
+ for other initialization.
375
+ """
376
+ determined_device = None
377
+ if device_id is not None:
378
+ determined_device = (
379
+ device_id
380
+ if isinstance(device_id, torch.device)
381
+ else torch.device(device_id)
382
+ )
383
+ if determined_device is None:
384
+ for param in _get_orig_params(module, ignored_params):
385
+ if param.device.type in {"cpu", "meta"}:
386
+ continue
387
+ if determined_device is None:
388
+ determined_device = param.device
389
+ else:
390
+ if param.device.type != determined_device.type:
391
+ raise RuntimeError(
392
+ f"FSDP does not support modules with different device types "
393
+ f"but got params on {determined_device.type} and {param.device.type}"
394
+ )
395
+ determined_device = determined_device or torch.device(
396
+ "cuda", torch.cuda.current_device()
397
+ )
398
+
399
+ state._device_handle = _FSDPDeviceHandle.from_device(determined_device)
400
+ return state
401
+
402
+
403
+ @no_type_check
404
+ def _init_buffer_state(
405
+ state: _FSDPState,
406
+ module: nn.Module,
407
+ ) -> _FSDPState:
408
+ state._buffer_names = _get_buffer_names(module)
409
+ # Save a mapping from clean fully-qualified buffer name (starting from
410
+ # `module`) to its original dtype for restoring that dtype during model
411
+ # checkpointing when buffer mixed precision is enabled. The names should
412
+ # be clean since the casting happens in a `summon_full_params()` context.
413
+ _buffer_name_to_orig_dtype: Dict[str, torch.dtype] = {}
414
+ for buffer_name, buffer in module.named_buffers():
415
+ buffer_name = clean_tensor_name(buffer_name)
416
+ _buffer_name_to_orig_dtype[buffer_name] = buffer.dtype
417
+ state._buffer_name_to_orig_dtype = _buffer_name_to_orig_dtype
418
+ return state
419
+
420
+
421
+ @no_type_check
422
+ def _init_core_state(
423
+ state: _FSDPState,
424
+ sharding_strategy: Optional[ShardingStrategy],
425
+ mixed_precision: Optional[MixedPrecision],
426
+ cpu_offload: Optional[CPUOffload],
427
+ limit_all_gathers: bool,
428
+ use_orig_params: bool,
429
+ backward_prefetch_limit: int,
430
+ forward_prefetch_limit: int,
431
+ ) -> _FSDPState:
432
+ # We clamp the strategy to `NO_SHARD` for world size of 1 since they are
433
+ # currently functionally equivalent. This may change if/when we integrate
434
+ # FSDP with MoE.
435
+ if state.world_size == 1:
436
+ if sharding_strategy != ShardingStrategy.NO_SHARD:
437
+ warnings.warn(
438
+ "FSDP is switching to use `NO_SHARD` instead of "
439
+ f"{sharding_strategy or ShardingStrategy.FULL_SHARD} since "
440
+ "the world size is 1."
441
+ )
442
+ sharding_strategy = ShardingStrategy.NO_SHARD
443
+ elif sharding_strategy == ShardingStrategy.NO_SHARD:
444
+ warnings.warn(
445
+ "The `NO_SHARD` sharding strategy is deprecated. If having issues, "
446
+ "please use DistributedDataParallel instead.",
447
+ # Level 1 is here, level 2 is from `FullyShardedDataParallel`, and
448
+ # level 3 is from the true caller
449
+ stacklevel=3,
450
+ )
451
+ state.sharding_strategy = sharding_strategy or ShardingStrategy.FULL_SHARD
452
+ state.mixed_precision = mixed_precision or MixedPrecision()
453
+ if mixed_precision is not None:
454
+ torch._C._log_api_usage_once(
455
+ f"torch.distributed.fsdp.mixed_precision.{str(state.mixed_precision)}"
456
+ )
457
+ state._use_full_prec_in_eval = (
458
+ os.environ.get(_FSDP_USE_FULL_PREC_IN_EVAL, "") == "1"
459
+ )
460
+ state.cpu_offload = cpu_offload or CPUOffload()
461
+ state.limit_all_gathers = limit_all_gathers
462
+ state._use_orig_params = use_orig_params
463
+ state.training_state = TrainingState.IDLE
464
+ state._is_root = None
465
+ state._free_event_queue = _FreeEventQueue()
466
+ state._debug_level = dist.get_debug_level()
467
+ state._exec_order_data = exec_order_utils._ExecOrderData(
468
+ state._debug_level,
469
+ backward_prefetch_limit,
470
+ forward_prefetch_limit,
471
+ )
472
+ # Mapping from fully sharded module to the handles it is responsible to
473
+ # unshard and reshard (see [Note: Fully Sharded Module])
474
+ _fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = dict()
475
+ state._fully_sharded_module_to_handle = _fully_sharded_module_to_handle
476
+ # Invariant: `state.params` contains exactly the `FlatParameter`s of the
477
+ # handles in `state._handle`
478
+ _handle: FlatParamHandle = None
479
+ state._handle = _handle
480
+ params: List[FlatParameter] = []
481
+ state.params = params
482
+ return state
483
+
484
+
485
+ @no_type_check
486
+ def _init_runtime_state(
487
+ state: _FSDPState,
488
+ ) -> _FSDPState:
489
+ _root_pre_forward_handles: List[RemovableHandle] = []
490
+ state._root_pre_forward_handles = _root_pre_forward_handles
491
+ _pre_forward_handles: List[RemovableHandle] = []
492
+ state._pre_forward_handles = _pre_forward_handles
493
+ _post_forward_handles: List[RemovableHandle] = []
494
+ state._post_forward_handles = _post_forward_handles
495
+ state._sync_gradients = True
496
+ state._comm_hook = None
497
+ state._comm_hook_state = None
498
+ # Used to prevent running the pre-backward hook multiple times
499
+ return state
500
+
501
+
502
+ @no_type_check
503
+ def _init_prefetching_state(
504
+ state: _FSDPState,
505
+ backward_prefetch: BackwardPrefetch,
506
+ forward_prefetch: bool,
507
+ ) -> _FSDPState:
508
+ state.backward_prefetch = backward_prefetch
509
+ state.forward_prefetch = forward_prefetch
510
+ # The data structures use tuples of handles to generalize over the case
511
+ # where a module's forward involves multiple handles.
512
+ return state
513
+
514
+
515
+ @no_type_check
516
+ def _init_extension(state: _FSDPState, device_mesh: DeviceMesh = None) -> _FSDPState:
517
+ # TODO: we need to add additional check once we support FSDP + PiPPy.
518
+ # This check is currently sufficient, since we only support FSDP + TP.
519
+ if device_mesh and _mesh_resources.get_parent_mesh(state._device_mesh) is not None:
520
+ state._fsdp_extension = DTensorExtensions(state._device_handle)
521
+ else:
522
+ # We need to explicitly set _fsdp_extension to None.
523
+ # Otherwise, we will run into an infinite recursion when getting the attribute.
524
+ state._fsdp_extension = None
525
+ return state
526
+
527
+
528
+ @no_type_check
529
+ def _init_state_dict_state(state: _FSDPState) -> _FSDPState:
530
+ state._state_dict_type = StateDictType.FULL_STATE_DICT
531
+ state_dict_config: StateDictConfig = FullStateDictConfig()
532
+ state._optim_state_dict_config = FullOptimStateDictConfig()
533
+ state._state_dict_config = state_dict_config
534
+ unshard_params_ctx: Dict[nn.Module, Generator] = {}
535
+ state._unshard_params_ctx = unshard_params_ctx
536
+
537
+ return state
538
+
539
+
540
+ @no_type_check
541
+ def _init_param_handle_from_module(
542
+ state: _FSDPState,
543
+ fully_sharded_module: nn.Module,
544
+ device_id: Optional[Union[int, torch.device]],
545
+ param_init_fn: Optional[Callable[[nn.Module], None]],
546
+ sync_module_states: bool,
547
+ ) -> _FSDPState:
548
+ """Initialize a ``FlatParamHandle`` from a module ``fully_sharded_module``."""
549
+ _check_single_device_module(fully_sharded_module, state._ignored_params, device_id)
550
+ device_from_device_id = _get_device_from_device_id(device_id, state.rank)
551
+ is_meta_module, is_torchdistX_deferred_init = _need_to_materialize_module(
552
+ fully_sharded_module, state._ignored_params, state._ignored_modules
553
+ )
554
+ # Materialize the module if needed
555
+ if (is_meta_module or is_torchdistX_deferred_init) and param_init_fn is not None:
556
+ _materialize_with_param_init_fn(
557
+ fully_sharded_module, param_init_fn, state._ignored_modules
558
+ )
559
+ elif is_meta_module:
560
+ _materialize_meta_module(
561
+ fully_sharded_module, device_id, state._ignored_modules
562
+ )
563
+ elif is_torchdistX_deferred_init:
564
+ deferred_init.materialize_module(
565
+ fully_sharded_module,
566
+ check_fn=lambda submodule: _get_module_fsdp_state(submodule) is None
567
+ and submodule not in state._ignored_modules,
568
+ )
569
+
570
+ ignored_buffers = {
571
+ buffer
572
+ for ignored_module in state._ignored_modules
573
+ for buffer in ignored_module.buffers()
574
+ }
575
+
576
+ _move_module_to_device(
577
+ fully_sharded_module,
578
+ state._ignored_params,
579
+ ignored_buffers,
580
+ device_from_device_id,
581
+ )
582
+ state.compute_device = _get_compute_device(
583
+ fully_sharded_module,
584
+ state._ignored_params,
585
+ device_from_device_id,
586
+ state.rank,
587
+ )
588
+
589
+ managed_params = list(_get_orig_params(fully_sharded_module, state._ignored_params))
590
+ if sync_module_states:
591
+ _sync_module_params_and_buffers(
592
+ fully_sharded_module, managed_params, state.process_group
593
+ )
594
+ if state.sharding_strategy in HYBRID_SHARDING_STRATEGIES:
595
+ _sync_module_params_and_buffers(
596
+ fully_sharded_module, managed_params, state._inter_node_pg
597
+ )
598
+ _init_param_handle_from_params(state, managed_params, fully_sharded_module)
599
+ return state
600
+
601
+
602
+ @no_type_check
603
+ def _init_param_handle_from_params(
604
+ state: _FSDPState,
605
+ params: List[nn.Parameter],
606
+ fully_sharded_module: nn.Module,
607
+ ):
608
+ if len(params) == 0:
609
+ return
610
+ handle = FlatParamHandle(
611
+ params,
612
+ fully_sharded_module,
613
+ state.compute_device,
614
+ SHARDING_STRATEGY_MAP[state.sharding_strategy],
615
+ state.cpu_offload.offload_params,
616
+ state.mixed_precision.param_dtype,
617
+ state.mixed_precision.reduce_dtype,
618
+ state.mixed_precision.keep_low_precision_grads,
619
+ state.process_group,
620
+ state._use_orig_params,
621
+ fsdp_extension=state._fsdp_extension,
622
+ )
623
+ handle.shard()
624
+ assert not state._handle
625
+ state.params.append(handle.flat_param)
626
+ state._handle = handle
627
+ state._fully_sharded_module_to_handle[handle._fully_sharded_module] = handle
628
+ cpu_device = torch.device("cpu")
629
+ if state.cpu_offload.offload_params and handle.flat_param.device != cpu_device:
630
+ handle.flat_param_to(cpu_device)
631
+
632
+
633
+ def _get_ignored_modules(
634
+ root_module: nn.Module,
635
+ _ignored_modules: Optional[Iterable[torch.nn.Module]],
636
+ ) -> Set[nn.Module]:
637
+ """
638
+ Check that ``_ignored_modules`` is an iterable of ``nn.Module`` s without any FSDP instances.
639
+
640
+ Return the modules contained in their module
641
+ subtrees as a :class:`set`. Nested FSDP instances are excluded, but their
642
+ already-computed ignored modules are included.
643
+
644
+ ``_ignored_modules`` represents the argument passed by the user to FSDP.
645
+ """
646
+ msg_prefix = "`ignored_modules` should be an iterable of `torch.nn.Module`s "
647
+ try:
648
+ ignored_root_modules = (
649
+ set(_ignored_modules) if _ignored_modules is not None else set()
650
+ )
651
+ except TypeError as e:
652
+ raise TypeError(msg_prefix + f"but got {type(_ignored_modules)}") from e
653
+ for module in ignored_root_modules:
654
+ if not isinstance(module, torch.nn.Module):
655
+ raise TypeError(msg_prefix + f"but got an iterable with {type(module)}")
656
+ if _get_module_fsdp_state(module):
657
+ # TODO: We may relax this by taking the FSDP instance's wrapped
658
+ # module to provide more flexibility to the user.
659
+ raise ValueError("`ignored_modules` should not include FSDP modules")
660
+ # Treat modules that cannot compose with `fully_shard` as ignored modules,
661
+ # meaning that their subtrees are ignored
662
+ for module in root_module.modules():
663
+ if not traversal_utils._composable(module):
664
+ ignored_root_modules.add(module)
665
+ # NOTE: Even if `ignored_root_modules` is empty, do not return early so
666
+ # that this FSDP instance can get any ignored modules from its children.
667
+
668
+ # Include child modules and exclude nested FSDP modules themselves
669
+ ignored_modules = {
670
+ child
671
+ for module in ignored_root_modules
672
+ for child in module.modules()
673
+ if not isinstance(child, fsdp_file.FullyShardedDataParallel)
674
+ }
675
+ if root_module in ignored_modules:
676
+ warnings.warn(
677
+ "Trying to ignore the top-level module passed into the FSDP "
678
+ "constructor itself will result in all parameters being "
679
+ f"ignored and is not well-supported: {module}"
680
+ )
681
+ # Include nested FSDP modules' ignored modules
682
+ for submodule in root_module.modules():
683
+ optional_fsdp_state = _get_module_fsdp_state(submodule)
684
+ if optional_fsdp_state is not None:
685
+ assert hasattr(optional_fsdp_state, "_ignored_modules")
686
+ ignored_modules.update(optional_fsdp_state._ignored_modules)
687
+ return ignored_modules
688
+
689
+
690
+ def _get_ignored_params(
691
+ root_module: torch.nn.Module,
692
+ ignored_modules: Set[torch.nn.Module],
693
+ ignored_parameters: Optional[Iterable[torch.nn.Parameter]] = None,
694
+ ) -> Set[torch.nn.Parameter]:
695
+ """
696
+ Return the parameters of the modules in ``ignored_modules`` and the parameters in ``ignored_parameters``.
697
+
698
+ :class:`FlatParameter` s are excluded from the result.
699
+ """
700
+ all_ignored_params: Set[torch.nn.Parameter] = set()
701
+
702
+ params_in_ignored_modules = {
703
+ p for m in ignored_modules for p in m.parameters() if not _is_fsdp_flattened(p)
704
+ }
705
+
706
+ all_ignored_params.update(params_in_ignored_modules)
707
+
708
+ if ignored_parameters is not None:
709
+ params_in_ignored_parameters = {
710
+ p for p in ignored_parameters if not _is_fsdp_flattened(p)
711
+ }
712
+ all_ignored_params.update(params_in_ignored_parameters)
713
+
714
+ # Always include nested FSDP modules' ignored parameters
715
+ for submodule in root_module.modules():
716
+ optional_fsdp_state = _get_module_fsdp_state(submodule)
717
+ if optional_fsdp_state is not None:
718
+ assert hasattr(optional_fsdp_state, "_ignored_params")
719
+ all_ignored_params.update(optional_fsdp_state._ignored_params)
720
+
721
+ return all_ignored_params
722
+
723
+
724
+ def _get_ignored_buffer_names(
725
+ root_module: torch.nn.Module,
726
+ ignored_modules: Set[torch.nn.Module],
727
+ ) -> Set[str]:
728
+ """Return the cleaned buffer FQNs in ``ignored_modules``."""
729
+ all_ignored_buffer_names: Set[str] = set()
730
+
731
+ buffers_in_ignored_modules = {
732
+ buffer for m in ignored_modules for buffer in m.buffers()
733
+ }
734
+
735
+ all_ignored_buffer_names.update(
736
+ {
737
+ clean_tensor_name(buffer_name)
738
+ for buffer_name, buffer in root_module.named_buffers()
739
+ if buffer in buffers_in_ignored_modules
740
+ }
741
+ )
742
+
743
+ # Always include nested FSDP modules' ignored buffer names
744
+ for submodule in root_module.modules():
745
+ optional_fsdp_state = _get_module_fsdp_state(submodule)
746
+ if optional_fsdp_state is not None:
747
+ assert hasattr(optional_fsdp_state, "_ignored_buffer_names")
748
+ all_ignored_buffer_names.update(optional_fsdp_state._ignored_buffer_names)
749
+
750
+ return all_ignored_buffer_names
751
+
752
+
753
+ def _get_buffer_names(root_module: nn.Module) -> Set[str]:
754
+ """Return the fully prefixed names of all buffers in the module hierarchy rooted at ``root_module`` as a class:`set`."""
755
+ return {
756
+ clean_tensor_name(buffer_name) for buffer_name, _ in root_module.named_buffers()
757
+ }
758
+
759
+
760
+ def _check_single_device_module(
761
+ module: nn.Module,
762
+ ignored_params: Set[nn.Parameter],
763
+ device_id: Optional[Union[int, torch.device]],
764
+ ) -> None:
765
+ """
766
+ Raise an error if ``module`` has original parameters on multiple devices, ignoring the parameters in ``ignored_params``.
767
+
768
+ Thus, after this method, the
769
+ module must be either fully on the CPU or fully on a non-CPU device.
770
+ """
771
+ devices = {param.device for param in _get_orig_params(module, ignored_params)}
772
+ # We allow module to be partially on CPU and partially on GPU if device_id is not
773
+ # None, since the device_id arg will result in the CPU portion being moved to
774
+ # GPU. This is useful in cases where part of the module may be parallelized
775
+ # by another algorithm and may already be on GPU. We'd like to enforce device_id
776
+ # to not be None, otherwise we'd flatten parameters in a mixed module which is
777
+ # not supported.
778
+ if len(devices) == 2 and torch.device("cpu") in devices:
779
+ if device_id is None:
780
+ raise RuntimeError(
781
+ "To support a module with both CPU and GPU params, "
782
+ "please pass in device_id argument."
783
+ )
784
+ elif len(devices) > 1:
785
+ raise RuntimeError(
786
+ f"FSDP only supports single device modules but got params on {devices}"
787
+ )
788
+
789
+
790
+ def _get_device_from_device_id(
791
+ device_id: Optional[Union[int, torch.device]],
792
+ rank: int,
793
+ ) -> Optional[torch.device]:
794
+ """
795
+ Return a ``torch.device`` for the specified ``device_id``.
796
+
797
+ Processes ``device_id`` and returns either the corresponding device or
798
+ ``None`` if ``device_id`` is ``None``.
799
+ """
800
+ if device_id is None:
801
+ return None
802
+ device = (
803
+ device_id if isinstance(device_id, torch.device) else torch.device(device_id)
804
+ )
805
+ if device == torch.device("cuda"):
806
+ warnings.warn(
807
+ f"FSDP got the argument `device_id` {device_id} on rank "
808
+ f"{rank}, which does not have an explicit index. "
809
+ f"FSDP will use the current device {torch.cuda.current_device()}. "
810
+ "If this is incorrect, please explicitly call `torch.cuda.set_device()` "
811
+ "before FSDP initialization or pass in the explicit device "
812
+ "index as the `device_id` argument."
813
+ )
814
+ device = torch.device("cuda", torch.cuda.current_device())
815
+ return device
816
+
817
+
818
+ def _need_to_materialize_module(
819
+ module: nn.Module,
820
+ ignored_params: Set[nn.Parameter],
821
+ ignored_modules: Set[nn.Module],
822
+ ) -> Tuple[bool, bool]:
823
+ """
824
+ Return if ``module`` has parameters on meta device and if ``module`` is using torchdistX deferred initialization.
825
+
826
+ At most one of the returned bools can
827
+ be ``True``. If either is ``True``, then ``module`` needs to be
828
+ materialized.
829
+ """
830
+ managed_params = list(_get_orig_params(module, ignored_params))
831
+ is_meta_module = any(param.is_meta for param in managed_params)
832
+ # TODO: We need to establish a contract for FSDP and buffers. For now, we
833
+ # skip checking for meta buffers from ignored modules. We should consider
834
+ # refactoring the initialization holistically to avoid so many traversals.
835
+ for submodule in module.modules():
836
+ if submodule in ignored_modules:
837
+ continue
838
+ for buf in submodule.buffers(recurse=False):
839
+ is_meta_module |= buf.is_meta
840
+ is_torchdistX_deferred_init = (
841
+ not is_meta_module
842
+ and _TORCHDISTX_AVAIL
843
+ and any(fake.is_fake(param) for param in managed_params)
844
+ )
845
+ return is_meta_module, is_torchdistX_deferred_init
846
+
847
+
848
+ def _materialize_with_param_init_fn(
849
+ root_module: nn.Module,
850
+ param_init_fn: Callable[[nn.Module], None],
851
+ ignored_modules: Set[nn.Module],
852
+ ) -> None:
853
+ if not callable(param_init_fn):
854
+ raise ValueError(
855
+ f"Expected {param_init_fn} to be callable but got {type(param_init_fn)}"
856
+ )
857
+ modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules)
858
+ for module in modules_to_materialize:
859
+ param_init_fn(module)
860
+
861
+
862
+ def _materialize_meta_module(
863
+ root_module: nn.Module,
864
+ device_from_device_id: Optional[torch.device],
865
+ ignored_modules: Set[nn.Module],
866
+ ):
867
+ # Run default meta device initialization
868
+ materialization_device = device_from_device_id or torch.device(
869
+ torch.cuda.current_device()
870
+ )
871
+ modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules)
872
+ try:
873
+ # Assume that each module's `reset_parameters()` only initializes its
874
+ # own parameters and not those of its children
875
+ with torch.no_grad():
876
+ for module in modules_to_materialize:
877
+ # As a contract to the user, only call `reset_parameters()` if
878
+ # the module has directly managed parameters/buffers
879
+ module_state_iter = itertools.chain(
880
+ module.parameters(recurse=False), module.buffers(recurse=False)
881
+ )
882
+ has_module_states = len(list(module_state_iter)) > 0
883
+ if has_module_states:
884
+ module.to_empty(device=materialization_device, recurse=False)
885
+ module.reset_parameters() # type: ignore[operator]
886
+ except BaseException as e:
887
+ warnings.warn(
888
+ "Unable to call `reset_parameters()` for module on meta "
889
+ f"device with error {str(e)}. Please ensure that your module of"
890
+ f"type {type(module)} implements a `reset_parameters()` method." # type: ignore[possibly-undefined]
891
+ )
892
+ raise e
893
+
894
+
895
+ def _get_modules_to_materialize(
896
+ root_module: nn.Module, ignored_modules: Set[nn.Module]
897
+ ) -> List[nn.Module]:
898
+ # Run BFS to collect the modules to materialize via `reset_parameters()`,
899
+ # stopping at any module with FSDP already applied or at ignored modules.
900
+ modules_to_materialize: List[nn.Module] = []
901
+ queue = collections.deque([root_module])
902
+ visited_modules: Set[nn.Module] = {root_module}
903
+ while queue:
904
+ module = queue.popleft()
905
+ modules_to_materialize.append(module)
906
+ for child_module in module.children():
907
+ if (
908
+ child_module not in visited_modules
909
+ and _get_module_fsdp_state(child_module) is None
910
+ and child_module not in ignored_modules
911
+ ):
912
+ visited_modules.add(child_module)
913
+ queue.append(child_module)
914
+ return modules_to_materialize
915
+
916
+
917
+ def _move_module_to_device(
918
+ module: nn.Module,
919
+ ignored_params: Set[nn.Parameter],
920
+ ignored_buffers: Set[torch.Tensor],
921
+ device_from_device_id: Optional[torch.device],
922
+ ) -> None:
923
+ """
924
+ Move ``module`` depending on ``device_from_device_id`` and its current device.
925
+
926
+ This includes moving ignored modules' parameters.
927
+
928
+ - If ``device_from_device_id`` is not ``None``, then this moves
929
+ ``module`` to the device.
930
+ - If ``device_from_device_id`` is ``None``, then this does not move
931
+ ``module`` but warns the user if it is on CPU.
932
+
933
+ Precondition: ``_check_single_device_module()``.
934
+ """
935
+ cpu_device = torch.device("cpu")
936
+ if device_from_device_id is not None:
937
+ # BFS from `module` without traversing any nested FSDP instances to
938
+ # collect the parameters/buffers that have not yet been managed
939
+ queue: Deque[nn.Module] = collections.deque()
940
+ queue.append(module)
941
+ params: List[nn.Parameter] = []
942
+ buffers: List[torch.Tensor] = []
943
+ while queue:
944
+ curr_module = queue.popleft()
945
+ # NOTE: We include a check to only move parameters/buffers that are
946
+ # on CPU device. If they are on a CUDA device different from the
947
+ # one specified by `device_id`, then this does NOT move them. This
948
+ # is so that we can raise an error in `_get_compute_device()`.
949
+ params.extend(
950
+ param
951
+ for param in curr_module.parameters(recurse=False)
952
+ if param.device == cpu_device
953
+ )
954
+ buffers.extend(
955
+ buffer
956
+ for buffer in curr_module.buffers(recurse=False)
957
+ if buffer.device == cpu_device
958
+ )
959
+ for submodule in curr_module.children():
960
+ if not isinstance(submodule, fsdp_file.FullyShardedDataParallel):
961
+ queue.append(submodule)
962
+ params_to_move = [p for p in params if p not in ignored_params]
963
+ bufs_to_move = [p for p in buffers if p not in ignored_buffers]
964
+ _move_states_to_device(params_to_move, bufs_to_move, device_from_device_id)
965
+ return
966
+ param = next(_get_orig_params(module, ignored_params), None)
967
+ if param is not None and param.device == cpu_device:
968
+ _warn_cpu_init()
969
+
970
+
971
+ def _move_states_to_device(
972
+ params: List[nn.Parameter],
973
+ buffers: List[torch.Tensor],
974
+ device_from_device_id: Optional[torch.device],
975
+ ) -> None:
976
+ """
977
+ Move states to the specified device.
978
+
979
+ Precondition: ``_check_single_device_module()`` and module's parameters and
980
+ buffers have been materialized if needed.
981
+ """
982
+ if len(params) == 0 and len(buffers) == 0:
983
+ return
984
+ if len(params) > 0:
985
+ current_device = params[0].device
986
+ elif len(buffers) > 0:
987
+ current_device = buffers[0].device
988
+ cpu_device = torch.device("cpu")
989
+ if device_from_device_id is not None:
990
+ # Move the parameters and buffers like the `.data` code path in
991
+ # `nn.Module._apply()`, which underlies `nn.Module.to()`
992
+ for param in params:
993
+ with torch.no_grad():
994
+ param.data = param.to(device_from_device_id)
995
+ if param.grad is not None:
996
+ param.grad.data = param.grad.to(device_from_device_id)
997
+ for buffer in buffers:
998
+ buffer.data = buffer.to(device_from_device_id)
999
+ elif current_device == cpu_device: # type: ignore[possibly-undefined]
1000
+ _warn_cpu_init()
1001
+
1002
+
1003
+ def _warn_cpu_init():
1004
+ warnings.warn(
1005
+ "The passed-in `module` is on CPU and will thus have FSDP's sharding "
1006
+ "initialization run on CPU, which may be slower than on GPU. We "
1007
+ "recommend passing in the `device_id` argument for FSDP to move "
1008
+ "`module` to GPU for the sharding initialization. `module` must also "
1009
+ "be on GPU device to work with the `sync_module_states=True` flag "
1010
+ "since that requires GPU communication."
1011
+ )
1012
+
1013
+
1014
+ def _get_compute_device(
1015
+ module: nn.Module,
1016
+ ignored_params: Set[nn.Parameter],
1017
+ device_from_device_id: Optional[torch.device],
1018
+ rank: int,
1019
+ ) -> torch.device:
1020
+ """
1021
+ Determine and return this FSDP instance's compute device.
1022
+
1023
+ If a device is
1024
+ specified by ``device_id``, then this returns that device. Otherwise, if the
1025
+ module is already on a non-CPU device, then the compute device is that non-CPU
1026
+ device. If the module is on CPU, then the compute device is the current
1027
+ device.
1028
+
1029
+ Since this method should be called after materializing the module, any
1030
+ non-CPU device should not be meta device. For now, the compute device is
1031
+ always a CUDA GPU device with its explicit index.
1032
+
1033
+ Precondition: ``_check_single_device_module()`` and
1034
+ ``_move_module_to_device()``.
1035
+ """
1036
+ param = next(_get_orig_params(module, ignored_params), None)
1037
+ if param is not None and param.device.type != "cpu":
1038
+ compute_device = param.device # Determined by model param placement
1039
+ else:
1040
+ if device_from_device_id is not None and device_from_device_id.type != "cuda":
1041
+ compute_device = device_from_device_id # Determined by custom backend
1042
+ else:
1043
+ compute_device = torch.device("cuda", torch.cuda.current_device())
1044
+ if device_from_device_id is not None and compute_device != device_from_device_id:
1045
+ raise ValueError(
1046
+ f"Inconsistent compute device and `device_id` on rank {rank}: "
1047
+ f"{compute_device} vs {device_from_device_id}"
1048
+ )
1049
+ return compute_device
1050
+
1051
+
1052
+ # TODO: See how to deprecate!
1053
+ def _sync_module_params_and_buffers(
1054
+ module: nn.Module,
1055
+ params: List[nn.Parameter],
1056
+ process_group: dist.ProcessGroup,
1057
+ ) -> None:
1058
+ """
1059
+ Synchronize module states (i.e. parameters ``params`` and all not-yet-synced buffers) by broadcasting from rank 0 to all ranks.
1060
+
1061
+ Precondition: ``sync_module_states == True`` and ``self.process_group`` has
1062
+ been set.
1063
+ """
1064
+ module_states: List[torch.Tensor] = []
1065
+ for buffer in module.buffers():
1066
+ # Avoid re-synchronizing buffers in case of nested wrapping
1067
+ if not getattr(buffer, FSDP_SYNCED, False):
1068
+ setattr(buffer, FSDP_SYNCED, True)
1069
+ detached_buffer = buffer.detach()
1070
+ if is_traceable_wrapper_subclass(detached_buffer):
1071
+ # NOTE: Here we assume no nested subclasses, at most one level of subclass
1072
+ # in both model's buffers and params
1073
+ attrs, _ = detached_buffer.__tensor_flatten__() # type: ignore[attr-defined]
1074
+ inner_buffers = [getattr(detached_buffer, attr) for attr in attrs]
1075
+ module_states.extend(inner_buffers)
1076
+ else:
1077
+ module_states.append(detached_buffer)
1078
+
1079
+ for param in params:
1080
+ detached_param = param.detach()
1081
+ if is_traceable_wrapper_subclass(detached_param):
1082
+ attrs, _ = detached_param.__tensor_flatten__() # type: ignore[attr-defined]
1083
+ inner_params = [getattr(detached_param, attr) for attr in attrs]
1084
+ module_states.extend(inner_params)
1085
+ else:
1086
+ module_states.append(detached_param)
1087
+
1088
+ _check_module_states_for_sync_module_states(module_states)
1089
+ _sync_params_and_buffers(
1090
+ process_group,
1091
+ module_states,
1092
+ PARAM_BROADCAST_BUCKET_SIZE,
1093
+ src=0,
1094
+ )
1095
+
1096
+
1097
+ def _sync_module_states(
1098
+ params: List[nn.Parameter],
1099
+ buffers: List[torch.Tensor],
1100
+ process_group: dist.ProcessGroup,
1101
+ ) -> None:
1102
+ # Assumes that each call to this method passes in disjoint `params` and
1103
+ # and `buffers` across calls, so there is no chance of re-synchronizing
1104
+ params_and_buffers = [param.detach() for param in params] + [
1105
+ buffer.detach() for buffer in buffers
1106
+ ]
1107
+ _check_module_states_for_sync_module_states(params_and_buffers)
1108
+ _sync_params_and_buffers(
1109
+ process_group,
1110
+ params_and_buffers,
1111
+ PARAM_BROADCAST_BUCKET_SIZE,
1112
+ src=0,
1113
+ )
1114
+
1115
+
1116
+ def _check_module_states_for_sync_module_states(
1117
+ module_states: List[torch.Tensor],
1118
+ ) -> None:
1119
+ if module_states and any(
1120
+ tensor.device == torch.device("cpu") for tensor in module_states
1121
+ ):
1122
+ raise ValueError(
1123
+ "The module has CPU parameters or buffers when `sync_module_states=True`, "
1124
+ "which requires them to be on GPU. Please specify the `device_id` argument "
1125
+ "or move the module to GPU before passing it to FSDP."
1126
+ )
1127
+
1128
+
1129
+ def _get_orig_params(
1130
+ module: nn.Module,
1131
+ ignored_params: Set[nn.Parameter],
1132
+ ) -> Iterator[nn.Parameter]:
1133
+ """
1134
+ Return an iterator over the original parameters in ``module``.
1135
+
1136
+ The iterator does not return
1137
+ the parameters in ``ignored_params``, any ``FlatParameter`` s (which may be
1138
+ present due to nested FSDP wrapping), or any original parameters already
1139
+ flattened (only relevant when ``use_orig_params=True``).
1140
+ """
1141
+ param_gen = module.parameters()
1142
+ try:
1143
+ while True:
1144
+ param = next(param_gen)
1145
+ if param not in ignored_params and not _is_fsdp_flattened(param):
1146
+ yield param
1147
+ except StopIteration:
1148
+ pass
1149
+
1150
+
1151
+ def _check_orig_params_flattened(
1152
+ fsdp_module,
1153
+ ignored_params: Set[nn.Parameter],
1154
+ ) -> None:
1155
+ """
1156
+ Check that original parameters in ``fsdp_module`` have been flattened.
1157
+
1158
+ The flattened parameters are made
1159
+ invisible to ``named_parameters()`` for the module hierarchy rooted at
1160
+ ``fsdp_module``. This should be called as a sanity check after flattening
1161
+ the wrapped module's parameters.
1162
+ """
1163
+ for param_name, param in _named_parameters_with_duplicates(fsdp_module):
1164
+ if param not in ignored_params and not _is_fsdp_flattened(param):
1165
+ raise RuntimeError(
1166
+ f"Found an unflattened parameter: {param_name}; "
1167
+ f"{param.size()} {param.__class__}"
1168
+ )
1169
+
1170
+
1171
+ def _get_default_comm_hook(sharding_strategy: ShardingStrategy):
1172
+ return (
1173
+ default_hooks.allreduce_hook
1174
+ if sharding_strategy == ShardingStrategy.NO_SHARD
1175
+ else default_hooks.reduce_scatter_hook
1176
+ )
1177
+
1178
+
1179
+ def _get_default_comm_hook_state(
1180
+ process_group: dist.ProcessGroup,
1181
+ ) -> default_hooks.DefaultState:
1182
+ return default_hooks.DefaultState(process_group=process_group)
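
To make the hybrid-shard process-group layout above concrete, here is the rank arithmetic for a hypothetical 2-node, 8-GPU-per-node job. It mirrors the docstrings of `_init_intra_node_process_group` and `_init_inter_node_process_group` without creating any process groups.

# Rank arithmetic only (no process groups are created); 16 ranks split across
# 2 hypothetical nodes with 8 devices each.
num_devices_per_node = 8
world_size = 16
num_nodes = world_size // num_devices_per_node

# Intra-node groups: ranks that share a node (the sharding dimension).
intra_node_groups = [
    list(range(node * num_devices_per_node, (node + 1) * num_devices_per_node))
    for node in range(num_nodes)
]
assert intra_node_groups == [
    [0, 1, 2, 3, 4, 5, 6, 7],
    [8, 9, 10, 11, 12, 13, 14, 15],
]

# Inter-node groups: ranks with the same local rank (the all-reduce dimension).
inter_node_groups = [
    [local_rank + i * num_devices_per_node for i in range(num_nodes)]
    for local_rank in range(num_devices_per_node)
]
assert inter_node_groups[1] == [1, 9]
assert inter_node_groups[5] == [5, 13]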
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_limiter_utils.py ADDED
@@ -0,0 +1,33 @@
1
+ import collections
2
+ from typing import Deque, Optional
3
+
4
+ import torch
5
+
6
+
7
+ class _FreeEventQueue:
8
+ """
9
+ This tracks all pending frees corresponding to inflight all-gathers. The
10
+ queueing pattern is iterative enqueues with a single dequeue per iteration
11
+ once the limit ``_max_num_inflight_all_gathers`` is reached.
12
+ """
13
+
14
+ def __init__(self) -> None:
15
+ self._queue: Deque[torch.cuda.Event] = collections.deque()
16
+ self._max_num_inflight_all_gathers = 2 # empirically chosen
17
+
18
+ def enqueue(self, free_event: torch.cuda.Event) -> None:
19
+ """Enqueues a free event."""
20
+ self._queue.append(free_event)
21
+
22
+ def dequeue_if_needed(self) -> Optional[torch.cuda.Event]:
23
+ """Dequeues a single event if the limit is reached."""
24
+ if len(self._queue) >= self._max_num_inflight_all_gathers:
25
+ return self._dequeue()
26
+ return None
27
+
28
+ def _dequeue(self) -> Optional[torch.cuda.Event]:
29
+ """Dequeues a free event if possible."""
30
+ if self._queue:
31
+ event = self._queue.popleft()
32
+ return event
33
+ return None
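A hypothetical usage sketch of the queue above, assuming CUDA is available: record an event after each all-gather's buffer is freed, and synchronize on the oldest event once the in-flight limit is reached so that unsharded memory stays bounded (`issue_all_gather` is an illustrative placeholder, not an FSDP API):

import torch

def rate_limited_loop(issue_all_gather, num_iters: int) -> None:
    queue = _FreeEventQueue()
    for _ in range(num_iters):
        # Once the in-flight limit is reached, wait for the oldest pending
        # free before launching another all-gather.
        event = queue.dequeue_if_needed()
        if event is not None:
            event.synchronize()
        issue_all_gather()
        free_event = torch.cuda.Event()
        free_event.record()  # marks when the previous unsharded buffer is free
        queue.enqueue(free_event)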
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py ADDED
@@ -0,0 +1,2086 @@
1
+ import copy
2
+ import functools
3
+ import logging
4
+ import warnings
5
+ from contextlib import ExitStack
6
+ from dataclasses import dataclass, field
7
+ from typing import (
8
+ Any,
9
+ cast,
10
+ Dict,
11
+ Iterable,
12
+ Iterator,
13
+ List,
14
+ NamedTuple,
15
+ no_type_check,
16
+ Optional,
17
+ Sequence,
18
+ Set,
19
+ Tuple,
20
+ Union,
21
+ )
22
+
23
+ import torch
24
+ import torch.distributed as dist
25
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
26
+ import torch.nn as nn
27
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
28
+ from torch.distributed._state_dict_utils import _gather_state_dict
29
+ from torch.distributed._tensor import DTensor, Replicate
30
+ from torch.distributed.distributed_c10d import _get_pg_default_device
31
+ from torch.distributed.fsdp._common_utils import (
32
+ _apply_to_modules,
33
+ _FSDPState,
34
+ _get_module_fsdp_state_if_fully_sharded_module,
35
+ _get_param_to_fqns,
36
+ _module_handle,
37
+ _named_parameters_with_duplicates,
38
+ clean_tensor_name,
39
+ )
40
+ from torch.distributed.fsdp._debug_utils import SimpleProfiler
41
+ from torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle
42
+ from torch.distributed.fsdp._fsdp_extensions import (
43
+ _ext_chunk_dtensor,
44
+ _ext_chunk_tensor,
45
+ )
46
+ from torch.distributed.fsdp._runtime_utils import (
47
+ _lazy_init,
48
+ _reset_flat_param_grad_info_if_needed,
49
+ )
50
+ from torch.distributed.fsdp.api import (
51
+ ShardingStrategy,
52
+ StateDictSettings,
53
+ StateDictType,
54
+ )
55
+ from torch.utils._pytree import tree_map_only
56
+
57
+
58
+ logger = logging.getLogger(__name__)
59
+
60
+
61
+ @dataclass
62
+ class FSDPParamInfo:
63
+ state: _FSDPState
64
+ handle: FlatParamHandle
65
+ param_indices: Dict[str, int]
66
+ param_requires_grad: List[bool]
67
+
68
+
69
+ def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]:
70
+ keys = sorted(dictionary.keys())
71
+ for k in keys:
72
+ yield k, dictionary[k]
73
+
74
+
75
+ @dataclass
76
+ class _ConsolidatedOptimState:
77
+ """
78
+ This holds the consolidated optimizer state on the target rank. Positive-
79
+ dimension tensor state is communicated across ranks, while zero-dimension
80
+ tensor state and non-tensor state are taken directly from the target rank.
81
+
82
+ PyTorch version 1.12 moved to using zero-dimension tensors for scalar
83
+ values, but user-implemented optimizers may still use a float (i.e., a
84
+ non-tensor). Thus, we support both and handle them identically.
85
+
86
+ Attributes:
87
+ tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
88
+ tensor state name to the unsharded flat tensor representing the
89
+ state.
90
+ zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
91
+ dimension tensor state name to its value.
92
+ non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
93
+ name to its value.
94
+ """
95
+
96
+ tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
97
+ zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
98
+ non_tensor_state: Dict[str, Any] = field(default_factory=dict)
99
+
100
+
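For intuition, an Adam-style state entry would be split across the three buckets roughly as in this standalone sketch ("exp_avg"/"exp_avg_sq" are positive-dimension, "step" is zero-dimension); the real consolidation in _communicate_optim_state below additionally all-gathers the positive-dimension tensors across ranks:

import torch

def classify_optim_state(flat_param_state: dict) -> tuple:
    # Mirror the three buckets of _ConsolidatedOptimState.
    tensor_state, zero_dim_tensor_state, non_tensor_state = {}, {}, {}
    for name, value in flat_param_state.items():
        if torch.is_tensor(value) and value.dim() > 0:
            tensor_state[name] = value           # e.g. "exp_avg", "exp_avg_sq"
        elif torch.is_tensor(value):
            zero_dim_tensor_state[name] = value  # e.g. "step" as a 0-dim tensor
        else:
            non_tensor_state[name] = value       # e.g. "step" as a plain float
    return tensor_state, zero_dim_tensor_state, non_tensor_state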
101
+ class _PosDimTensorInfo(NamedTuple):
102
+ """
103
+ Metadata for positive-dimension tensors used internally for
104
+ :meth:`scatter_full_optim_state_dict`.
105
+
106
+ Attributes:
107
+ shape (torch.Size): Sharded tensor shape (which is equal to the
108
+ unsharded tensor shape if the tensor is optimizer state for a
109
+ non-FSDP parameter and is hence not sharded).
110
+ dtype (torch.dtype): Data type of the tensor.
111
+ """
112
+
113
+ shape: torch.Size
114
+ dtype: torch.dtype
115
+
116
+
117
+ class _OptimStateKey(NamedTuple):
118
+ """
119
+ This represents an optimizer state key that may be used commonly across
120
+ ranks. It is based on the unflattened parameter names rather than parameter
121
+ IDs to make it independent of each rank's own optimizer construction.
122
+ """
123
+
124
+ unflat_param_names: Tuple[str, ...]
125
+ is_fsdp_managed: bool
126
+
127
+
128
+ def _unflatten_optim_state(
129
+ fsdp_param_info: FSDPParamInfo,
130
+ flat_param_state: Dict[str, Any],
131
+ to_save: bool,
132
+ shard_state: bool,
133
+ cpu_offload: bool,
134
+ ) -> List[Dict[str, Any]]:
135
+ """
136
+ Unflattens the optimizer state, consisting of the "state" part and the
137
+ "param_groups" part. Unflattening the "state" part involves consolidating
138
+ the state on the target rank and remapping from flattened to unflattened
139
+ parameter IDs, and the "param_groups" part only involves remapping from
140
+ flattened to unflattened parameter IDs.
141
+
142
+ Args:
143
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
144
+ mapping from FQN to original parameter index.
145
+ flat_param_state (Dict[str, Any]): Entry for the flat parameter in the
146
+ "state" part of the optimizer state dict.
147
+ to_save (bool): Whether to save the state on this rank.
148
+
149
+ Returns:
150
+ List[Dict[str, Any]]: A :class:`list` holding the entries in the
151
+ "state" part of the optimizer state dict corresponding to the
152
+ unflattened parameters comprising the flat parameter if on the target
153
+ rank or an empty :class:`list` otherwise. The final optimizer state
154
+ dict will need to map these entries using the proper unflattened
155
+ parameter IDs.
156
+ """
157
+ assert (
158
+ not shard_state or to_save
159
+ ), "If ``shard_state`` is True, ``to_save`` has to be True."
160
+ consolidated_state = _communicate_optim_state(
161
+ fsdp_param_info,
162
+ flat_param_state,
163
+ )
164
+ if to_save:
165
+ unflat_param_state = _unflatten_communicated_optim_state(
166
+ fsdp_param_info,
167
+ consolidated_state,
168
+ shard_state,
169
+ )
170
+ for optim_state in unflat_param_state:
171
+ # We cannot use .items() below because we would run into a concurrent modification error
172
+ if cpu_offload:
173
+ for key in list(optim_state.keys()):
174
+ state = optim_state[key]
175
+ if not isinstance(state, torch.Tensor):
176
+ continue
177
+ optim_state[key] = state.cpu()
178
+ return unflat_param_state
179
+ else:
180
+ return []
181
+
182
+
183
+ def _is_zero_dim_tensor(x: Any) -> bool:
184
+ return torch.is_tensor(x) and x.dim() == 0
185
+
186
+
187
+ def _communicate_optim_state(
188
+ fsdp_param_info: FSDPParamInfo,
189
+ flat_param_state: Dict[str, Any],
190
+ ) -> _ConsolidatedOptimState:
191
+ """
192
+ Communicates the optimizer state for a flat parameter across ranks. All
193
+ ranks will hold the entire non-sharded optimizer state on GPU.
194
+
195
+ If ``N`` is the number of tensor optimizer states in the optimizer state
196
+ dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
197
+ otherwise (where the plus 1 comes from all-gathering the padding per rank).
198
+
199
+ Args:
200
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
201
+ mapping from FQN to original parameter index.
202
+ flat_param_state (Dict[str, Any]): The entry in the "state" part of the
203
+ optimizer state dict corresponding to the flat parameter.
204
+
205
+ Returns:
206
+ _ConsolidatedOptimState: Consolidated optimizer state for the target
207
+ flat parameter.
208
+ """
209
+ fsdp_state = fsdp_param_info.state
210
+ flat_param = fsdp_param_info.handle.flat_param
211
+ state = _ConsolidatedOptimState()
212
+ tensor_state, zero_dim_tensor_state, non_tensor_state = (
213
+ state.tensor_state,
214
+ state.zero_dim_tensor_state,
215
+ state.non_tensor_state,
216
+ )
217
+
218
+ for state_name, value in sorted_items(flat_param_state):
219
+ # Positive-dimension tensor state: communicate across ranks
220
+ if torch.is_tensor(value) and value.dim() > 0:
221
+ # If the parameter is not sharded, then neither is the
222
+ # positive-dimension tensor state, so no need to communicate it --
223
+ # we take the target rank's value
224
+ if (
225
+ fsdp_state.world_size == 1
226
+ or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
227
+ ):
228
+ tensor_state[state_name] = value
229
+ continue
230
+ assert (
231
+ fsdp_state.compute_device is not None
232
+ ), "compute_device has not been initialized"
233
+ if value.device.type != fsdp_state.compute_device.type:
234
+ value = value.to(fsdp_state.compute_device)
235
+ # Assume that positive-dimension tensor optimizer state
236
+ # has the same shape as the sharded flat parameter
237
+ buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
238
+ tensor_buffer = value.new_zeros(*buffer_size)
239
+ dist.all_gather_into_tensor(
240
+ tensor_buffer, value, group=fsdp_state.process_group
241
+ )
242
+ fsdp_state._device_handle.synchronize()
243
+ unpadded_numel = cast(
244
+ nn.Parameter, flat_param._unpadded_unsharded_size
245
+ ).numel()
246
+ tensor_state[state_name] = tensor_buffer[:unpadded_numel]
247
+ # Zero-dimension tensor state and non-tensor state: take this rank's
248
+ # value directly
249
+ else:
250
+ if _is_zero_dim_tensor(value):
251
+ zero_dim_tensor_state[state_name] = value.detach().clone()
252
+ else:
253
+ non_tensor_state[state_name] = value
254
+ return state
255
+
256
+
257
+ def _unflatten_communicated_optim_state(
258
+ fsdp_param_info: FSDPParamInfo,
259
+ state: _ConsolidatedOptimState,
260
+ shard_state: bool,
261
+ ) -> List[Dict[str, Any]]:
262
+ """
263
+ Unflattens the communicated optimizer state (given by ``tensor_state``,
264
+ ``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat
265
+ parameter. This should only be called on the target rank.
266
+
267
+ Args:
268
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
269
+ mapping from FQN to original parameter index.
270
+ state (_ConsolidatedOptimState): Consolidated optimizer state.
271
+
272
+ Returns:
273
+ List[Dict[str, Any]]: A :class:`list` holding the entries in the
274
+ "state" part of the optimizer state dict corresponding to the
275
+ unflattened parameters comprising the flat parameter. The final
276
+ optimizer state dict will need to map these entries using the proper
277
+ unflattened parameter IDs.
278
+ """
279
+ fsdp_state = fsdp_param_info.state
280
+ handle = fsdp_param_info.handle
281
+ flat_param = handle.flat_param
282
+ unflat_param_state: List[Dict[str, Any]] = []
283
+ flat_param_views: Dict[str, Iterator] = {}
284
+ num_unflat_params = flat_param._num_params
285
+ tensor_state, zero_dim_tensor_state, non_tensor_state = (
286
+ state.tensor_state,
287
+ state.zero_dim_tensor_state,
288
+ state.non_tensor_state,
289
+ )
290
+
291
+ for _ in range(num_unflat_params):
292
+ unflat_state_param = {}
293
+ # Add positive-dimension tensor state: unflatten with views
294
+ for state_name, flat_tensor in sorted_items(tensor_state):
295
+ views_generated = state_name in flat_param_views
296
+ if not views_generated:
297
+ views = handle._get_unflat_views(flat_tensor)
298
+ flat_param_views[state_name] = views
299
+ else:
300
+ views = flat_param_views[state_name]
301
+ optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views)
302
+ if shard_state:
303
+ osd_config = fsdp_state._optim_state_dict_config
304
+ if getattr(osd_config, "_use_dtensor", False):
305
+ assert fsdp_state._device_mesh is not None
306
+ optim_state = _ext_chunk_dtensor(
307
+ optim_state,
308
+ fsdp_state.rank,
309
+ fsdp_state._device_mesh,
310
+ fsdp_state._fsdp_extension,
311
+ )
312
+ else:
313
+ assert fsdp_state.process_group is not None
314
+ optim_state = _ext_chunk_tensor(
315
+ optim_state,
316
+ fsdp_state.rank,
317
+ fsdp_state.world_size,
318
+ fsdp_state._device_handle.device_count(),
319
+ fsdp_state.process_group,
320
+ fsdp_state._fsdp_extension,
321
+ )
322
+ unflat_state_param[state_name] = optim_state
323
+
324
+ # Add zero-dimension tensor state: take the target rank's value
325
+ for state_name, zero_dim_tensor in sorted_items(zero_dim_tensor_state):
326
+ unflat_state_param[state_name] = zero_dim_tensor
327
+ # Add non-tensor state: take the target rank's value
328
+ for state_name, non_tensor in sorted_items(non_tensor_state):
329
+ unflat_state_param[state_name] = non_tensor
330
+ unflat_param_state.append(unflat_state_param)
331
+ return unflat_param_state
332
+
333
+
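The per-parameter views produced by `_get_unflat_views` above can be pictured as splitting the unsharded flat tensor by each original parameter's numel and reshaping, as in this simplified sketch (it ignores any alignment padding the real handle may insert):

from typing import List

import torch

def unflatten_by_shapes(flat_tensor: torch.Tensor, shapes: List[torch.Size]) -> List[torch.Tensor]:
    # Split the 1D flat tensor into consecutive chunks, one per original
    # parameter, and reshape each chunk back to that parameter's shape.
    numels = [shape.numel() for shape in shapes]
    chunks = torch.split(flat_tensor, numels)
    return [chunk.view(shape) for chunk, shape in zip(chunks, shapes)]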
334
+ def _broadcast_processed_state(
335
+ fsdp_state: _FSDPState,
336
+ optim_state: Dict[str, Any],
337
+ group: Optional[dist.ProcessGroup],
338
+ ) -> Dict[str, Any]:
339
+ objects: List[Any] = [None]
340
+ if fsdp_state.rank == 0:
341
+ objects[0] = tree_map_only(
342
+ torch.Tensor,
343
+ lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype), # type: ignore[union-attr]
344
+ optim_state,
345
+ )
346
+ dist.broadcast_object_list(objects, src=0, group=group)
347
+ if fsdp_state.rank == 0:
348
+ return optim_state
349
+ else:
350
+ return objects[0]
351
+
352
+
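The function above, together with `_broadcast_state` below, broadcasts cheap metadata first and the heavy tensors second. A standalone sketch of that idea for a single tensor, assuming an initialized process group (`broadcast_tensor_from_rank0` is illustrative, not an FSDP API):

import torch
import torch.distributed as dist

def broadcast_tensor_from_rank0(tensor_or_none, device, group=None):
    # Rank 0 first shares (shape, dtype) as a picklable object; other ranks
    # then allocate a matching buffer and receive the data via broadcast.
    meta = [None]
    if dist.get_rank(group) == 0:
        meta[0] = (tensor_or_none.shape, tensor_or_none.dtype)
    dist.broadcast_object_list(meta, src=0, group=group)
    shape, dtype = meta[0]
    if dist.get_rank(group) == 0:
        tensor = tensor_or_none.to(device)
    else:
        tensor = torch.zeros(shape, dtype=dtype, device=device)
    dist.broadcast(tensor, src=0, group=group)
    return tensor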
353
+ def _broadcast_state(
354
+ fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup]
355
+ ) -> Any:
356
+ if fsdp_state.rank == 0:
357
+ if not isinstance(state, torch.Tensor) or state.dim() == 0:
358
+ return state
359
+ tensor = state.to(fsdp_state.compute_device)
360
+ else:
361
+ if isinstance(state, torch.Tensor):
362
+ assert state.dim() == 0, (
363
+ "For non-zero ranks, a tensor state should have zero dimension, "
364
+ "but got the state with shape {state.shape()}."
365
+ )
366
+ return state
367
+ elif not isinstance(state, _PosDimTensorInfo):
368
+ return state
369
+ tensor = torch.zeros(
370
+ state.shape, dtype=state.dtype, device=fsdp_state.compute_device
371
+ )
372
+ dist.broadcast(tensor, src=0, group=group)
373
+ return tensor
374
+
375
+
376
+ def _shard_orig_param_state(
377
+ fsdp_param_info: FSDPParamInfo,
378
+ fqn: str,
379
+ optim_state: Dict[str, Any],
380
+ ) -> Dict[str, Any]:
381
+ """
382
+ Shard the optimizer state for the original parameter with the name ``fqn``.
383
+ This API should only be used when ``use_orig_params`` is True.
384
+ """
385
+ if not optim_state:
386
+ return {}
387
+ fsdp_state = fsdp_param_info.state
388
+ flat_param = fsdp_param_info.handle.flat_param
389
+ param_idx = fsdp_param_info.param_indices[fqn]
390
+ shard_param_info = flat_param._shard_param_infos[param_idx] # type: ignore[attr-defined]
391
+ optim_state = _gather_state_dict(
392
+ optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device
393
+ )
394
+ if not shard_param_info.in_shard:
395
+ return {}
396
+ # Flatten and shard the state.
397
+ new_optim_state: Dict[str, Any] = {}
398
+ intra_param_start_idx = shard_param_info.intra_param_start_idx
399
+ intra_param_end_idx = shard_param_info.intra_param_end_idx
400
+ for state_name, value in optim_state.items():
401
+ if (
402
+ torch.is_tensor(value)
403
+ and value.dim() > 0
404
+ and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
405
+ ):
406
+ value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone() # type: ignore[operator]
407
+ new_optim_state[state_name] = value
408
+ return new_optim_state
409
+
410
+
411
+ def _flatten_optim_state_dict(
412
+ optim_state_dict: Dict[str, Any],
413
+ model: nn.Module,
414
+ use_orig_params: bool = False,
415
+ optim: Optional[torch.optim.Optimizer] = None,
416
+ rank0_only: bool = False,
417
+ group: Optional[dist.ProcessGroup] = None,
418
+ ) -> Dict[str, Any]:
419
+ """
420
+ Flattens the full optimizer state dict, still keying by unflattened parameter
421
+ names.
422
+
423
+ If ``use_orig_params`` is True, each rank will have all FSDP-managed
424
+ parameters but some of these parameters may be empty due to the sharding.
425
+ For a regular optim.Optimizer, states for those empty parameters will
426
+ not be initialized. So, when aggregating the FQNs across ranks, no assert
427
+ will be raised on a rank even if it does not have all the states -- it is
428
+ valid and FSDP knows how to aggregate them. However, FSDP has to ignore
429
+ handling those parameters that are not managed by FSDP and do not exist on
430
+ the local rank -- these are managed by other parallelisms and FSDP does not
431
+ know how to handle/aggregate them.
432
+
433
+ Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to
434
+ flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require
435
+ all the states even if the corresponding parameters are empty. To this end,
436
+ ``optim`` will be used to get the initial state of the empty parameters.
437
+ ``optim`` should only be non-None if it is a KeyedOptimizer or
438
+ NamedOptimizer.
439
+
440
+ Returns:
441
+ Dict[str, Any]: The flattened optimizer state dict.
442
+ """
443
+ SimpleProfiler.reset()
444
+
445
+ unflat_osd = optim_state_dict
446
+ if "state" not in unflat_osd and not rank0_only:
447
+ raise ValueError(
448
+ '`optim_state_dict` must have the key "state" '
449
+ "to be a valid optimizer state dict"
450
+ )
451
+ param_to_fqns = _get_param_to_fqns(model)
452
+ fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
453
+ fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state
454
+
455
+ # Broadcast unflat_osd without non-scalar tensor if rank0_only is True.
456
+ if rank0_only:
457
+ unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group)
458
+
459
+ # Construct the "state" part
460
+ flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {}
461
+ unflat_osd_state = unflat_osd["state"]
462
+ all_state_keys = set(unflat_osd_state.keys())
463
+
464
+ for param, fqns in param_to_fqns.items():
465
+ fqn = fqns[0]
466
+ if fqn not in unflat_osd_state:
467
+ continue
468
+ all_state_keys.difference_update(fqns)
469
+
470
+ if rank0_only:
471
+ for fqn in fqns:
472
+ if not unflat_osd_state[fqn]:
473
+ continue
474
+ for state_name in unflat_osd_state[fqn].keys():
475
+ unflat_osd_state[fqn][state_name] = _broadcast_state(
476
+ fsdp_state, unflat_osd_state[fqn][state_name], group=group
477
+ )
478
+ fqn = fqns[0]
479
+ if fqn in fqn_to_fsdp_param_info:
480
+ fsdp_param_info = fqn_to_fsdp_param_info[fqn]
481
+ if use_orig_params:
482
+ with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
483
+ flat_state = _shard_orig_param_state(
484
+ fsdp_param_info,
485
+ fqn,
486
+ unflat_osd_state[fqn],
487
+ )
488
+ else:
489
+ flat_state = _flatten_optim_state(
490
+ fsdp_param_info,
491
+ unflat_osd_state,
492
+ fqns,
493
+ )
494
+ key = _OptimStateKey(tuple(fqns), True)
495
+ # Only include non-empty states, as expected by
496
+ # `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer
497
+ # or NamedOptimizer.
498
+ if flat_state:
499
+ flat_osd_state[key] = flat_state
500
+ elif use_orig_params:
501
+ assert (
502
+ len(fqns) == 1
503
+ ), f"use_orig_params is True but there are multiple FQNs, {fqns}."
504
+ if optim is not None: # NamedOptimizer or KeyedOptimizer case.
505
+ state = optim.state.get(param, None) # type: ignore[call-overload]
506
+ if state is not None:
507
+ flat_osd_state[key] = copy.deepcopy(state)
508
+ else:
509
+ warnings.warn(
510
+ f"optim_state[{key}] is not on rank{fsdp_state.rank}."
511
+ )
512
+
513
+ else:
514
+ raise RuntimeError(
515
+ f"The state of {key} is empty. This should happen when "
516
+ "use_orig_params=True."
517
+ )
518
+ else: # do not flatten non-FSDP parameters' states
519
+ assert len(fqns) == 1
520
+ key = _OptimStateKey(tuple(fqns), False)
521
+ flat_osd_state[key] = copy.copy(unflat_osd_state[fqn])
522
+
523
+ if rank0_only:
524
+ for fqn in fqns:
525
+ if not unflat_osd_state[fqn]:
526
+ continue
527
+ for state_name, param_state in list(unflat_osd_state[fqn].items()):
528
+ if fsdp_state.rank > 0:
529
+ # Dereference the tensor so that PyTorch can free the memory.
530
+ del unflat_osd_state[fqn][state_name]
531
+ else:
532
+ # Move the tensor in the original osd back to CPU to make the
533
+ # original osd unaffected.
534
+ unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][
535
+ state_name
536
+ ].cpu()
537
+
538
+ # Handle user-defined state, states that are not associated with parameters.
539
+ for key in all_state_keys:
540
+ user_state = unflat_osd_state[key]
541
+ if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params:
542
+ user_state = _broadcast_state(fsdp_state, user_state, group=group)
543
+ flat_osd_state[key] = copy.copy(user_state)
544
+
545
+ SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ")
546
+ # Construct the "param_groups" part -- copy as is since it will be
547
+ # rekeyed later according to the target rank's optimizer
548
+ # Only copy param_groups if it exists in unflat_osd
549
+ if "param_groups" in unflat_osd:
550
+ flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"])
551
+ return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
552
+ else:
553
+ return {"state": flat_osd_state}
554
+
555
+
556
+ def _flatten_optim_state(
557
+ fsdp_param_info: FSDPParamInfo,
558
+ unflat_osd_state: Dict[str, Dict[str, Any]],
559
+ unflat_param_names: List[str],
560
+ ) -> Dict[str, Any]:
561
+ """
562
+ Flattens the optimizer state in ``full_optim_state_dict`` for a single
563
+ flat parameter in ``fsdp_param_info`` corresponding to the unflattened
564
+ parameter names in ``unflat_param_names``.
565
+
566
+ Args:
567
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
568
+ mapping from FQN to original parameter index.
569
+ unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
570
+ optimizer state dict corresponding to the unflattened parameters.
571
+ unflat_param_names (List[str]): A :class:`list` of unflattened
572
+ parameter names corresponding to the flat parameter ``flat_param``.
573
+
574
+ Returns:
575
+ Dict[str, Any]: A :class:`dict` mapping state names to their values for
576
+ a particular flat parameter. The sharded optimizer state dict's "state"
577
+ part will map a key to this returned value.
578
+ """
579
+ fsdp_state = fsdp_param_info.state
580
+ handle = fsdp_param_info.handle
581
+ flat_param = handle.flat_param
582
+ num_unflat_params = len(unflat_param_names)
583
+ assert num_unflat_params > 0, (
584
+ "Expects at least one unflattened parameter corresponding to the "
585
+ "flat parameter"
586
+ )
587
+ unflat_param_shapes = flat_param._shapes
588
+ num_unflat_param_shapes = len(unflat_param_shapes)
589
+ assert (
590
+ num_unflat_params == num_unflat_param_shapes
591
+ ), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"
592
+
593
+ # Check if these unflattened parameters have any optimizer state
594
+ has_state = [
595
+ bool(unflat_param_name in unflat_osd_state)
596
+ for unflat_param_name in unflat_param_names
597
+ ]
598
+ # If none of the unflattened parameters comprising this flat parameter have
599
+ # any state, then we do not want an entry in the optimizer state dict
600
+ if not any(has_state):
601
+ return {} # no need to flatten any state
602
+ # There may still be some unflattened parameters with state and some
603
+ # without
604
+ unflat_param_states = [
605
+ _gather_state_dict(
606
+ unflat_osd_state[unflat_param_name],
607
+ pg=fsdp_state.process_group,
608
+ device=fsdp_state.compute_device,
609
+ )
610
+ if unflat_param_name in unflat_osd_state
611
+ else None
612
+ for unflat_param_name in unflat_param_names
613
+ ]
614
+ # Check that the unflattened parameters have the same state names
615
+ state_names = None
616
+ for unflat_param_state in unflat_param_states:
617
+ if unflat_param_state is None:
618
+ continue
619
+ if state_names is None:
620
+ state_names = set(unflat_param_state.keys())
621
+ else:
622
+ if state_names != set(unflat_param_state.keys()):
623
+ raise ValueError(
624
+ "Differing optimizer state names for the unflattened "
625
+ f"parameters: {unflat_param_names}"
626
+ )
627
+ assert state_names is not None
628
+
629
+ # Flatten the state
630
+ flat_state: Dict[str, Any] = {}
631
+ for state_name in state_names:
632
+ state_values = [
633
+ unflat_param_state[state_name] if unflat_param_state is not None else None
634
+ for unflat_param_state in unflat_param_states
635
+ ]
636
+ non_none_state_values = [v for v in state_values if v is not None]
637
+ # If all ranks have None, this is a None value
638
+ if not non_none_state_values:
639
+ flat_state[state_name] = None
640
+ continue
641
+ are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
642
+ for v in non_none_state_values:
643
+ are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
644
+ are_zero_dim_tensors &= _is_zero_dim_tensor(v)
645
+ are_non_tensors &= not torch.is_tensor(v)
646
+ types = {type(v) for v in non_none_state_values}
647
+ if len(types) != 1 or not (
648
+ are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
649
+ ):
650
+ raise ValueError(
651
+ f"Differing optimizer state types for state {state_name}, "
652
+ f"values {non_none_state_values}, and unflattened parameter "
653
+ f"names {unflat_param_names}"
654
+ )
655
+ if are_pos_dim_tensors:
656
+ flat_tensor = _flatten_tensor_optim_state(
657
+ state_name,
658
+ state_values,
659
+ unflat_param_names,
660
+ unflat_param_shapes,
661
+ handle,
662
+ )
663
+ # Shard the flattened tensor immediately to minimize max memory
664
+ # usage
665
+ if (
666
+ fsdp_state.world_size != 1
667
+ and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
668
+ ):
669
+ sharded_flat_tensor, _ = FlatParamHandle._get_shard(
670
+ flat_tensor,
671
+ fsdp_state.rank,
672
+ fsdp_state.world_size,
673
+ )
674
+ else:
675
+ sharded_flat_tensor = flat_tensor
676
+ flat_state[state_name] = sharded_flat_tensor
677
+ elif are_zero_dim_tensors:
678
+ flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
679
+ state_name,
680
+ state_values,
681
+ unflat_param_names,
682
+ )
683
+ else:
684
+ assert are_non_tensors
685
+ flat_state[state_name] = _flatten_non_tensor_optim_state(
686
+ state_name,
687
+ state_values,
688
+ unflat_param_names,
689
+ )
690
+
691
+ return flat_state
692
+
693
+
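A simplified picture of the flatten-then-shard step performed above, assuming no alignment padding: concatenate each original parameter's flattened state, right-pad so the total divides evenly, and keep only this rank's chunk (`flatten_and_shard_state` is an illustrative helper, not the FSDP implementation):

from typing import List

import torch
import torch.nn.functional as F

def flatten_and_shard_state(tensors: List[torch.Tensor], rank: int, world_size: int) -> torch.Tensor:
    # Concatenate per-parameter state into one flat tensor, pad on the right
    # so it splits evenly, and return this rank's shard.
    flat = torch.cat([t.flatten() for t in tensors])
    padded_numel = -(-flat.numel() // world_size) * world_size  # ceil to a multiple
    padded = F.pad(flat, [0, padded_numel - flat.numel()])
    return padded.chunk(world_size)[rank].clone()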
694
+ def _flatten_tensor_optim_state(
695
+ state_name: str,
696
+ pos_dim_tensors: List[torch.Tensor],
697
+ unflat_param_names: List[str],
698
+ unflat_param_shapes: Sequence[torch.Size],
699
+ handle: FlatParamHandle,
700
+ ) -> torch.Tensor:
701
+ """
702
+ Flattens the positive-dimension tensor optimizer state given by the values
703
+ ``tensors`` for the state ``state_name`` for a single flat parameter
704
+ from ``handle`` corresponding to the unflattened parameter names
705
+ ``unflat_param_names`` and unflatted parameter shapes
706
+ ``unflat_param_shapes``. This flattens each unflattened parameter's tensor
707
+ state into one tensor.
708
+
709
+ NOTE: We use zero tensors for any unflattened parameters without state
710
+ since some value is required to fill those entries. This assumes that the
711
+ zero tensor is mathematically equivalent to having no state, which is true
712
+ for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
713
+ optimizers.
714
+
715
+ Args:
716
+ state_name (str): Optimizer state name.
717
+ pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
718
+ optimizer state values for the unflattened parameters corresponding
719
+ to the single flat parameter.
720
+ unflat_param_names (List[str]): A :class:`list` of unflattened
721
+ parameter names corresponding to the single flat parameter.
722
+ unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
723
+ corresponding to the single flat parameter.
724
+ handle (FlatParamHandle): The flat parameter's handle.
725
+
726
+ Returns:
727
+ torch.Tensor: A flat tensor containing the optimizer state
728
+ corresponding to ``state_name`` constructed by concatenating the
729
+ unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
730
+ tensors for any unflattened parameters without the state).
731
+ """
732
+ flat_param = handle.flat_param
733
+ non_none_tensors = [t for t in pos_dim_tensors if t is not None]
734
+ # Check that all are tensors with the same dtype
735
+ dtypes = {t.dtype for t in non_none_tensors}
736
+ if len(dtypes) != 1:
737
+ raise ValueError(
738
+ "All unflattened parameters comprising a single flat "
739
+ "parameter must have positive-dimension tensor state with the "
740
+ f"same dtype but got dtypes {dtypes} for state {state_name} and "
741
+ f"unflattened parameter names {unflat_param_names}"
742
+ )
743
+ dtype = next(iter(dtypes))
744
+ # Check that each tensor state matches its parameter's shape
745
+ for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
746
+ if tensor is None and len(shape) == 0:
747
+ raise ValueError("Flattening a zero-dimension parameter is not supported")
748
+ elif tensor is not None and tensor.shape != shape:
749
+ raise ValueError(
750
+ "Tensor optimizer state does not have same shape as its "
751
+ f"parameter: {tensor.shape} {shape}"
752
+ )
753
+ # Flatten the tensor states: we do not need to add any right-hand-side
754
+ # padding since the flat optimizer state tensor is sharded via
755
+ # `_get_shard()`, which pads the shard as needed (just like for the flat
756
+ # parameter)
757
+ cpu_device = torch.device("cpu")
758
+ tensors_to_flatten = [
759
+ torch.flatten(state_value.to(cpu_device))
760
+ if state_value is not None
761
+ else torch.flatten(
762
+ torch.zeros(
763
+ size=shape,
764
+ dtype=dtype,
765
+ device=cpu_device,
766
+ )
767
+ )
768
+ for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes)
769
+ ]
770
+ flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel)
771
+ flat_param_shape = flat_param._unpadded_unsharded_size # type: ignore[attr-defined]
772
+ assert flat_tensor.shape == flat_param_shape, (
773
+ f"tensor optim state: {flat_tensor.shape} "
774
+ f"flat parameter: {flat_param_shape}"
775
+ )
776
+ return flat_tensor
777
+
778
+
779
+ def _flatten_zero_dim_tensor_optim_state(
780
+ state_name: str,
781
+ zero_dim_tensors: List[torch.Tensor],
782
+ unflat_param_names: List[str],
783
+ ) -> torch.Tensor:
784
+ """
785
+ Flattens the zero-dimension tensor optimizer state given by the values
786
+ ``zero_dim_tensors`` for the state ``state_name`` for a single flat
787
+ parameter corresponding to the unflattened parameter names
788
+ ``unflat_param_names`` by enforcing that all tensors are the same and using
789
+ that common value.
790
+
791
+ NOTE: The requirement that the tensors are the same across all unflattened
792
+ parameters comprising the flat parameter is needed to maintain the
793
+ invariant that FSDP performs the same computation as its non-sharded
794
+ equivalent. This means that none of the unflattened parameters can be
795
+ missing this state since imposing a value may differ from having no value.
796
+ For example, for Adam's "step", no value means maximum bias correction,
797
+ while having some positive value means less bias correction.
798
+
799
+ Args:
800
+ state_name (str): Optimizer state name.
801
+ zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
802
+ for the unflattened parameters corresponding to the single
803
+ flat parameter.
804
+ unflat_param_names (List[str]): A :class:`list` of unflattened
805
+ parameter names corresponding to the single flat parameter.
806
+
807
+ Returns:
808
+ torch.Tensor: A zero-dimensional tensor giving the value of the state
809
+ ``state_name`` for all unflattened parameters corresponding to the
810
+ names ``unflat_param_names``.
811
+ """
812
+ non_none_tensors = [t for t in zero_dim_tensors if t is not None]
813
+ # Enforce that all have the same value and dtype
814
+ values_set = {t.item() if t is not None else None for t in zero_dim_tensors}
815
+ dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors}
816
+ if (
817
+ len(non_none_tensors) != len(zero_dim_tensors)
818
+ or len(values_set) != 1
819
+ or len(dtypes) != 1
820
+ ):
821
+ raise ValueError(
822
+ "All unflattened parameters comprising a single flat "
823
+ "parameter must have scalar state with the same value and dtype "
824
+ f"but got values {values_set} and dtypes {dtypes} for state "
825
+ f"{state_name} and unflattened parameter names "
826
+ f"{unflat_param_names}"
827
+ )
828
+ value = next(iter(values_set))
829
+ dtype = next(iter(dtypes))
830
+ return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
831
+
832
+
833
+ def _flatten_non_tensor_optim_state(
834
+ state_name: str,
835
+ non_tensors: List[Any],
836
+ unflat_param_names: List[str],
837
+ ) -> Any:
838
+ """
839
+ Flattens the non-tensor optimizer state given by the values ``non_tensors``
840
+ for the state ``state_name`` for a single flat parameter corresponding
841
+ to the unflattened parameter names ``unflat_param_names`` by enforcing that
842
+ all values are the same and using that common value.
843
+
844
+ See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
845
+
846
+ Args:
847
+ state_name (str): Optimizer state name.
848
+ non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
849
+ parameters corresponding to the single flat parameter.
850
+ unflat_param_names (List[str]): A :class:`list` of unflattened
851
+ parameter names corresponding to the single flat parameter.
852
+
853
+ Returns:
854
+ Any: A non-tensor giving the value of the state ``state_name`` for all
855
+ unflattened parameters corresponding to the names
856
+ ``unflat_param_names``.
857
+ """
858
+ non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
859
+ # Enforce that all have the same value (same type already checked)
860
+ non_tensor_set = set(non_tensors)
861
+ if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1:
862
+ raise ValueError(
863
+ "All unflattened parameters comprising a single flat "
864
+ "parameter must have scalar state with the same value and dtype "
865
+ f"but got values {non_tensor_set} for state {state_name} and "
866
+ f"unflattened parameter names {unflat_param_names}"
867
+ )
868
+ non_tensor = next(iter(non_tensor_set))
869
+ return non_tensor
870
+
871
+
872
+ def _rekey_sharded_optim_state_dict(
873
+ sharded_osd: Dict[str, Any],
874
+ model: nn.Module,
875
+ optim: torch.optim.Optimizer,
876
+ optim_input: Optional[
877
+ Union[
878
+ List[Dict[str, Any]],
879
+ Iterable[nn.Parameter],
880
+ ]
881
+ ],
882
+ using_optim_input: bool,
883
+ is_named_optimizer: bool = False,
884
+ ) -> Dict[str, Any]:
885
+ """
886
+ Rekeys the optimizer state dict from unflattened parameter names to flat
887
+ parameter IDs according to the calling rank's ``optim``, which may be
888
+ different across ranks. In particular, the unflattened parameter names are
889
+ represented as :class:`_OptimStateKey` s.
890
+ """
891
+ param_to_fqns = _get_param_to_fqns(model)
892
+ flat_param_to_fqn = _get_flat_param_to_fqn(model)
893
+ param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast(
894
+ Dict[nn.Parameter, Union[int, str]],
895
+ (
896
+ _get_param_to_param_id_from_optim_input(model, optim_input)
897
+ if using_optim_input
898
+ else _get_param_to_param_key(
899
+ optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
900
+ )
901
+ ),
902
+ )
903
+ # All parameter keys in `param_to_param_key` should be in
904
+ # `param_to_fqns` -- strict inequality follows when not all parameters are
905
+ # passed to the optimizer
906
+ assert len(param_to_param_key) <= len(param_to_fqns)
907
+
908
+ unflat_param_names_to_flat_param_key: Dict[
909
+ Tuple[str, ...], Union[int, str]
910
+ ] = {} # for "state"
911
+ unflat_param_name_to_flat_param_key: Dict[
912
+ str, Union[int, str]
913
+ ] = {} # for "param_groups"
914
+ for param, unflat_param_names in param_to_fqns.items():
915
+ if param not in param_to_param_key:
916
+ # This parameter was not passed to the optimizer
917
+ continue
918
+ flat_param_key = param_to_param_key[param]
919
+ unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key
920
+ for unflat_param_name in unflat_param_names:
921
+ unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key
922
+
923
+ sharded_osd_state = sharded_osd["state"]
924
+ rekeyed_osd_state: Dict[Union[str, int], Any] = {}
925
+ for key, param_state in sharded_osd_state.items():
926
+ if isinstance(key, str):
927
+ rekeyed_osd_state[key] = param_state
928
+ continue
929
+ flat_param_key = unflat_param_names_to_flat_param_key.get(
930
+ key.unflat_param_names, key.unflat_param_names
931
+ )
932
+ rekeyed_osd_state[flat_param_key] = param_state
933
+
934
+ # Only process param_groups if it exists in sharded_osd
935
+ if "param_groups" in sharded_osd:
936
+ rekeyed_osd_param_groups: List[Dict[str, Any]] = []
937
+ for unflat_param_group in sharded_osd["param_groups"]:
938
+ flat_param_group = copy.deepcopy(unflat_param_group)
939
+ flat_param_keys = sorted(
940
+ {
941
+ unflat_param_name_to_flat_param_key[unflat_param_name]
942
+ for unflat_param_name in unflat_param_group["params"]
943
+ }
944
+ )
945
+ flat_param_group["params"] = flat_param_keys
946
+ rekeyed_osd_param_groups.append(flat_param_group)
947
+ return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
948
+ else:
949
+ return {"state": rekeyed_osd_state}
950
+
951
+
952
+ def _get_param_id_to_param_from_optim_input(
953
+ model: nn.Module,
954
+ optim_input: Optional[
955
+ Union[
956
+ List[Dict[str, Any]],
957
+ Iterable[nn.Parameter],
958
+ ]
959
+ ] = None,
960
+ ) -> Dict[int, nn.Parameter]:
961
+ """
962
+ Constructs a mapping from parameter IDs to parameters. This may be used
963
+ both for models with ``FlatParameter`` s and without.
964
+
965
+ NOTE: This method is only preserved for backward compatibility. The method
966
+ :meth:`_get_param_key_to_param` is the preferred code path that does not
967
+ rely on ``optim_input``.
968
+
969
+ NOTE: We critically assume that, whether the optimizer input is a list of
970
+ parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
971
+ enumerates the parameter IDs in order. In other words, for a parameter list
972
+ input, the parameter IDs should be in that list order, and for a parameter
973
+ groups input, the parameter IDs should be in order within each parameter
974
+ group and in order across parameter groups.
975
+
976
+ Args:
977
+ model (nn.Module): Model whose parameters are passed into the
978
+ optimizer.
979
+ optim_input (Optional[Union[List[Dict[str, Any]],
980
+ Iterable[nn.Parameter]]]): Input passed into the optimizer
981
+ representing either a :class:`list` of parameter groups or an
982
+ iterable of parameters; if ``None``, then this method assumes the
983
+ input was ``model.parameters()``. (Default: ``None``)
984
+
985
+ Returns:
986
+ Dict[int, nn.Parameter]: Mapping from parameter IDs to parameters,
987
+ where the parameter ID is implicitly the enumeration index.
988
+ """
989
+ # Assume the standard case of passing `model.parameters()` to the optimizer
990
+ # if `optim_input` is not specified
991
+ if optim_input is None:
992
+ return dict(enumerate(model.parameters()))
993
+ try:
994
+ params = cast(List[nn.Parameter], list(optim_input))
995
+ except TypeError as e:
996
+ raise TypeError(
997
+ "Optimizer input should be an iterable of Tensors or dicts, "
998
+ f"but got {optim_input}"
999
+ ) from e
1000
+ if len(params) == 0:
1001
+ raise ValueError("Optimizer input should not be empty")
1002
+
1003
+ # Check if the optimizer input represents tensors or parameter groups
1004
+ all_tensors = True
1005
+ all_dicts = True
1006
+ for param in params:
1007
+ all_tensors &= isinstance(param, torch.Tensor)
1008
+ all_dicts &= isinstance(param, dict)
1009
+ if not all_tensors and not all_dicts:
1010
+ raise TypeError("Optimizer input should be an iterable of Tensors or dicts")
1011
+ if all_tensors:
1012
+ return dict(enumerate(params))
1013
+ assert all_dicts
1014
+ param_id_to_param: List[nn.Parameter] = []
1015
+ for param_group in params:
1016
+ has_params_key = "params" in param_group # type: ignore[operator]
1017
+ assert has_params_key, (
1018
+ 'A parameter group should map "params" to a list of the '
1019
+ "parameters in the group"
1020
+ )
1021
+ # Implicitly map `flat_param_id` (current length of the list) to
1022
+ # `param`
1023
+ param_id_to_param.extend(param_group["params"]) # type: ignore[index]
1024
+ return dict(enumerate(param_id_to_param))
1025
+
1026
+
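The ordering assumption in the NOTE above can be checked with a small sketch that needs no distributed setup: parameter IDs in an optimizer state dict are positions, in list order within each parameter group and then across groups.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
optim = torch.optim.SGD(
    [
        {"params": list(model[0].parameters())},
        {"params": list(model[1].parameters()), "lr": 0.01},
    ],
    lr=0.1,
)
# Each group lists its parameter IDs by position: expected [[0, 1], [2, 3]].
print([group["params"] for group in optim.state_dict()["param_groups"]])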
1027
+ def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]:
1028
+ """
1029
+ Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes
1030
+ from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical"
1031
+ because ``FlatParameter`` s do not come from the original module but are
1032
+ registered only after FSDP has been applied. This function returns the FSDP-given
1033
+ name for the ``FlatParameter`` (usually module._flat_param) as opposed to the
1034
+ canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``.
1035
+
1036
+ Consequently, this function will only return a non-empty mapping if FSDP was
1037
+ applied with ``use_orig_params=False`` as, otherwise, the original parameters
1038
+ are used within the module and there would be no ``FlatParameter`` s in the module.
1039
+
1040
+ """
1041
+
1042
+ def module_fn(module, prefix, tree_level, flat_param_to_fqn):
1043
+ for param_name, param in _named_parameters_with_duplicates(
1044
+ module, recurse=False
1045
+ ):
1046
+ if not isinstance(param, FlatParameter):
1047
+ continue
1048
+ fqn = clean_tensor_name(prefix + param_name)
1049
+ flat_param_to_fqn[param] = fqn
1050
+
1051
+ def return_fn(flat_param_to_fqn):
1052
+ return flat_param_to_fqn
1053
+
1054
+ flat_param_to_fqn_ret: Dict[FlatParameter, str] = {}
1055
+ return _apply_to_modules(
1056
+ model,
1057
+ module_fn,
1058
+ return_fn,
1059
+ [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
1060
+ flat_param_to_fqn_ret,
1061
+ )
1062
+
1063
+
1064
+ def _get_param_key_to_param(
1065
+ optim: torch.optim.Optimizer,
1066
+ model: Optional[nn.Module] = None,
1067
+ is_named_optimizer: bool = False,
1068
+ param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
1069
+ flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
1070
+ ) -> Dict[Union[int, str], nn.Parameter]:
1071
+ """
1072
+ Constructs a mapping from parameter keys to parameters. For the regular
1073
+ optimizers, the keys are parameter IDs. For NamedOptimizer, the keys
1074
+ are FQNs. This API may be used both for models with ``FlatParameter`` s and
1075
+ without.
1076
+ """
1077
+ clean_fqn_to_curr_fqn: Dict[str, str] = {}
1078
+ if is_named_optimizer:
1079
+ assert (
1080
+ param_to_fqns is not None and flat_param_to_fqn is not None
1081
+ ), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None."
1082
+ assert model is not None
1083
+ for key, _ in _named_parameters_with_duplicates(model):
1084
+ clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key
1085
+
1086
+ param_key_to_param: Dict[Union[str, int], nn.Parameter] = {}
1087
+ pid = 0
1088
+ for param_group in optim.param_groups:
1089
+ if is_named_optimizer:
1090
+ for param in param_group["params"]:
1091
+ assert flat_param_to_fqn is not None
1092
+ if param in flat_param_to_fqn:
1093
+ # FlatParameter case
1094
+ key = flat_param_to_fqn[param]
1095
+ else:
1096
+ assert param_to_fqns is not None
1097
+ # use_orig_params case
1098
+ assert len(param_to_fqns[param]) == 1
1099
+ key = param_to_fqns[param][0]
1100
+ try:
1101
+ key = clean_fqn_to_curr_fqn[key]
1102
+ except KeyError as e:
1103
+ raise KeyError(
1104
+ f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}."
1105
+ ) from e
1106
+ param_key_to_param[key] = param
1107
+ else:
1108
+ for param in param_group["params"]:
1109
+ param_key_to_param[pid] = param
1110
+ pid += 1
1111
+
1112
+ return param_key_to_param
1113
+
1114
+
1115
+ def _get_param_to_param_key(
1116
+ optim: torch.optim.Optimizer,
1117
+ model: Optional[nn.Module] = None,
1118
+ is_named_optimizer: bool = False,
1119
+ param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
1120
+ flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
1121
+ ) -> Dict[nn.Parameter, Union[int, str]]:
1122
+ """
1123
+ Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API
1124
+ only supports the case where `optim` is a regular optimizer, not NamedOptimizer.
1125
+ So the parameter keys will be parameter IDs.
1126
+ """
1127
+ param_id_to_param = _get_param_key_to_param(
1128
+ optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
1129
+ )
1130
+ return {param: param_id for param_id, param in param_id_to_param.items()}
1131
+
1132
+
1133
+ def _get_param_to_param_id_from_optim_input(
1134
+ model: nn.Module,
1135
+ optim_input: Optional[
1136
+ Union[
1137
+ List[Dict[str, Any]],
1138
+ Iterable[nn.Parameter],
1139
+ ]
1140
+ ] = None,
1141
+ ) -> Dict[nn.Parameter, int]:
1142
+ """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`."""
1143
+ param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input)
1144
+ return {param: param_id for param_id, param in param_id_to_param.items()}
1145
+
1146
+
1147
+ def _check_missing_keys_on_rank(
1148
+ r0_optim_state_keys: List[_OptimStateKey],
1149
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]],
1150
+ param_key_to_param: Dict[Union[str, int], nn.Parameter],
1151
+ group: Optional[dist.ProcessGroup],
1152
+ ) -> None:
1153
+ # Ensure that all ranks have at least the optimizer states needed by
1154
+ # rank 0's optimizer
1155
+ missing_keys: List[_OptimStateKey] = []
1156
+ for r0_optim_state_key in r0_optim_state_keys:
1157
+ if r0_optim_state_key not in optim_state_key_to_param_key:
1158
+ # A parameter from rank 0's optimizer does not exist for this
1159
+ # rank's optimizer
1160
+ missing_keys.append(r0_optim_state_key)
1161
+ continue
1162
+ param_key = optim_state_key_to_param_key[r0_optim_state_key]
1163
+ if isinstance(param_key, int):
1164
+ assert param_key >= 0 and param_key < len(
1165
+ param_key_to_param
1166
+ ), "Check the `param_key_to_param` construction"
1167
+ # We cannot use FSDPState.compute_device as this API is a global view.
1168
+ device = _get_pg_default_device(group)
1169
+ num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device)
1170
+ dist.all_reduce(num_missing, group=group)
1171
+ if num_missing.item() > 0:
1172
+ obj_list = [None for _ in range(dist.get_world_size(group))]
1173
+ dist.all_gather_object(obj_list, missing_keys, group=group)
1174
+ error_msg = (
1175
+ "FSDP currently requires each rank to have at least the "
1176
+ "optimizer states needed by rank 0's optimizer but some ranks "
1177
+ "are missing some of those states"
1178
+ )
1179
+ for rank, keys in enumerate(obj_list):
1180
+ keys = cast(List[_OptimStateKey], keys)
1181
+ if len(keys) > 0:
1182
+ error_msg += (
1183
+ f"\nRank {rank} is missing states for the parameters: "
1184
+ f"{[key.unflat_param_names for key in keys]}"
1185
+ )
1186
+ raise RuntimeError(error_msg)
1187
+
1188
+
1189
+ def _map_param_key_to_optim_keys(
1190
+ optim_state_dict: Dict[str, Any],
1191
+ group: Optional[dist.ProcessGroup],
1192
+ param_key_to_param: Dict[Union[int, str], nn.Parameter],
1193
+ param_to_fqns: Dict[nn.Parameter, List[str]],
1194
+ fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
1195
+ merge_keys: bool = False,
1196
+ ) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]:
1197
+ """
1198
+ Construct the local mapping between the ``_OptimStateKey`` and parameter keys
1199
+ and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0
1200
+ must contain all the ``_OptimStateKey`` s; otherwise, an exception will be raised.
1201
+ Note that ``merge_keys`` should be equal to ``use_orig_params``.
1202
+ """
1203
+ rank = dist.get_rank(group)
1204
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {} # local
1205
+ all_optim_state_keys: List[_OptimStateKey] = []
1206
+
1207
+ for param_key, param in param_key_to_param.items():
1208
+ # Do not include parameters without state to avoid empty mappings
1209
+ # just like in normal `torch.optim.Optimizer.state_dict()`
1210
+ if param_key not in optim_state_dict["state"]:
1211
+ continue
1212
+ fqns = param_to_fqns[param]
1213
+ is_fsdp_managed = isinstance(param, FlatParameter)
1214
+ if is_fsdp_managed:
1215
+ assert fqns[0] in fqn_to_fsdp_param_info, (
1216
+ fqns[0],
1217
+ list(fqn_to_fsdp_param_info.keys()),
1218
+ )
1219
+ is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info
1220
+ optim_state_key = _OptimStateKey(
1221
+ unflat_param_names=tuple(fqns),
1222
+ is_fsdp_managed=is_fsdp_managed,
1223
+ )
1224
+ if rank == 0 or merge_keys:
1225
+ all_optim_state_keys.append(optim_state_key)
1226
+ optim_state_key_to_param_key[optim_state_key] = param_key
1227
+
1228
+ if merge_keys:
1229
+ all_keys: List[List[_OptimStateKey]] = [
1230
+ [] for _ in range(dist.get_world_size(group))
1231
+ ]
1232
+ dist.all_gather_object(all_keys, all_optim_state_keys, group=group)
1233
+ merge_all_optim_state_keys = [
1234
+ key for local_keys in all_keys for key in local_keys
1235
+ ]
1236
+ all_optim_state_keys = sorted(set(merge_all_optim_state_keys))
1237
+ else:
1238
+ key_obj_list: List[Optional[List[_OptimStateKey]]] = (
1239
+ [all_optim_state_keys] if rank == 0 else [None]
1240
+ )
1241
+ dist.broadcast_object_list(key_obj_list, src=0, group=group)
1242
+ assert key_obj_list[0] is not None
1243
+ all_optim_state_keys = key_obj_list[0]
1244
+ _check_missing_keys_on_rank(
1245
+ all_optim_state_keys,
1246
+ optim_state_key_to_param_key,
1247
+ param_key_to_param,
1248
+ group,
1249
+ )
1250
+
1251
+ return all_optim_state_keys, optim_state_key_to_param_key
1252
+
1253
+
1254
+ def _unflatten_param_groups(
1255
+ state_dict: Dict[str, Any],
1256
+ param_key_to_param: Dict[Union[int, str], nn.Parameter],
1257
+ param_to_fqns: Dict[nn.Parameter, List[str]],
1258
+ ) -> List[Dict[str, Any]]:
1259
+ param_groups: List[Dict[str, Any]] = []
1260
+ for flat_param_group in state_dict["param_groups"]:
1261
+ unflat_param_group = copy.deepcopy(flat_param_group)
1262
+ param_group_params = [
1263
+ param_key_to_param[flat_param_key]
1264
+ for flat_param_key in flat_param_group["params"]
1265
+ ]
1266
+ nested_unflat_param_names = [
1267
+ param_to_fqns[param] for param in param_group_params
1268
+ ]
1269
+ unflat_param_group["params"] = [
1270
+ unflat_param_name
1271
+ for unflat_param_names in nested_unflat_param_names
1272
+ for unflat_param_name in unflat_param_names
1273
+ ] # flatten the list of lists
1274
+ param_groups.append(unflat_param_group)
1275
+ return param_groups
1276
+
1277
+
1278
+ def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool:
1279
+ """
1280
+ Returns whether the state_dict is from a NamedOptimizer.
1281
+ This function checks whether the keys in the state_dict['state'] are strings
1282
+ (which usually are FQNs) versus integers (which usually refer to param_ids
1283
+ from a vanilla torch.optim.Optimizer).
1284
+ """
1285
+ state = optim_state_dict.get("state", None)
1286
+ if not state:
1287
+ # If we cannot find a state, assume it is not NamedOptimizer as
1288
+ # NamedOptimizer has eager initialization.
1289
+ return False
1290
+ try:
1291
+ key = next(iter(state.keys()))
1292
+ except Exception as e:
1293
+ raise Exception(optim_state_dict) from e
1294
+ return isinstance(key, str)
1295
+
1296
+
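+ # A hedged illustration of the key-type check above on two hand-written toy
+ # state dicts (they are hypothetical, not produced by FSDP or an optimizer).
+ def _example_is_named_optimizer() -> Tuple[bool, bool]:
+     # NamedOptimizer-style: state keyed by parameter FQNs (strings).
+     named_osd = {"state": {"fc.weight": {"step": 1}}}
+     # Vanilla torch.optim.Optimizer-style: state keyed by integer parameter IDs.
+     vanilla_osd = {"state": {0: {"step": 1}}}
+     # Expected: (True, False)
+     return _is_named_optimizer(named_osd), _is_named_optimizer(vanilla_osd)
+
+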
1297
+ @dataclass
1298
+ class StateInfo:
1299
+ # The key of these dictionaries are the state name, e.g., `exp_avg`.
1300
+ tensors: Dict[str, _PosDimTensorInfo]
1301
+ scalar_tensors: Dict[str, torch.Tensor]
1302
+ non_tensors: Dict[str, Any]
1303
+
1304
+
1305
+ def _allgather_state_info(
1306
+ fsdp_state: _FSDPState,
1307
+ input_states: Dict[str, Any],
1308
+ ) -> List[Dict[str, StateInfo]]:
1309
+ """
1310
+ Given the ``input_states``, all-gather a ``StateInfo`` for each state. The function
1311
+ uses ``all_gather_object`` to gather the ``StateInfo``, so no GPU tensors are sent.
1312
+ """
1313
+
1314
+ processed_state_dict: Dict[str, StateInfo] = {}
1315
+ gathered_state_info: List[Dict[str, StateInfo]] = [
1316
+ {} for _ in range(fsdp_state.world_size)
1317
+ ]
1318
+
1319
+ for fqn, optim_state in input_states.items():
1320
+ # Allgather the scalar tensor state, non-tensor states and tensors metadata.
1321
+ processed_state = StateInfo({}, {}, {})
1322
+ for state_name, value in sorted_items(optim_state):
1323
+ if torch.is_tensor(value):
1324
+ if value.dim() == 0:
1325
+ # Ensure that `step` is on CPU.
1326
+ processed_state.scalar_tensors[state_name] = value.cpu()
1327
+ else:
1328
+ processed_state.tensors[state_name] = _PosDimTensorInfo(
1329
+ value.shape, value.dtype
1330
+ )
1331
+ else:
1332
+ processed_state.non_tensors[state_name] = value
1333
+ processed_state_dict[fqn] = processed_state
1334
+ dist.all_gather_object(
1335
+ gathered_state_info,
1336
+ processed_state_dict,
1337
+ group=fsdp_state.process_group,
1338
+ )
1339
+ return gathered_state_info
1340
+
1341
+
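+ # A hedged sketch of how a single parameter's Adam-like state would be
+ # summarized into ``StateInfo`` before the ``all_gather_object`` call above.
+ # No process group is required here; the state values are made up.
+ def _example_state_info() -> StateInfo:
+     optim_state = {
+         "exp_avg": torch.randn(4, 3),       # positive-dim tensor -> metadata only
+         "exp_avg_sq": torch.randn(4, 3),
+         "step": torch.tensor(10.0),         # scalar tensor -> sent as-is (on CPU)
+     }
+     info = StateInfo({}, {}, {})
+     for name, value in sorted_items(optim_state):
+         if torch.is_tensor(value):
+             if value.dim() == 0:
+                 info.scalar_tensors[name] = value.cpu()
+             else:
+                 info.tensors[name] = _PosDimTensorInfo(value.shape, value.dtype)
+         else:
+             info.non_tensors[name] = value
+     return info
+
+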
1342
+ def _convert_all_state_info(
1343
+ fsdp_param_info: FSDPParamInfo,
1344
+ gathered_state_info: List[Dict[str, StateInfo]],
1345
+ input_states: Dict[str, Any],
1346
+ output_states: Dict[str, Dict[str, Any]],
1347
+ ) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]:
1348
+ """
1349
+ Given the ``gathered_state_info`` and ``input_states``, this API converts
1350
+ the StateInfo into the original state if the state is not a non-scalar
1351
+ tensor. For a multi-dimensional tensor, the local state will be stored in
1352
+ ``state_buffers`` in the correct order for the later allgather.
1353
+ """
1354
+
1355
+ state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {}
1356
+
1357
+ for fqn, gathered_state in output_states.items():
1358
+ state_info = [s[fqn] for s in gathered_state_info]
1359
+ all_tensor_states = sorted(
1360
+ {n for state in state_info for n in state.tensors.keys()}
1361
+ )
1362
+ empty_ranks: Set[int] = set()
1363
+ dtype: Optional[torch.dtype] = None
1364
+ # First check all the non-scalar states and get the information of
1365
+ # states on each rank.
1366
+ for state_name in all_tensor_states:
1367
+ numels = []
1368
+ _empty_ranks: Set[int] = set()
1369
+ for rank, object_state in enumerate(state_info):
1370
+ numels.append(0)
1371
+ info = object_state.tensors.get(state_name, None)
1372
+ if info is not None:
1373
+ numels[-1] = info.shape.numel()
1374
+ if not dtype:
1375
+ dtype = info.dtype
1376
+ else:
1377
+ assert dtype == info.dtype
1378
+ if numels[-1] == 0:
1379
+ _empty_ranks.add(rank)
1380
+
1381
+ assert not empty_ranks or empty_ranks == _empty_ranks
1382
+ empty_ranks = _empty_ranks
1383
+ if state_name not in state_buffers:
1384
+ state_buffers[state_name] = [
1385
+ None for _ in fsdp_param_info.param_indices
1386
+ ]
1387
+ local_state = input_states[fqn].get(state_name, None)
1388
+ # N.B. We need to move the state to compute_device. The reason is
1389
+ # not yet clear and we need to figure out why the state may be on a
1390
+ # different device.
1391
+ if local_state is not None:
1392
+ local_state = local_state.to(fsdp_param_info.state.compute_device)
1393
+ state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state
1394
+
1395
+ # Restoring the scalar and non-tensor states. If the corresponding
1396
+ # non-scalar states do not exist on the rank, we also skip the scalar
1397
+ # non-tensor states on that rank.
1398
+ for rank, object_state in enumerate(state_info):
1399
+ if rank in empty_ranks:
1400
+ continue
1401
+ for name, non_tensor_value in object_state.non_tensors.items():
1402
+ curr_non_tensor_value = gathered_state.get(name, None)
1403
+ assert (
1404
+ curr_non_tensor_value is None
1405
+ or curr_non_tensor_value == non_tensor_value
1406
+ ), (
1407
+ f"Rank {rank} has different values for {name}: {non_tensor_value}."
1408
+ + f" Other ranks: {curr_non_tensor_value}"
1409
+ )
1410
+ gathered_state[name] = non_tensor_value
1411
+
1412
+ for name, scalar_tensor_value in object_state.scalar_tensors.items():
1413
+ curr_scalar_tensor_value = gathered_state.get(name, None)
1414
+ assert curr_scalar_tensor_value is None or torch.equal(
1415
+ scalar_tensor_value, curr_scalar_tensor_value
1416
+ ), (
1417
+ f"Rank {rank} has different values for {name}: {scalar_tensor_value}."
1418
+ + f" Other ranks: {curr_scalar_tensor_value}"
1419
+ )
1420
+ gathered_state[name] = scalar_tensor_value
1421
+
1422
+ return dtype, state_buffers # type: ignore[possibly-undefined]
1423
+
1424
+
1425
+ def _unflatten_orig_param_states(
1426
+ fsdp_param_info: FSDPParamInfo,
1427
+ output_states: Dict[str, Dict[str, Any]],
1428
+ state_name: str,
1429
+ shard_state: bool,
1430
+ to_save: bool,
1431
+ cpu_offload: bool,
1432
+ ) -> None:
1433
+ """
1434
+ Given an output state dict, ``output_states``, whose keys are FQNs of the
1435
+ original parameters (not FlatParameters nor parameter IDs) and whose values
1436
+ are the gathered states, unflatten the states to their original dimensions.
1437
+
1438
+ This function performs the unflattening process in-place.
1439
+ """
1440
+ if not to_save:
1441
+ return
1442
+ flat_param = fsdp_param_info.handle.flat_param
1443
+ fsdp_state = fsdp_param_info.state
1444
+ for fqn, gathered_state in output_states.items():
1445
+ value = gathered_state[state_name]
1446
+ param_idx = fsdp_param_info.param_indices[fqn]
1447
+
1448
+ # TODO: This solution is not general and only applies to the PTD TP solution.
1449
+ if isinstance(value, DTensor):
1450
+ placement = value.placements[0]
1451
+ # If gathered state is a DTensor and its TP placement is not Replicate(), we need to
1452
+ # gather the tensor along its TP dimension before chunking it into a DTensor again.
1453
+ if placement != Replicate():
1454
+ placement_dim = placement.dim # type: ignore[attr-defined]
1455
+ value_local = value.redistribute(placements=(Replicate(),))
1456
+ reshape_size = list(flat_param._shapes[param_idx])
1457
+ reshape_size[placement_dim] *= value.device_mesh.size(0)
1458
+ reshape_size = torch.Size(reshape_size)
1459
+ value = value.reshape(reshape_size)
1460
+ # If the gathered state is a replicated DTensor, we directly reshape it.
1461
+ else:
1462
+ value = value.reshape(flat_param._shapes[param_idx])
1463
+ else:
1464
+ # If the gathered state is a regular tensor, we directly reshape it into the unflattened state.
1465
+ value = value.reshape(flat_param._shapes[param_idx])
1466
+
1467
+ if shard_state:
1468
+ osd_config = fsdp_state._optim_state_dict_config
1469
+ if getattr(osd_config, "_use_dtensor", False):
1470
+ assert fsdp_state._device_mesh is not None
1471
+ value = _ext_chunk_dtensor(
1472
+ value,
1473
+ fsdp_state.rank,
1474
+ fsdp_state._device_mesh,
1475
+ fsdp_state._fsdp_extension,
1476
+ )
1477
+ else:
1478
+ assert fsdp_state.process_group is not None
1479
+ value = _ext_chunk_tensor(
1480
+ value,
1481
+ fsdp_state.rank,
1482
+ fsdp_state.world_size,
1483
+ fsdp_state._device_handle.device_count(),
1484
+ fsdp_state.process_group,
1485
+ fsdp_state._fsdp_extension,
1486
+ )
1487
+ elif not cpu_offload:
1488
+ with SimpleProfiler.profile("clone"):
1489
+ value = value.detach().clone()
1490
+
1491
+ if cpu_offload:
1492
+ with SimpleProfiler.profile(SimpleProfiler.Type.D2H):
1493
+ value = value.cpu()
1494
+ gathered_state[state_name] = value
1495
+
1496
+
1497
+ def _allgather_orig_param_states(
1498
+ fsdp_param_info: FSDPParamInfo,
1499
+ gathered_state_info: List[Dict[str, StateInfo]],
1500
+ input_states: Dict[str, Any],
1501
+ shard_state: bool,
1502
+ to_save: bool,
1503
+ cpu_offload: bool,
1504
+ ) -> Dict[str, Dict[str, Any]]:
1505
+ """
1506
+ Given the ``gathered_state_info`` and ``input_states``, this API all-gathers
1507
+ all tensor states and restores the non-tensor states from ``gathered_state_info``.
1508
+ """
1509
+ fsdp_state = fsdp_param_info.state
1510
+ if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL:
1511
+ logger.warning(
1512
+ "CUDA Memory Summary before calling to _allgather_orig_param_states %s",
1513
+ torch.cuda.memory_summary(),
1514
+ )
1515
+
1516
+ output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()}
1517
+
1518
+ dtype, state_buffers = _convert_all_state_info(
1519
+ fsdp_param_info, gathered_state_info, input_states, output_states
1520
+ )
1521
+
1522
+ if len(state_buffers) == 0:
1523
+ return output_states
1524
+
1525
+ has_state_params: List[bool] = [
1526
+ True if fqn in output_states else False
1527
+ for fqn, idx in fsdp_param_info.param_indices.items()
1528
+ ]
1529
+
1530
+ # Loop through the ``state_buffers`` and construct the flattened, concatenated,
1531
+ # sharded states. The size of the constructed state will be the same as that of
1532
+ # flat_param (also sharded).
1533
+ # Then we perform an allgather_into_tensor to get the full flat_param state.
1534
+ # The full flat_param state is the concatenation of multiple states in
1535
+ # the order of flat_param._fqns.
1536
+ # The final step is to split the flat_param state into original param states
1537
+ # and return the result.
1538
+ flat_param = fsdp_param_info.handle.flat_param
1539
+ empty_func = functools.partial(
1540
+ torch.empty, dtype=dtype, device=fsdp_state.compute_device
1541
+ )
1542
+ gathered_tensor = empty_func(flat_param._padded_unsharded_size)
1543
+ # Synchronize can be slow but this will be easier for us to debug.
1544
+ torch.cuda.synchronize()
1545
+ for state_name, buffers in state_buffers.items():
1546
+ local_buffers: List[torch.Tensor] = []
1547
+ begin = fsdp_state.rank * flat_param._sharded_size.numel()
1548
+ # End is inclusive.
1549
+ end = begin + flat_param._sharded_size.numel() - 1
1550
+ # param_idx corresponds to the parameter index in the FlatParameter.
1551
+ mem_offset, param_idx = 0, 0
1552
+ for numel, is_padding in zip(
1553
+ flat_param._numels_with_padding, flat_param._is_padding_mask
1554
+ ):
1555
+ frozen_and_no_state = not is_padding and (
1556
+ not fsdp_param_info.param_requires_grad[param_idx]
1557
+ and not has_state_params[param_idx]
1558
+ )
1559
+
1560
+ if is_padding or frozen_and_no_state:
1561
+ # This memory range is a padding or the param is frozen and does
1562
+ # not require gradient. For the latter case, we treat it as
1563
+ # padding and add empty values to the local_buffers.
1564
+
1565
+ padding_begin, padding_end = mem_offset, mem_offset + numel - 1
1566
+ if padding_begin <= begin <= padding_end:
1567
+ # The range is alignment padding before the first parameter in
1568
+ # the shard. The shard includes part of this alignment padding.
1569
+ padding_len = (
1570
+ padding_end - begin + 1
1571
+ if end >= padding_end
1572
+ else end - begin + 1
1573
+ )
1574
+ elif padding_begin <= end <= padding_end:
1575
+ # The range is alignment padding after the last parameter in
1576
+ # the shard. The shard includes part of this alignment padding.
1577
+ padding_len = (
1578
+ end - padding_begin + 1
1579
+ if begin <= padding_begin
1580
+ else end - begin + 1
1581
+ )
1582
+ elif begin < padding_begin <= padding_end < end:
1583
+ # The range is alignment padding that lies completely within the
1584
+ # shard.
1585
+ padding_len = numel
1586
+ else:
1587
+ padding_len = 0
1588
+ if padding_len:
1589
+ local_buffers.append(empty_func(padding_len))
1590
+
1591
+ if not is_padding:
1592
+ # This memory range is a parameter in FlatParameter. So there
1593
+ # should be a corresponding state in the optimizer unless the
1594
+ # parameter is frozen, in which case we treat it as padding above.
1595
+
1596
+ # We need to check if this rank owns the buffer. If this is None:
1597
+ # 1.) the rank does not own any part of the original parameter.
1598
+ # As a result, there is no corresponding optimizer state on
1599
+ # the rank as well.
1600
+ # 2.) the parameter is frozen AND there is no optimizer state for the
1601
+ # parameter. A frozen parameter can still have
1602
+ # optimizer state if it was not frozen in
1603
+ # previous steps.
1604
+ if buffers[param_idx] is not None:
1605
+ local_buffers.append(cast(torch.Tensor, buffers[param_idx]))
1606
+ param_idx += 1
1607
+
1608
+ mem_offset += numel
1609
+
1610
+ shard_numel_padded = flat_param._sharded_size.numel() - (
1611
+ sum(t.numel() for t in local_buffers)
1612
+ )
1613
+
1614
+ assert flat_param._shard_numel_padded == shard_numel_padded, (
1615
+ "Manually calculated _sharded_numel_padded is incorrect. "
1616
+ f"_shard_numel_padded={flat_param._shard_numel_padded}, "
1617
+ f"shard_numel_padded={shard_numel_padded}, "
1618
+ f"_sharded_size.numel={flat_param._sharded_size.numel()}, "
1619
+ f"_numels_with_padding={flat_param._numels_with_padding}, "
1620
+ f"begin={begin}, end={end},"
1621
+ )
1622
+ if shard_numel_padded > 0:
1623
+ # Add right-handed padding.
1624
+ local_buffers.append(empty_func(shard_numel_padded))
1625
+ local_shard = torch.cat(local_buffers)
1626
+ assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), (
1627
+ "The size of local shard times the world size should equal to the "
1628
+ "gathered tensor size. The inconsistency may be from a bug of "
1629
+ "FlatParameter's metadata or the reconstruction logic in optimizer "
1630
+ "state dict."
1631
+ )
1632
+ torch.cuda.synchronize()
1633
+ with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
1634
+ dist.all_gather_into_tensor(
1635
+ gathered_tensor, local_shard, group=fsdp_state.process_group
1636
+ )
1637
+ # Synchronize can be slow but this will be easier for us to debug.
1638
+ torch.cuda.synchronize()
1639
+
1640
+ unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()]
1641
+ flat_param_handle = fsdp_param_info.handle
1642
+ orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor)
1643
+ assert len(orig_states) == len(fsdp_param_info.param_indices), (
1644
+ "The number of parameters from FlatParameter is not consistent to "
1645
+ "the number of states used by optimizer state dict reconstruction "
1646
+ "logic."
1647
+ )
1648
+ for fqn, idx in fsdp_param_info.param_indices.items():
1649
+ if fsdp_param_info.param_requires_grad[idx] or fqn in output_states:
1650
+ output_states[fqn][state_name] = orig_states[idx]
1651
+
1652
+ _unflatten_orig_param_states(
1653
+ fsdp_param_info,
1654
+ output_states,
1655
+ state_name,
1656
+ shard_state,
1657
+ to_save,
1658
+ cpu_offload,
1659
+ )
1660
+
1661
+ del gathered_tensor
1662
+ return output_states
1663
+
1664
+
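+ # A hedged, single-process sketch of the flatten -> gather -> split pattern
+ # used above. The collective is emulated by concatenating two hand-made
+ # "rank shards"; the shapes, padding, and values are hypothetical.
+ def _example_flatten_gather_split() -> List[torch.Tensor]:
+     # Pretend the flat parameter holds a (2, 3) weight followed by a (3,) bias,
+     # i.e. 9 elements padded to 10 and sharded as 5 elements per rank.
+     shapes = [torch.Size([2, 3]), torch.Size([3])]
+     rank0_shard = torch.arange(0, 5, dtype=torch.float32)
+     rank1_shard = torch.arange(5, 10, dtype=torch.float32)
+     # Stand-in for ``dist.all_gather_into_tensor``.
+     gathered = torch.cat([rank0_shard, rank1_shard])
+     unpadded = gathered[:9]  # drop the right-hand padding
+     splits = unpadded.split([s.numel() for s in shapes])
+     return [t.view(shape) for t, shape in zip(splits, shapes)]
+
+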
1665
+ def _gather_all_orig_param_state(
1666
+ fsdp_param_info: FSDPParamInfo,
1667
+ input_states: Dict[str, Any],
1668
+ shard_state: bool,
1669
+ to_save: bool,
1670
+ cpu_offload: bool,
1671
+ ) -> Dict[str, Any]:
1672
+ """
1673
+ Given an optimizer state dict, ``input_states``, whose keys are FQNs of the
1674
+ original parameters (not FlatParameters nor parameter IDs), gather all the
1675
+ states and unflatten them to their original dimensions. Note that all the
1676
+ params referred to by ``input_states`` must be managed by FSDP.
1677
+ """
1678
+ fsdp_state = fsdp_param_info.state
1679
+ if (
1680
+ fsdp_state.world_size == 1
1681
+ or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
1682
+ ):
1683
+ return input_states if to_save else {}
1684
+
1685
+ with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
1686
+ with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ):
1687
+ gathered_state_info = _allgather_state_info(fsdp_state, input_states)
1688
+ output_states = _allgather_orig_param_states(
1689
+ fsdp_param_info,
1690
+ gathered_state_info,
1691
+ input_states,
1692
+ shard_state,
1693
+ to_save,
1694
+ cpu_offload,
1695
+ )
1696
+ if to_save:
1697
+ for key, idx in fsdp_param_info.param_indices.items():
1698
+ if key in output_states:
1699
+ continue
1700
+ if not fsdp_param_info.param_requires_grad[idx]:
1701
+ continue
1702
+
1703
+ raise RuntimeError(
1704
+ f"{key} is not in the output state. "
1705
+ "The FSDPParamInfo has the param keys "
1706
+ f"{sorted(fsdp_param_info.param_indices.keys())} while "
1707
+ "the output_states has the param keys "
1708
+ f"{sorted(output_states.keys())}."
1709
+ )
1710
+ return output_states
1711
+ else:
1712
+ return {}
1713
+
1714
+
1715
+ def _convert_state_with_orig_params(
1716
+ all_optim_state_keys: List[_OptimStateKey],
1717
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
1718
+ fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
1719
+ optim_state_dict: Dict[Union[str, int], Any],
1720
+ to_save: bool,
1721
+ shard_state: bool,
1722
+ cpu_offload: bool = True,
1723
+ ) -> Dict[str, Any]:
1724
+ fsdp_osd_state: Dict[str, Any] = {}
1725
+ # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo
1726
+ # usually corresponds to multiple parameters. We cannot use FSDPParamInfo
1727
+ # as the key because FSDPParamInfo is not hashable. As a result, we fall back
1728
+ # to `id(FSDPParamInfo)`, which is an integer.
1729
+ all_states: Dict[int, Dict[str, Any]] = {}
1730
+ # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
1731
+ # across ranks
1732
+ for optim_state_key in all_optim_state_keys:
1733
+ param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
1734
+ optim_state_key, None
1735
+ )
1736
+
1737
+ if param_key is None and not optim_state_key.is_fsdp_managed:
1738
+ continue
1739
+
1740
+ if optim_state_key.is_fsdp_managed:
1741
+ fqn = optim_state_key.unflat_param_names[0]
1742
+ fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None)
1743
+ if fsdp_param_info is None:
1744
+ # This can happen if not all FSDP instances have all the
1745
+ # parameters, e.g., with FSDP combined with some MPMD-style
1746
+ # parallelism.
1747
+
1748
+ # TODO: it is unclear if we need to do the same check with
1749
+ # non-FSDP managed keys.
1750
+ continue
1751
+ state = {} if param_key is None else optim_state_dict[param_key]
1752
+ if id(fsdp_param_info) not in all_states:
1753
+ all_states[id(fsdp_param_info)] = {}
1754
+ all_states[id(fsdp_param_info)][fqn] = state
1755
+
1756
+ elif to_save:
1757
+ assert len(optim_state_key.unflat_param_names) == 1
1758
+ unflat_param_name = optim_state_key.unflat_param_names[0]
1759
+ with SimpleProfiler.profile("none_fsdp_managed_copy"):
1760
+ param_key = cast(Union[str, int], param_key)
1761
+ fsdp_osd_state[unflat_param_name] = copy.copy(
1762
+ optim_state_dict[param_key]
1763
+ )
1764
+ if cpu_offload:
1765
+ for state_name, value in sorted_items(
1766
+ fsdp_osd_state[unflat_param_name]
1767
+ ):
1768
+ if not torch.is_tensor(value):
1769
+ continue
1770
+ fsdp_osd_state[unflat_param_name][state_name] = value.cpu()
1771
+
1772
+ # Instead of gathering the state of each parameter individually, we perform
1773
+ # the gathering all at once to speed up the process.
1774
+ for _all_states in all_states.values():
1775
+ fqn = next(iter(_all_states.keys()))
1776
+ fsdp_param_info = fqn_to_fsdp_param_info[fqn]
1777
+ assert len(fsdp_param_info.param_requires_grad) > 0, (
1778
+ "With use_orig_params, FSDPParamInfo should have requires_grad "
1779
+ "information. However, the length is zero."
1780
+ )
1781
+ for key, idx in fsdp_param_info.param_indices.items():
1782
+ if key in _all_states:
1783
+ continue
1784
+ if not fsdp_param_info.param_requires_grad[idx]:
1785
+ continue
1786
+ raise RuntimeError(
1787
+ f"{key} is not in the optimizer state. "
1788
+ "The FSDPParamInfo has the param keys "
1789
+ f"{sorted(fsdp_param_info.param_indices.keys())} while "
1790
+ "the optimizer has the param keys "
1791
+ f"{sorted(_all_states.keys())}."
1792
+ )
1793
+ fsdp_osd_state.update(
1794
+ _gather_all_orig_param_state(
1795
+ fsdp_param_info,
1796
+ _all_states,
1797
+ shard_state,
1798
+ to_save,
1799
+ cpu_offload,
1800
+ )
1801
+ )
1802
+
1803
+ return fsdp_osd_state
1804
+
1805
+
1806
+ def _convert_state_with_flat_params(
1807
+ all_optim_state_keys: List[_OptimStateKey],
1808
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
1809
+ fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
1810
+ optim_state_dict: Dict[Union[str, int], Any],
1811
+ to_save: bool,
1812
+ shard_state: bool,
1813
+ cpu_offload: bool = True,
1814
+ ) -> Dict[str, Any]:
1815
+ fsdp_osd_state: Dict[str, Any] = {}
1816
+ # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
1817
+ # across ranks
1818
+ for optim_state_key in all_optim_state_keys:
1819
+ param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
1820
+ optim_state_key, None
1821
+ )
1822
+
1823
+ assert param_key is not None, (
1824
+ "If use_orig_params is False, we must be able to find the "
1825
+ f"corresponding param id. {optim_state_key} {param_key}"
1826
+ )
1827
+
1828
+ if optim_state_key.is_fsdp_managed:
1829
+ # If there are multiple unflat_param_names (not use_orig_params),
1830
+ # they share the same FSDPParamInfo. So the first unflat_param_name
1831
+ # is sufficient to fetch the FSDPParamInfo.
1832
+ fqn = optim_state_key.unflat_param_names[0]
1833
+ fsdp_param_info = fqn_to_fsdp_param_info[fqn]
1834
+ unflat_state = _unflatten_optim_state(
1835
+ fsdp_param_info,
1836
+ optim_state_dict[param_key],
1837
+ to_save,
1838
+ shard_state,
1839
+ cpu_offload,
1840
+ )
1841
+ if to_save:
1842
+ assert len(unflat_state) == len(optim_state_key.unflat_param_names)
1843
+ for unflat_param_name, unflat_param_state in zip(
1844
+ optim_state_key.unflat_param_names,
1845
+ unflat_state,
1846
+ ):
1847
+ fsdp_osd_state[unflat_param_name] = unflat_param_state
1848
+ elif to_save:
1849
+ assert len(optim_state_key.unflat_param_names) == 1
1850
+ unflat_param_name = optim_state_key.unflat_param_names[0]
1851
+ fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key])
1852
+ if cpu_offload:
1853
+ for state_name, value in sorted_items(
1854
+ fsdp_osd_state[unflat_param_name]
1855
+ ):
1856
+ if not torch.is_tensor(value):
1857
+ continue
1858
+ fsdp_osd_state[unflat_param_name][state_name] = value.cpu()
1859
+
1860
+ return fsdp_osd_state
1861
+
1862
+
1863
+ @torch.no_grad()
1864
+ def _optim_state_dict(
1865
+ model: nn.Module,
1866
+ optim: torch.optim.Optimizer,
1867
+ optim_state_dict: Dict[str, Any],
1868
+ optim_input: Optional[
1869
+ Union[
1870
+ List[Dict[str, Any]],
1871
+ Iterable[nn.Parameter],
1872
+ ]
1873
+ ],
1874
+ rank0_only: bool,
1875
+ shard_state: bool,
1876
+ group: Optional[dist.ProcessGroup],
1877
+ using_optim_input: bool,
1878
+ use_orig_params: bool = False,
1879
+ cpu_offload: bool = True,
1880
+ ) -> Dict[str, Any]:
1881
+ """
1882
+ Consolidates the optimizer state and returns it as a :class:`dict`
1883
+ following the convention of :meth:`torch.optim.Optimizer.state_dict`,
1884
+ i.e. with keys ``"state"`` and ``"param_groups"``.
1885
+ The flat parameters in ``FSDP`` modules contained in ``model`` are mapped
1886
+ back to their unflattened parameters.
1887
+
1888
+ Parameter keys are not well-defined. For a regular optimizer, the optimizer
1889
+ state_dict contains a mapping from parameter IDs to parameter states.
1890
+ Parameter IDs follow the order of parameters in ``optim.param_groups`` across
1891
+ all the groups. This API also allows the user to pass ``optim_input`` for the
1892
+ mapping between parameters and parameter IDs. Using ``optim_input`` is being
1893
+ deprecated.
1894
+
1895
+ If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not
1896
+ contain parameter IDs mapping but a mapping from parameter FQNs to parameter
1897
+ states. This API finds the mapping from FQNs to parameters if the optimizer
1898
+ is a ``NamedOptimizer``.
1899
+
1900
+ If ``use_orig_params`` is True, each rank will have all FSDP-managed
1901
+ parameters but some of these parameters may be empty due to the sharding.
1902
+ For a regular optim.Optimizer, states for those empty parameters will
1903
+ not be initialized. So, when aggregating the FQNs across ranks, no assert
1904
+ will be raised on a rank even if it does not have all the states -- it is
1905
+ valid and FSDP knows how to aggregate them. However, FSDP has to skip
1906
+ handling those parameters that are not managed by FSDP and do not exist on
1907
+ the local rank -- those are managed by other parallelisms and FSDP does not
1908
+ know how to handle/aggregate them.
1909
+
1910
+ Args:
1911
+ model (nn.Module): Root module (which may or may not be a
1912
+ :class:`FullyShardedDataParallel` instance) whose parameters
1913
+ were passed into the optimizer ``optim``.
1914
+ optim (torch.optim.Optimizer): Optimizer for ``model`` 's
1915
+ parameters.
1916
+ rank0_only (bool): If ``True``, saves the populated :class:`dict`
1917
+ only on rank 0; if ``False``, saves it on all ranks. (Default:
1918
+ ``True``)
1919
+ shard_state (bool): If ``True``, shard and distribute all
1920
+ non-zero-dimension states.
1921
+
1922
+ Returns:
1923
+ Dict[str, Any]: A :class:`dict` containing the optimizer state for
1924
+ ``model`` 's original unflattened parameters and including keys
1925
+ "state" and "param_groups" following the convention of
1926
+ :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=True``,
1927
+ then nonzero ranks return an empty :class:`dict`.
1928
+ """
1929
+ SimpleProfiler.reset()
1930
+ cm = ExitStack()
1931
+ cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL))
1932
+ _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model))
1933
+ to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state
1934
+
1935
+ with SimpleProfiler.profile("preprocessing"):
1936
+ param_to_fqns = _get_param_to_fqns(model)
1937
+ flat_param_to_fqn = _get_flat_param_to_fqn(model)
1938
+ is_named_optimizer = _is_named_optimizer(optim_state_dict)
1939
+
1940
+ param_key_to_param = cast(
1941
+ Dict[Union[int, str], nn.Parameter],
1942
+ (
1943
+ _get_param_id_to_param_from_optim_input(model, optim_input)
1944
+ if using_optim_input
1945
+ else _get_param_key_to_param(
1946
+ optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
1947
+ )
1948
+ ),
1949
+ )
1950
+ fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
1951
+
1952
+ with SimpleProfiler.profile("preprocessing_with_comm"):
1953
+ (
1954
+ all_optim_state_keys,
1955
+ optim_state_key_to_param_key,
1956
+ ) = _map_param_key_to_optim_keys(
1957
+ optim_state_dict,
1958
+ group,
1959
+ param_key_to_param,
1960
+ param_to_fqns,
1961
+ fqn_to_fsdp_param_info,
1962
+ merge_keys=use_orig_params,
1963
+ )
1964
+
1965
+ with SimpleProfiler.profile("state_converting"):
1966
+ convert_fn = (
1967
+ _convert_state_with_orig_params
1968
+ if use_orig_params
1969
+ else _convert_state_with_flat_params
1970
+ )
1971
+ fsdp_osd_state = convert_fn(
1972
+ all_optim_state_keys,
1973
+ optim_state_key_to_param_key,
1974
+ fqn_to_fsdp_param_info,
1975
+ optim_state_dict["state"],
1976
+ to_save,
1977
+ shard_state,
1978
+ cpu_offload,
1979
+ )
1980
+
1981
+ # At this point, communication is complete and ranks can return early if nothing
1982
+ # will be saved on that rank.
1983
+ if not to_save:
1984
+ return {}
1985
+
1986
+ fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state}
1987
+
1988
+ flat_param_fqns = set(flat_param_to_fqn.values())
1989
+ for key, value in optim_state_dict["state"].items():
1990
+ if key in fsdp_osd_state:
1991
+ continue
1992
+ if key in flat_param_fqns:
1993
+ continue
1994
+ if key in param_key_to_param:
1995
+ continue
1996
+ # This key is not recognized by FSDP. It may be a user-defined state
1997
+ # or some parameter state that FSDP is unable to map from
1998
+ # ``optim.param_groups``.
1999
+ warnings.warn(
2000
+ f"Found a optim state, {key}, that FSDP cannot process. FSDP "
2001
+ "will directly copy everything to the returned state_dict. In "
2002
+ "most cases, this is a user-defined state that is not "
2003
+ "associated with any particular parameter. Another possible "
2004
+ "case is this state is managed by TorchRec. Otherwise, there may "
2005
+ " be a mismatched assumption of optim_state_dict of this mode."
2006
+ )
2007
+ fsdp_osd_state[key] = value
2008
+
2009
+ if "param_groups" in optim_state_dict:
2010
+ fsdp_osd["param_groups"] = _unflatten_param_groups(
2011
+ optim_state_dict, param_key_to_param, param_to_fqns
2012
+ )
2013
+
2014
+ cm.close()
2015
+ SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ")
2016
+
2017
+ return fsdp_osd
2018
+
2019
+
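+ # A hedged sketch of the structure produced by ``_optim_state_dict`` above.
+ # The FQNs and values are hypothetical; in practice this shape is obtained
+ # through the public ``FSDP.optim_state_dict``/``FSDP.full_optim_state_dict``
+ # APIs rather than by calling the helper directly.
+ _EXAMPLE_FSDP_OSD_SHAPE: Dict[str, Any] = {
+     "state": {
+         # Keyed by original (unflattened) parameter FQNs, not flat parameters.
+         "layer.weight": {"step": 10, "exp_avg": "<tensor>", "exp_avg_sq": "<tensor>"},
+         "layer.bias": {"step": 10, "exp_avg": "<tensor>", "exp_avg_sq": "<tensor>"},
+     },
+     "param_groups": [
+         {"lr": 1e-3, "betas": (0.9, 0.999), "params": ["layer.weight", "layer.bias"]},
+     ],
+ }
+
+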
2020
+ def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]:
2021
+ """
2022
+ Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo``
2023
+ if the param is managed by FSDP. Shared parameters, or original parameters that
2024
+ are shared across multiple nn.Modules, are required to belong to one and only
2025
+ one FSDP instance and thus correspond to one ``FlatParameter``. Within the one
2026
+ ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared
2027
+ parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters.
2028
+ """
2029
+
2030
+ def module_fn(module, prefix, tree_level, fqn_to_param_info):
2031
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
2032
+ if fsdp_state is None:
2033
+ return
2034
+ _lazy_init(fsdp_state, module)
2035
+ handle = _module_handle(fsdp_state, module)
2036
+ if not handle:
2037
+ return
2038
+ flat_param = handle.flat_param
2039
+ fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, [])
2040
+ # NOTE: `idx` indexes into the data structures *without* padding
2041
+ # elements
2042
+ for idx, local_fqn in enumerate(flat_param._fqns):
2043
+ fqn = clean_tensor_name(prefix + local_fqn)
2044
+ if fqn in fqn_to_param_info:
2045
+ assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn
2046
+ fqn_to_param_info[fqn] = fsdp_param_info
2047
+ fsdp_param_info.param_indices[fqn] = idx
2048
+ if flat_param._params is not None:
2049
+ fsdp_param_info.param_requires_grad.append(
2050
+ flat_param._params[idx].requires_grad
2051
+ )
2052
+
2053
+ def return_fn(fqn_to_param_info):
2054
+ return fqn_to_param_info
2055
+
2056
+ fqn_to_param_info: Dict[str, FSDPParamInfo] = {}
2057
+ # FlatParameter._fqns stores the local fqn, starting from the root of the
2058
+ # FSDP. Using _apply_to_modules() with model (may not be the FSDP root
2059
+ # module) allows us to construct the global fqn.
2060
+ return _apply_to_modules(
2061
+ model,
2062
+ module_fn,
2063
+ return_fn,
2064
+ [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
2065
+ fqn_to_param_info,
2066
+ )
2067
+
2068
+
2069
+ @no_type_check
2070
+ def _set_optim_use_dtensor(
2071
+ fsdp_state: _FSDPState,
2072
+ state_dict_settings: StateDictSettings,
2073
+ ) -> None:
2074
+ # If device_mesh is passed in when initializing FSDP, we automatically set the
2075
+ # _use_dtensor flag to True for ShardedOptimStateDictConfig() when state_dict_type
2076
+ # is set to SHARDED_STATE_DICT.
2077
+ if getattr(fsdp_state, "_device_mesh", None):
2078
+ state_dict_type = state_dict_settings.state_dict_type
2079
+ if state_dict_type == StateDictType.LOCAL_STATE_DICT:
2080
+ raise RuntimeError(
2081
+ "Found state_dict_type LOCAL_STATE_DICT.",
2082
+ "DeviceMesh is not compatible with LOCAL_STATE_DICT.",
2083
+ "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.",
2084
+ )
2085
+ else:
2086
+ state_dict_settings.optim_state_dict_config._use_dtensor = True
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py ADDED
@@ -0,0 +1,1630 @@
1
+ import functools
2
+ import logging
3
+ from enum import auto, Enum
4
+ from typing import Any, Callable, Dict, List, no_type_check, Optional, Set, Tuple
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from torch.autograd import Variable
12
+ from torch.autograd.graph import register_multi_grad_hook
13
+ from torch.distributed.algorithms._comm_hooks import LOW_PRECISION_HOOKS
14
+ from torch.distributed.fsdp._common_utils import (
15
+ _assert_in_training_states,
16
+ _FSDPState,
17
+ _get_module_fsdp_state,
18
+ _is_composable,
19
+ _log_post_backward_hook,
20
+ _no_dispatch_record_stream,
21
+ clean_tensor_name,
22
+ TrainingState,
23
+ )
24
+ from torch.distributed.fsdp._flat_param import (
25
+ FlatParameter,
26
+ FlatParamHandle,
27
+ HandleShardingStrategy,
28
+ HandleTrainingState,
29
+ RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES,
30
+ )
31
+ from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES
32
+ from torch.distributed.fsdp.api import BackwardPrefetch
33
+ from torch.distributed.utils import (
34
+ _apply_to_tensors,
35
+ _cast_forward_inputs,
36
+ _p_assert,
37
+ _to_kwargs,
38
+ )
39
+ from torch.utils import _pytree as pytree
40
+
41
+ log = logging.getLogger(__name__)
42
+
43
+ # Do not include "process_group" to enable hybrid shard and MoE cases
44
+ HOMOGENEOUS_ATTR_NAMES = (
45
+ "_use_orig_params",
46
+ "limit_all_gathers",
47
+ "_use_full_prec_in_eval",
48
+ )
49
+
50
+
51
+ class _PrefetchMode(Enum):
52
+ BACKWARD = auto()
53
+ FORWARD = auto()
54
+
55
+
56
+ def _get_fsdp_root_states_with_modules(
57
+ module: nn.Module,
58
+ ) -> Tuple[List[_FSDPState], List[nn.Module]]:
59
+ """
60
+ Returns a tuple containing:
61
+ 1. A list of the root ``_FSDPState`` instances in the module tree rooted at
62
+ ``module`` without any duplicates and following the ``module.modules()``
63
+ traversal order (which is assumed to be depth-first).
64
+ 2. A corresponding list of the root modules owning the states in the first
65
+ list.
66
+
67
+ This is similar to :func:`_get_fsdp_states_with_modules` except that we
68
+ must call :func:`_is_fsdp_root` to force a lazy initialization to determine
69
+ the FSDP root in case lazy initialization has not yet happened.
70
+ """
71
+ fsdp_root_states: List[_FSDPState] = []
72
+ fsdp_root_modules: List[nn.Module] = []
73
+ visited_fsdp_states: Set[_FSDPState] = set()
74
+ # NOTE: This function assumes that `module.modules()` proceeds top-down.
75
+ for submodule in module.modules():
76
+ optional_state = _get_module_fsdp_state(submodule)
77
+ if (
78
+ optional_state is not None
79
+ and optional_state not in visited_fsdp_states
80
+ and _is_fsdp_root(optional_state, submodule)
81
+ ):
82
+ visited_fsdp_states.add(optional_state)
83
+ fsdp_root_states.append(optional_state)
84
+ fsdp_root_modules.append(submodule)
85
+ return fsdp_root_states, fsdp_root_modules
86
+
87
+
88
+ def _get_fsdp_root_states(module: nn.Module) -> List[_FSDPState]:
89
+ """See :func:`_get_fsdp_root_states_with_modules`."""
90
+ fsdp_root_states, _ = _get_fsdp_root_states_with_modules(module)
91
+ return fsdp_root_states
92
+
93
+
94
+ def _is_fsdp_root(state: _FSDPState, module: nn.Module) -> bool:
95
+ """
96
+ Returns if ``state`` corresponds to that of an FSDP root.
97
+
98
+ For the wrapper code path, ``state`` and ``module`` should be the same. For
99
+ the non-wrapper code path, ``state`` should be ``module`` 's state.
100
+ """
101
+ # Force a lazy initialization to determine the FSDP root
102
+ _lazy_init(state, module)
103
+ assert state._is_root is not None # mypy
104
+ return state._is_root
105
+
106
+
107
+ @no_type_check
108
+ def _lazy_init(
109
+ state: _FSDPState,
110
+ root_module: nn.Module,
111
+ ) -> _FSDPState:
112
+ """
113
+ Performs initialization lazily, typically right before the first forward
114
+ pass. The laziness is needed to ensure that the parameter device/dtype and
115
+ the FSDP hierarchy have finalized. This method's actual logic only runs on
116
+ the root FSDP instance, which performs initialization for all non-root FSDP
117
+ instances to avoid partial initialization.
118
+
119
+ For the non-composable code path, ``state`` and ``root_module`` should be
120
+ the same, namely the FSDP instance itself.
121
+ """
122
+ if state._is_root is not None:
123
+ return # no-op: already lazily initialized
124
+ if not state._device_handle.is_available():
125
+ # Allow the FSDP constructor to run even without CUDA but check this
126
+ # once we start real execution
127
+ raise RuntimeError("FSDP does not support CPU only execution")
128
+ # The following logic is only run on the root FSDP instance since it will
129
+ # set `_is_root=False` for the non-root instances
130
+ state._is_root = True
131
+ _assert_in_training_states(state, [TrainingState.IDLE])
132
+ _check_flat_params_on_expected_device(state, root_module)
133
+ state._all_fsdp_states = traversal_utils._get_fsdp_states(root_module)
134
+ _init_streams(state)
135
+ buffers, buffer_dtypes = _get_buffers_and_dtypes_for_computation(state, root_module)
136
+ _cast_buffers_to_dtype_and_device(buffers, buffer_dtypes, state.compute_device)
137
+ state._exec_order_data.init(state, root_module, state.process_group)
138
+ _share_state_and_init_handle_attrs(state, root_module)
139
+ return state
140
+
141
+
142
+ def _check_flat_params_on_expected_device(state: _FSDPState, module: nn.Module):
143
+ """
144
+ Checks that all ``FlatParameter``s in ``module`` 's tree managed by
145
+ ``state`` are on the expected device for *lazy initialization*.
146
+ """
147
+ cpu_device = torch.device("cpu")
148
+ for handle in traversal_utils._get_fsdp_handles(module):
149
+ if (
150
+ not handle._offload_params
151
+ and handle.flat_param.device != state.compute_device
152
+ ):
153
+ raise RuntimeError(
154
+ "An FSDP-managed module unexpectedly has parameters on "
155
+ f"{handle.flat_param.device}. Make sure to move the module to "
156
+ f"{state.compute_device} before training."
157
+ )
158
+ elif handle._offload_params and handle.flat_param.device != cpu_device:
159
+ raise RuntimeError(
160
+ "An FSDP-managed module with parameter CPU offloading enabled "
161
+ f"has parameters on {handle.flat_param.device}. Make sure to "
162
+ f"not move the module from CPU when offloading parameters."
163
+ )
164
+
165
+
166
+ @no_type_check
167
+ def _share_state_and_init_handle_attrs(
168
+ root_state: _FSDPState,
169
+ root_module: nn.Module,
170
+ ) -> None:
171
+ """
172
+ Shares data structure state from the ``root_state`` to all FSDP states in
173
+ ``root_module`` 's module tree, and initializes handle attributes. These
174
+ are done together to require a single loop over the states.
175
+ """
176
+ handle = root_state._handle
177
+ if handle:
178
+ handle.init_flat_param_attributes()
179
+ attr_name_to_values: Dict[str, Set[Any]] = {}
180
+ for attr_name in HOMOGENEOUS_ATTR_NAMES:
181
+ attr_name_to_values[attr_name] = set()
182
+ root_state._all_handles = root_state._exec_order_data.all_handles # share reference
183
+ # Update _has_optim_in_backward for each handle.
184
+ for handle in root_state._all_handles:
185
+ flat_param = handle.flat_param
186
+ if hasattr(flat_param, "_in_backward_optimizers"):
187
+ raise RuntimeError(
188
+ "FSDP optimizer in backward only supported with use_orig_params=True!"
189
+ )
190
+ handle._has_optim_in_backward = flat_param._params is not None and any(
191
+ hasattr(param, "_in_backward_optimizers") for param in flat_param._params
192
+ )
193
+ if handle._has_optim_in_backward:
194
+ torch._C._log_api_usage_once("fsdp.optimizer_in_backward")
195
+ for fsdp_state in root_state._all_fsdp_states:
196
+ for attr_name in HOMOGENEOUS_ATTR_NAMES:
197
+ _p_assert(
198
+ hasattr(fsdp_state, attr_name),
199
+ f"FSDP state missing attribute {attr_name}",
200
+ )
201
+ attr_name_to_values[attr_name].add(getattr(fsdp_state, attr_name))
202
+ if fsdp_state is root_state:
203
+ continue
204
+ # Relax the assert for non-root FSDP instances in case the nested
205
+ # initialized module is wrapped again in FSDP later (e.g. after
206
+ # training to run inference)
207
+ _p_assert(
208
+ fsdp_state._is_root is None or not fsdp_state._is_root,
209
+ "Non-root FSDP instance's `_is_root` should not have been "
210
+ "set yet or should have been set to `False`",
211
+ )
212
+ fsdp_state._is_root = False
213
+ fsdp_state._unshard_stream = root_state._unshard_stream
214
+ fsdp_state._post_backward_stream = root_state._post_backward_stream
215
+ fsdp_state._pre_unshard_stream = root_state._pre_unshard_stream
216
+ fsdp_state._all_reduce_stream = root_state._all_reduce_stream
217
+ fsdp_state._default_stream = root_state._default_stream
218
+ fsdp_state._exec_order_data = root_state._exec_order_data
219
+ fsdp_state._free_event_queue = root_state._free_event_queue
220
+ if fsdp_state._fsdp_extension is not None:
221
+ fsdp_state._fsdp_extension.compute_stream = root_state._default_stream
222
+ handle = fsdp_state._handle
223
+ if handle:
224
+ handle.init_flat_param_attributes()
225
+ for attr_name, attr_values in attr_name_to_values.items():
226
+ if len(attr_values) != 1:
227
+ raise ValueError(
228
+ f"Expects one homogeneous value for {attr_name} but got {attr_values}"
229
+ )
230
+
231
+
232
+ @no_type_check
233
+ def _init_streams(
234
+ state: _FSDPState,
235
+ ) -> None:
236
+ """
237
+ Initializes CUDA streams for overlapping communication, computation, and
238
+ data transfers. The streams should be shared across FSDP instances.
239
+ """
240
+ assert state._is_root
241
+ assert state._device_handle.is_available()
242
+ uses_hybrid_sharding = any(
243
+ fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES
244
+ for fsdp_state in state._all_fsdp_states
245
+ )
246
+ # Prioritize all-gathers/reduce-scatters over async all-reduce for HSDP and
247
+ # preserve the default priority of 0 otherwise
248
+ high_priority = -1 if state.limit_all_gathers and uses_hybrid_sharding else 0
249
+ # Default stream for computation
250
+ state._default_stream = state._device_handle.current_stream()
251
+ if state._fsdp_extension is not None:
252
+ # set the compute stream to the FSDP extension
253
+ state._fsdp_extension.compute_stream = state._default_stream
254
+
255
+ # Stream for unshard logic, including allocating the all-gather destination
256
+ # tensors and the all-gathers themselves
257
+ state._unshard_stream = state._device_handle.Stream(priority=high_priority)
258
+ # Stream for overlapping gradient reduction with the backward pass gradient
259
+ # computation
260
+ state._post_backward_stream = state._device_handle.Stream(priority=high_priority)
261
+ # Stream for pre-unshard logic, namely allocations and writes for CPU
262
+ # offloading (H2D copy) and mixed precision (low precision cast)
263
+ state._pre_unshard_stream = state._device_handle.Stream(priority=high_priority)
264
+ # Stream to run HSDP's all-reduce as async (if using HSDP)
265
+ state._all_reduce_stream = (
266
+ state._device_handle.Stream() if uses_hybrid_sharding else state._default_stream
267
+ )
268
+
269
+
270
+ @no_type_check
271
+ def _unshard(
272
+ state: _FSDPState,
273
+ handle: FlatParamHandle,
274
+ unshard_stream: torch.Stream,
275
+ pre_unshard_stream: torch.Stream,
276
+ ) -> None:
277
+ """
278
+ Unshards the handles in ``handles``. If the handles are in
279
+ :meth:`summon_full_params` and are using mixed precision, then they are
280
+ forced to full precision.
281
+
282
+ Postcondition: handle's ``FlatParameter`` 's data is the padded
283
+ unsharded flat parameter on the compute device.
284
+ """
285
+ if not handle:
286
+ return
287
+ with state._device_handle.stream(pre_unshard_stream):
288
+ ran_pre_unshard = handle.pre_unshard()
289
+ if ran_pre_unshard:
290
+ unshard_stream.wait_stream(pre_unshard_stream)
291
+ if state.limit_all_gathers:
292
+ event = state._free_event_queue.dequeue_if_needed()
293
+ if event:
294
+ with torch.profiler.record_function(
295
+ "FullyShardedDataParallel.rate_limiter"
296
+ ):
297
+ event.synchronize()
298
+ with state._device_handle.stream(unshard_stream):
299
+ handle.unshard()
300
+ handle.post_unshard()
301
+
302
+
303
+ @no_type_check
304
+ def _reshard(
305
+ state: _FSDPState,
306
+ handle: FlatParamHandle,
307
+ free_unsharded_flat_param: bool,
308
+ ):
309
+ """
310
+ Reshards the handle. ``free_unsharded_flat_param`` indicates whether to
311
+ free the handle's padded unsharded flat parameter.
312
+ """
313
+ handle.reshard(free_unsharded_flat_param)
314
+ if state.limit_all_gathers and free_unsharded_flat_param:
315
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
316
+ # We don't run a even queue for freeing under torch compile atm
317
+ # But maybe we need to? TODO(voz): Look into this
318
+ free_event = state._device_handle.Event()
319
+ free_event.record()
320
+ state._free_event_queue.enqueue(free_event)
321
+ handle.post_reshard()
322
+ # Flat parameter freed or not, we always have to "unshard" the parameter
323
+ # upon next access to get its shape correct.
324
+ handle._prefetched = False
325
+
326
+
327
+ def _unshard_grads(
328
+ handle: Optional[FlatParamHandle],
329
+ ) -> None:
330
+ if handle:
331
+ handle.unshard_grad()
332
+
333
+
334
+ def _reshard_grads(
335
+ handle: Optional[FlatParamHandle],
336
+ ) -> None:
337
+ if handle:
338
+ handle.reshard_grad()
339
+
340
+
341
+ @no_type_check
342
+ def _pre_forward(
343
+ state: _FSDPState,
344
+ handle: Optional[FlatParamHandle],
345
+ unshard_fn: Callable,
346
+ module: nn.Module,
347
+ args: Tuple[Any, ...],
348
+ kwargs: Dict[str, Any],
349
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
350
+ """
351
+ Runs the pre-forward logic. This includes an opportunity to unshard
352
+ currently sharded parameters such as those for the current forward and
353
+ registering post-backward hooks for these current parameters. This function
354
+ also converts forward ``args`` and ``kwargs`` to the given precision.
355
+
356
+ Args:
357
+ handles (List[FlatParamHandle]): Handles giving the parameters used in
358
+ the current forward.
359
+ unshard_fn (Optional[Callable]): A callable to unshard any currently
360
+ sharded parameters or ``None`` to not do any unsharding.
361
+ module (nn.Module): Module whose forward this method runs right before;
362
+ expected by the hook signature.
363
+ args (Tuple[Any, ...]): Module forward ``args``.
364
+ kwargs (Dict[str, Any]): Module forward ``kwargs``.
365
+ """
366
+ with torch.profiler.record_function("FullyShardedDataParallel._pre_forward"):
367
+ # For `fully_shard` + `checkpoint`, skip pre-forward logic in the
368
+ # recomputed forward
369
+ if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:
370
+ # For both checkpoint implementations, we do not need to re-cast
371
+ # inputs here since they will be checkpointed in the low precision
372
+ # either by AC or normally by autograd as long as the AC region is
373
+ # nested within FSDP
374
+ return args, kwargs
375
+ state.training_state = TrainingState.FORWARD_BACKWARD
376
+ state._exec_order_data.record_pre_forward(handle, module.training)
377
+ if handle:
378
+ handle._training_state = HandleTrainingState.FORWARD
379
+ if unshard_fn is not None:
380
+ unshard_fn(state, handle)
381
+ # Register post-backward hooks to reshard the parameters and reduce-scatter
382
+ # their gradients. They must be re-registered every forward pass in case
383
+ # the `grad_fn` is mutated.
384
+ _register_post_backward_hook(state, handle)
385
+ # We have to reallocate the _cpu_grad if optimizer overlap
386
+ # set the grad to None in the backward pass.
387
+ if handle and handle._offload_params and handle.flat_param._cpu_grad is None:
388
+ handle.flat_param._cpu_grad = torch.zeros_like(
389
+ handle.flat_param._local_shard, device=torch.device("cpu")
390
+ ).pin_memory()
391
+
392
+ should_cast_forward_inputs = (
393
+ state._handle and not state._handle._force_full_precision
394
+ )
395
+
396
+ if should_cast_forward_inputs and state.mixed_precision.cast_forward_inputs:
397
+ # Recursively convert args and kwargs to specified precision.
398
+ input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype
399
+ args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs)
400
+ _register_post_backward_reshard_only_hook(state, handle, args, kwargs)
401
+ return args, kwargs
402
+
403
+
404
+ @no_type_check
405
+ def _pre_forward_unshard(
406
+ state: _FSDPState,
407
+ handle: Optional[FlatParamHandle],
408
+ ) -> None:
409
+ """Unshards parameters in the pre-forward."""
410
+ if not handle:
411
+ return
412
+ # If the handles have been prefetched, then there is no need to call
413
+ # `_unshard()` again
414
+ if not handle._prefetched:
415
+ _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)
416
+ handle._needs_pre_forward_unshard = False
417
+ # Don't wait during trace
418
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
419
+ state._device_handle.current_stream().wait_stream(state._unshard_stream)
420
+ with torch.profiler.record_function(
421
+ "FullyShardedDataParallel._pre_forward_prefetch"
422
+ ):
423
+ _prefetch_handle(state, handle, _PrefetchMode.FORWARD)
424
+
425
+
426
+ @no_type_check
427
+ def _post_forward(
428
+ state: _FSDPState,
429
+ handle: Optional[FlatParamHandle],
430
+ reshard_fn: Callable,
431
+ module: nn.Module,
432
+ input: Any,
433
+ output: Any,
434
+ ) -> Any:
435
+ """
436
+ Runs the post-forward logic. This includes an opportunity to reshard
437
+ currently unsharded parameters such as those used in the current forward
438
+ and registering pre-backward hooks on the forward outputs.
439
+
440
+ Args:
441
+ handles (List[FlatParamHandle]): Handles giving the parameters used in
442
+ the current forward.
443
+ reshard_fn (Optional[Callable]): A callable to reshard any currently
444
+ unsharded parameters (e.g. from the current forward) or ``None`` to
445
+ not do any resharding.
446
+ module (nn.Module): Module whose forward just ran, which should be a
447
+ fully sharded module (see [Note: Fully Sharded Module]); expected
448
+ by the hook signature.
449
+ input (Any): Unused; expected by the hook signature.
450
+ output (Any): Forward pass output; pre-backward hooks are registered on
451
+ the tensors that require gradients in this output.
452
+
453
+ Postcondition: Each ``FlatParameter`` 's data points to the sharded flat
454
+ parameter.
455
+ """
456
+ with torch.profiler.record_function("FullyShardedDataParallel._post_forward"):
457
+ # For `fully_shard` + `checkpoint`, skip post-forward logic in the
458
+ # recomputed forward
459
+ if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:
460
+ return output
461
+
462
+ state._exec_order_data.record_post_forward(handle)
463
+ if reshard_fn is not None:
464
+ reshard_fn(state, handle)
465
+ # Register pre-backward hooks to unshard the flat parameters for the
466
+ # gradient computation (if needed)
467
+ output = _register_pre_backward_hooks(state, module, output, handle)
468
+ state.training_state = TrainingState.IDLE
469
+ if handle:
470
+ handle._training_state = HandleTrainingState.IDLE
471
+ return output
472
+
473
+
474
+ @no_type_check
475
+ def _post_forward_reshard(
476
+ state: _FSDPState,
477
+ handle: FlatParamHandle,
478
+ ) -> None:
479
+ """Reshards parameters in the post-forward."""
480
+ if not handle:
481
+ return
482
+ # Do not free the root's parameters in the post-forward for `FULL_SHARD`
483
+ # with the intention that they are immediately used for backward
484
+ # computation (though this may not be true)
485
+ free_unsharded_flat_param = (
486
+ not state._is_root
487
+ and handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES
488
+ )
489
+ _reshard(state, handle, free_unsharded_flat_param)
490
+
491
+
492
+ @no_type_check
493
+ def _root_pre_forward(
494
+ state: _FSDPState,
495
+ module: nn.Module,
496
+ args,
497
+ kwargs,
498
+ ) -> None:
499
+ """
500
+ Runs pre-forward logic specific to the root FSDP instance, which should run
501
+ before any individual module's pre-forward. This starts with an attempt at
502
+ lazy initialization (which only runs non-vacuously once). Otherwise, if
503
+ this is called on a non-root FSDP instance, then it returns directly.
504
+
505
+ Args:
506
+ module (nn.Module): Module for which this logic tries to run. It may or
507
+ may not be the root. If not, then this method does not do anything.
508
+ """
509
+ with torch.profiler.record_function("FullyShardedDataParallel._root_pre_forward"):
510
+ _lazy_init(state, module)
511
+ _p_assert(state._is_root is not None, "Expects a root FSDP to have been set")
512
+ if not state._is_root:
513
+ # Always cast forward inputs in the root of this local FSDP unit for mixed
514
+ # precision, as this is where mixed precision could be configed.
515
+ # This is more useful for auto wrapping that is recommended in composable path.
516
+ # For manual wrapping, cast forward inputs on each local FSDP unit root will
517
+ # increase some overhead, so not turned on for model wrapper path right now where
518
+ # manual wrapping is more broadly used.
519
+ if _is_composable(state):
520
+ return _root_cast_forward_input(state, module, args, kwargs)
521
+ return args, kwargs
522
+
523
+ # We cast buffers back to full precision if we're forcing full precision. Disjointly, we check if buffers
524
+ # are in full precision and if we should cast them back to lower precision, which happens when
525
+ # exiting eval() mode.
526
+ handle = state._handle
527
+ if handle:
528
+ should_cast_buffers_to_full_prec = handle._force_full_precision
529
+ else:
530
+ should_cast_buffers_to_full_prec = True
531
+
532
+ if should_cast_buffers_to_full_prec:
533
+ _cast_buffers_to_dtype_and_device(
534
+ buffers=dict(module.named_buffers()).values(),
535
+ buffer_dtypes=list(state._buffer_name_to_orig_dtype.values()),
536
+ device=state.compute_device,
537
+ )
538
+ # This flag is only set when we cast buffers to full precision, to avoid the
539
+ # CPU overhead that can stem from retrieving all buffers and their types in the
540
+ # following else branch.
541
+ state._needs_buffer_dtype_restore_check = True
542
+ elif getattr(state, "_needs_buffer_dtype_restore_check", False):
543
+ # Check if buffers are in full precision and we need to cast them
544
+ # back down.
545
+ (
546
+ buffers,
547
+ buffer_dtypes_for_computation,
548
+ ) = _get_buffers_and_dtypes_for_computation(state, module)
549
+ if len(buffers) > 0 and len(buffer_dtypes_for_computation) > 0:
550
+ if any(
551
+ buffer.dtype != buffer_dtype_for_computation
552
+ for buffer, buffer_dtype_for_computation in zip(
553
+ buffers, buffer_dtypes_for_computation
554
+ )
555
+ ):
556
+ # Assume we have to cast everything if there is one mismatch
557
+ _cast_buffers_to_dtype_and_device(
558
+ buffers, buffer_dtypes_for_computation, state.compute_device
559
+ )
560
+ # We don't have to check this again until we cast buffers to full precision again.
561
+ state._needs_buffer_dtype_restore_check = False
562
+
563
+ if state.forward_prefetch:
564
+ handles = []
565
+ for fsdp_state in state._all_fsdp_states:
566
+ if fsdp_state._handle:
567
+ handles.append(fsdp_state._handle)
568
+ for handle in handles:
569
+ handle._needs_pre_forward_unshard = True
570
+ handle._prefetched = False
571
+ _wait_for_computation_stream(
572
+ state._device_handle.current_stream(),
573
+ state._unshard_stream,
574
+ state._pre_unshard_stream,
575
+ )
576
+ _reset_flat_param_grad_info_if_needed(state._all_handles)
577
+
578
+ # Prepares the forward inputs by moving them to ``compute_device``
579
+ # TODO: Do not use the side stream for tensor copies for now; investigate
580
+ # the perf with/without it.
581
+ with torch.profiler.record_function("FullyShardedDataParallel._to_kwargs"):
582
+ args_tuple, kwargs_tuple = _to_kwargs(
583
+ args, kwargs, state.compute_device, False
584
+ )
585
+ args = args_tuple[0]
586
+ kwargs = kwargs_tuple[0]
587
+
588
+ return _root_cast_forward_input(state, module, args, kwargs)
589
+
590
+
591
+ @no_type_check
592
+ def _root_cast_forward_input(
593
+ state: _FSDPState, module: torch.nn.Module, args, kwargs
594
+ ) -> Tuple[Any, Any]:
595
+ if state._handle:
596
+ force_full_precision = not state._handle._force_full_precision
597
+ else:
598
+ force_full_precision = True
599
+
600
+ should_cast_forward_inputs = (
601
+ (module.training or not state._use_full_prec_in_eval) and force_full_precision
602
+ ) and state.mixed_precision.cast_root_forward_inputs
603
+
604
+ if should_cast_forward_inputs:
605
+ input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype
606
+ args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs)
607
+
608
+ return args, kwargs
609
+
610
+
611
+ @no_type_check
612
+ def _pre_backward_hook(
613
+ state: _FSDPState,
614
+ module: nn.Module,
615
+ handle: FlatParamHandle,
616
+ grad,
617
+ *unused: Any,
618
+ ) -> Any:
619
+ """
620
+ Prepares ``_handle`` 's ``FlatParameter`` s for gradient computation.
621
+
622
+ Args:
623
+ module (nn.Module): Fully sharded module (see [Note: Fully Sharded
624
+ Module]).
625
+ """
626
+ # Only run the pre-backward hook once per group of handles involved in the
627
+ # same module forward computation
628
+ if (
629
+ handle
630
+ and hasattr(handle, "_ran_pre_backward_hook")
631
+ and handle._ran_pre_backward_hook
632
+ ):
633
+ log.debug("%s %s", id(state), "Not Running pre backward! Already Ran!")
634
+ return grad
635
+
636
+ with torch.profiler.record_function("FullyShardedDataParallel._pre_backward_hook"):
637
+ # Queue the post-backward callback once for the root FSDP instance to
638
+ # attach it to the outermost backward graph task so that it is called
639
+ # after all backward calls complete
640
+ if state._is_root and not state._post_backward_callback_queued:
641
+ _register_post_backward_final_callback(state, module)
642
+ _reset_flat_param_grad_info_if_needed(state._all_handles)
643
+ elif handle:
644
+ allowed_states = [TrainingState.IDLE]
645
+ if _is_composable(state):
646
+ allowed_states.append(TrainingState.FORWARD_BACKWARD)
647
+ _assert_in_training_states(state, allowed_states)
648
+ state.training_state = TrainingState.FORWARD_BACKWARD
649
+ # Queueing the post-backward callback is the only logic that is not
650
+ # per-handle in the pre-backward hook, so we can return early here if
651
+ # there are no handles.
652
+ if not handle:
653
+ return grad
654
+ handle._training_state = HandleTrainingState.BACKWARD_PRE
655
+
656
+ if handle._needs_pre_backward_unshard:
657
+ # If the handles have been prefetched, then there is no need to
658
+ # call `_unshard()` again
659
+ if not handle._prefetched:
660
+ _unshard(
661
+ state,
662
+ handle,
663
+ state._unshard_stream,
664
+ state._pre_unshard_stream,
665
+ )
666
+ # Don't wait during trace
667
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
668
+ state._device_handle.current_stream().wait_stream(state._unshard_stream)
669
+
670
+ # Set this to `False` to ensure that a mistargeted prefetch does not
671
+ # actually unshard these handles
672
+ handle._needs_pre_backward_unshard = False
673
+ with torch.profiler.record_function(
674
+ "FullyShardedDataParallel._pre_backward_prefetch"
675
+ ):
676
+ _prefetch_handle(state, handle, _PrefetchMode.BACKWARD)
677
+ handle.prepare_gradient_for_backward()
678
+ handle._ran_pre_backward_hook = True
679
+ return grad
680
+
681
+
682
+ @no_type_check
683
+ @torch.no_grad()
684
+ def _post_backward_hook(
685
+ state: _FSDPState,
686
+ handle: FlatParamHandle,
687
+ flat_param,
688
+ *unused: Any,
689
+ ):
690
+ """
691
+ Reduce-scatters the gradient of ``handle`` 's ``FlatParameter``.
692
+
693
+ Precondition: The ``FlatParameter`` 's ``.grad`` attribute contains the
694
+ unsharded gradient for the local batch.
695
+
696
+ Postcondition:
697
+ - If using ``NO_SHARD``, then the ``.grad`` attribute is the reduced
698
+ unsharded gradient.
699
+ - Otherwise, the ``_saved_grad_shard`` attribute is the reduced sharded
700
+ gradient (accumulating with any existing gradient).
701
+ """
702
+ _log_post_backward_hook(state, handle, log)
703
+ flat_param = handle.flat_param
704
+ flat_param._post_backward_called = True
705
+ with torch.autograd.profiler.record_function(
706
+ "FullyShardedDataParallel._post_backward_hook"
707
+ ):
708
+ _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])
709
+ # For multiple applications of reentrant AC across submodules sharing
710
+ # the same `FlatParameter`, the post-backward hook may run multiple
711
+ # times in one backward, in which case we permit the state to already
712
+ # be in `BACKWARD_POST`.
713
+ _p_assert(
714
+ handle._training_state
715
+ in (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.BACKWARD_POST),
716
+ f"Expects `BACKWARD_PRE` or `BACKWARD_POST` state but got {handle._training_state}",
717
+ )
718
+ handle._training_state = HandleTrainingState.BACKWARD_POST
719
+
720
+ if flat_param.grad is None:
721
+ return
722
+ if flat_param.grad.requires_grad:
723
+ raise RuntimeError("FSDP does not support gradients of gradients")
724
+
725
+ _post_backward_reshard(state, handle)
726
+ if not state._sync_gradients:
727
+ if handle._use_orig_params:
728
+ handle._use_unsharded_grad_views()
729
+ return
730
+
731
+ # Wait for all ops in the current stream (e.g. gradient computation) to
732
+ # finish before reduce-scattering the gradient
733
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
734
+ state._post_backward_stream.wait_stream(
735
+ state._device_handle.current_stream()
736
+ )
737
+
738
+ with state._device_handle.stream(state._post_backward_stream):
739
+ autograd_computed_grad = flat_param.grad.data
740
+ if (
741
+ not _low_precision_hook_enabled(state)
742
+ and flat_param.grad.dtype != handle._reduce_dtype
743
+ # If we are forcing full precision but communicating grads
744
+ # (i.e. model.eval() + full precision in eval was configured), don't downcast gradient.
745
+ and not handle._force_full_precision
746
+ ):
747
+ flat_param.grad.data = flat_param.grad.to(handle._reduce_dtype)
748
+ if handle.uses_sharded_strategy:
749
+ _reduce_grad(state, handle)
750
+ else:
751
+ _reduce_grad_no_shard(state, handle)
752
+ # Since the unsharded gradient is produced in the computation
753
+ # stream and consumed in the post-backward stream, inform the
754
+ # caching allocator (before it goes out of scope)
755
+ _no_dispatch_record_stream(
756
+ autograd_computed_grad, state._post_backward_stream
757
+ )
758
+
759
+
760
+ def _post_backward_reshard_only_hook(
761
+ state: _FSDPState,
762
+ handle: FlatParamHandle,
763
+ *unused: Any,
764
+ ) -> None:
765
+ with torch.profiler.record_function(
766
+ "FullyShardedDataParallel._post_backward_hook_reshard_only"
767
+ ):
768
+ # `_pre_backward_hook` may not get executed
769
+ # if forward output does not require grad
770
+ # overwrite IDLE state for post-backward prefetching
771
+ state.training_state = TrainingState.FORWARD_BACKWARD
772
+ handle._training_state = HandleTrainingState.BACKWARD_POST
773
+ _post_backward_reshard(state, handle)
774
+
775
+
776
+ def _post_backward_reshard(
777
+ state: _FSDPState,
778
+ handle: FlatParamHandle,
779
+ *unused: Any,
780
+ ) -> None:
781
+ free_unsharded_flat_param = _should_free_in_backward(state, handle)
782
+ _reshard(state, handle, free_unsharded_flat_param)
783
+
784
+ # TODO: Post-backward prefetching does not support the multiple handles
785
+ # per module case since the post-backward hook runs per handle, not per
786
+ # group of handles.
787
+ with torch.profiler.record_function(
788
+ "FullyShardedDataParallel._post_backward_prefetch"
789
+ ):
790
+ _prefetch_handle(state, handle, _PrefetchMode.BACKWARD)
791
+
792
+
793
+ @no_type_check
794
+ def _should_free_in_backward(
795
+ state: _FSDPState,
796
+ handle: FlatParamHandle,
797
+ ) -> bool:
798
+ """
799
+ Returns whether FSDP should free the unsharded flat parameter in the
800
+ post-backward or not.
801
+ """
802
+ if not handle.uses_sharded_strategy:
803
+ return False
804
+ # If not syncing gradients, then we do not free for strategies that do not
805
+ # reshard after forward as a *heuristic* to tradeoff higher memory for
806
+ # higher throughput.
807
+ return (
808
+ state._sync_gradients
809
+ or handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES
810
+ )
811
+
812
+
813
+ @no_type_check
814
+ def _reduce_grad(state: _FSDPState, handle: FlatParamHandle) -> None:
815
+ """
816
+ For sharded strategies, this runs gradient reduction, sharded gradient
817
+ accumulation if needed, and the post-reduction callback.
818
+ """
819
+ flat_param = handle.flat_param
820
+ uses_hybrid_sharded_strategy = handle._sharding_strategy in (
821
+ HandleShardingStrategy.HYBRID_SHARD,
822
+ HandleShardingStrategy._HYBRID_SHARD_ZERO2,
823
+ )
824
+ # We clear `.grad` to permit multiple backwards. This avoids a race where
825
+ # the second backward pass computation proceeds ahead of the first backward
826
+ # pass reduction, which is possible since the reduction is issued in a
827
+ # separate stream and runs asynchronously; such a race would result in
828
+ # reducing the wrong gradient.
829
+ unsharded_grad = flat_param.grad.data
830
+ flat_param.grad = None
831
+ padded_unsharded_grad, new_sharded_grad = _get_reduce_scatter_tensors(
832
+ state, unsharded_grad
833
+ )
834
+ if state._comm_hook is None: # default path
835
+ _div_if_needed(padded_unsharded_grad, state._gradient_predivide_factor)
836
+ pg = (
837
+ handle._fake_process_group
838
+ if handle._use_fake_reduce
839
+ else state.process_group
840
+ )
841
+ dist.reduce_scatter_tensor(
842
+ new_sharded_grad,
843
+ padded_unsharded_grad,
844
+ group=pg,
845
+ )
846
+ if uses_hybrid_sharded_strategy:
847
+ # Don't wait during trace
848
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
849
+ state._all_reduce_stream.wait_stream(state._post_backward_stream)
850
+ with state._device_handle.stream(state._all_reduce_stream):
851
+ # Since the new sharded gradient is produced in the post-
852
+ # backward stream and consumed in the all-reduce stream,
853
+ # inform the caching allocator
854
+ _no_dispatch_record_stream(new_sharded_grad, state._all_reduce_stream)
855
+ dist.all_reduce(new_sharded_grad, group=state._inter_node_pg)
856
+ _div_if_needed(new_sharded_grad, state._gradient_postdivide_factor)
857
+ grad_to_offload = _accumulate_sharded_grad(
858
+ state, handle, new_sharded_grad
859
+ )
860
+ _post_reduce_grad_callback(state, handle, grad_to_offload)
861
+ return
862
+ _div_if_needed(new_sharded_grad, state._gradient_postdivide_factor)
863
+ else:
864
+ state._comm_hook(
865
+ state._comm_hook_state, padded_unsharded_grad, new_sharded_grad
866
+ )
867
+ # NOTE: HSDP variants do not support communication hook.
868
+ grad_to_offload = _accumulate_sharded_grad(state, handle, new_sharded_grad)
869
+ _post_reduce_grad_callback(state, handle, grad_to_offload)
870
+
871
+
872
+ @no_type_check
873
+ def _get_reduce_scatter_tensors(
874
+ state: _FSDPState, unsharded_grad: torch.Tensor
875
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
876
+ """
877
+ Returns the input and output tensors to reduce-scatter, respectively.
878
+ """
879
+ chunks = list(unsharded_grad.chunk(state.world_size))
880
+ numel_to_pad = state.world_size * chunks[0].numel() - unsharded_grad.numel()
881
+ padded_unsharded_grad = (
882
+ F.pad(unsharded_grad, [0, numel_to_pad]) if numel_to_pad > 0 else unsharded_grad
883
+ )
884
+ new_sharded_grad = torch.empty_like(chunks[0]) # padded
885
+ return padded_unsharded_grad, new_sharded_grad
886
+
887
+
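The padding arithmetic in ``_get_reduce_scatter_tensors`` can be checked in isolation, without any process group. A minimal sketch, assuming a hypothetical world size of 3 and a 1-D gradient whose numel is not divisible by it (``world_size`` and ``unsharded_grad`` here are illustrative stand-ins, not FSDP state):

import torch
import torch.nn.functional as F

world_size = 3
unsharded_grad = torch.arange(8, dtype=torch.float32)  # 8 elements, not divisible by 3

# Mirror the helper's math: chunk, compute the padding, pad, and allocate the shard output.
chunks = list(unsharded_grad.chunk(world_size))                          # sizes [3, 3, 2]
numel_to_pad = world_size * chunks[0].numel() - unsharded_grad.numel()   # 9 - 8 = 1
padded = F.pad(unsharded_grad, [0, numel_to_pad]) if numel_to_pad > 0 else unsharded_grad
new_sharded_grad = torch.empty_like(chunks[0])                           # 3 elements per rank

# The reduce-scatter input must be exactly world_size equally sized shards.
assert padded.numel() == world_size * new_sharded_grad.numel()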
888
+ @no_type_check
889
+ def _accumulate_sharded_grad(
890
+ state: _FSDPState,
891
+ handle: FlatParamHandle,
892
+ sharded_grad: torch.Tensor,
893
+ ) -> torch.Tensor:
894
+ """
895
+ Accumulates the reduce-scattered sharded gradient with any existing sharded
896
+ gradient if needed, returning the gradient to offload (if CPU offloading is
897
+ enabled).
898
+ """
899
+ flat_param = handle.flat_param
900
+ _cast_grad_to_param_dtype(state, sharded_grad, flat_param)
901
+ # Save the sharded gradient in `_saved_grad_shard` to support gradient
902
+ # accumulation -- for multiple backwards, the gradient reductions may
903
+ # happen in arbitrary order
904
+ accumulate_grad = hasattr(flat_param, "_saved_grad_shard")
905
+ if accumulate_grad:
906
+ _check_grad_to_accumulate(sharded_grad, flat_param._saved_grad_shard)
907
+ flat_param._saved_grad_shard += sharded_grad
908
+ else:
909
+ flat_param._saved_grad_shard = sharded_grad
910
+ grad_to_offload = flat_param._saved_grad_shard
911
+ return grad_to_offload
912
+
913
+
914
+ @no_type_check
915
+ def _reduce_grad_no_shard(state: _FSDPState, handle: FlatParamHandle) -> None:
916
+ """
917
+ For no-shard, this runs gradient reduction (which directly covers any
918
+ gradient accumulation implicitly) and the post-reduction callback.
919
+ """
920
+ flat_param = handle.flat_param
921
+ if state._comm_hook is None: # default path
922
+ _div_if_needed(flat_param.grad, state._gradient_predivide_factor)
923
+ dist.all_reduce(flat_param.grad, group=state.process_group)
924
+ _div_if_needed(flat_param.grad, state._gradient_postdivide_factor)
925
+ else:
926
+ state._comm_hook(state._comm_hook_state, flat_param.grad)
927
+ # For `NO_SHARD`, we can keep the low precision gradients by simply
928
+ # omitting the cast altogether
929
+ if not handle._keep_low_precision_grads:
930
+ _cast_grad_to_param_dtype(state, flat_param.grad, flat_param)
931
+ grad_to_offload = flat_param.grad.data
932
+ _post_reduce_grad_callback(state, handle, grad_to_offload)
933
+
934
+
935
+ @no_type_check
936
+ def _post_reduce_grad_callback(
937
+ state: _FSDPState,
938
+ handle: FlatParamHandle,
939
+ # Additional arguments needed for the callback logic
940
+ grad_to_offload: torch.Tensor,
941
+ ):
942
+ """
943
+ This callback captures any logic to run after the gradient reduction
944
+ finishes. Currently, this offloads the gradient to CPU if CPU offloading is
945
+ enabled and uses sharded gradient views if ``use_orig_params=True``.
946
+ """
947
+ _offload_grad(state, handle, grad_to_offload)
948
+ _post_backward_use_sharded_grad_views(handle)
949
+
950
+
951
+ @no_type_check
952
+ def _offload_grad(
953
+ state: _FSDPState,
954
+ handle: FlatParamHandle,
955
+ grad_to_offload: torch.Tensor,
956
+ ):
957
+ if not handle._offload_params:
958
+ return
959
+ # Offload the gradient to CPU to ensure parameters and gradients are on the
960
+ # same device as required by the optimizer
961
+ # TODO: Investigate why `NO_SHARD` breaks correctness when using
962
+ # `non_blocking=True` here.
963
+ # TODO (rohan-varma): When CPU offload and optimizer overlap,
964
+ # non_blocking=True won't work since the copy may have not finished before
965
+ # the optimizer step executes on CPU. If we want to use non-blocking=True
966
+ # here, we'll have to synchronize before using result on CPU.
967
+ non_blocking = handle.uses_sharded_strategy and not handle._has_optim_in_backward
968
+ handle.flat_param._cpu_grad.copy_(
969
+ grad_to_offload.detach(), non_blocking=non_blocking
970
+ ) # synchronized in the post-backward callback
971
+ # Since the gradient being offloaded may have been produced in the
972
+ # computation stream and is being consumed here in the post-backward
973
+ # stream, inform the caching allocator
974
+ _no_dispatch_record_stream(grad_to_offload.data, state._post_backward_stream)
975
+
976
+
977
+ @no_type_check
978
+ def _post_backward_use_sharded_grad_views(handle: FlatParamHandle):
979
+ if not handle._use_orig_params:
980
+ return
981
+ # Since the handle's `FlatParameter` completed its gradient computation, we
982
+ # should reset the gradient noneness mask
983
+ handle._reset_is_grad_none()
984
+ # Delay using sharded gradient views until after the reduce-scatter instead
985
+ # of immediately after resharding
986
+ handle._use_sharded_grad_views()
987
+ if handle._has_optim_in_backward:
988
+ handle.prepare_gradient_for_optim()
989
+ for orig_param in handle.flat_param._params:
990
+ # Check for `None` gradient to filter parameters not in the rank
991
+ if orig_param.grad is not None and hasattr(
992
+ orig_param, "_in_backward_optimizers"
993
+ ):
994
+ # TODO (rohan-varma): For CPU offload, this unfortunately
995
+ # operates on CPU because the parameters and gradients have
996
+ # already been offloaded. We should run this on GPU after
997
+ # refactoring.
998
+ for optim in orig_param._in_backward_optimizers:
999
+ optim.step()
1000
+
1001
+ optim.zero_grad(set_to_none=True)
1002
+ handle._reset_flat_param_grad_info_if_needed()
1003
+ if handle._offload_params:
1004
+ handle.flat_param._cpu_grad = None
1005
+
1006
+
1007
+ def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None:
1008
+ if div_factor > 1:
1009
+ tensor.div_(div_factor)
1010
+
1011
+
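For intuition on the pre/post-divide factors that the default communication path passes to ``_div_if_needed``, the overall ``1 / world_size`` averaging is split into a division before the reduction and one after it, which limits under/overflow when gradients are communicated in reduced precision. A sketch of one such split, assuming roughly-square factors; this is illustrative and not necessarily the exact factor selection FSDP uses:

import math
import torch

def split_divide_factors(world_size):
    # Split `1 / world_size` into a pre-reduction and a post-reduction division
    # so that neither step rescales the values too aggressively on its own.
    if world_size <= 1:
        return 1.0, float(world_size)
    pre = float(2 ** math.floor(math.log2(math.sqrt(world_size))))
    return pre, world_size / pre

grad = torch.full((4,), 8.0)
pre, post = split_divide_factors(16)  # (4.0, 4.0) for 16 ranks
grad.div_(pre)                        # before reduce-scatter / all-reduce
# ... the collective reduction would run here ...
grad.div_(post)                       # after the reduction
assert pre * post == 16.0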
1012
+ @no_type_check
1013
+ def _cast_grad_to_param_dtype(
1014
+ state: _FSDPState,
1015
+ sharded_grad: torch.Tensor,
1016
+ param: FlatParameter,
1017
+ ):
1018
+ """
1019
+ Casts ``sharded_grad`` back to the full parameter dtype so that the
1020
+ optimizer step runs with that dtype. This performs an actual cast if
1021
+ 1. parameters were in reduced precision during the forward since then
1022
+ gradients would be in that reduced precision, or
1023
+ 2. parameters were not in reduced precision but gradients were in
1024
+ reduced precision for communication.
1025
+ However, if a low precision communication hook is registered, then this
1026
+ dtype cast happens in the hook instead.
1027
+ """
1028
+ _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])
1029
+ if not _low_precision_hook_enabled(state) and sharded_grad.dtype != param.dtype:
1030
+ low_prec_grad_data = sharded_grad.data
1031
+ sharded_grad.data = sharded_grad.data.to(dtype=param.dtype)
1032
+ # Since for `NO_SHARD`, the gradient is produced in the computation
1033
+ # stream and consumed here in the post-backward stream, inform the
1034
+ # caching allocator; for the sharded strategies, the gradient is
1035
+ # produced in the post-backward stream, so this `record_stream()`
1036
+ # should be a no-op
1037
+ _no_dispatch_record_stream(
1038
+ low_prec_grad_data, state._device_handle.current_stream()
1039
+ )
1040
+
1041
+
1042
+ def _check_grad_to_accumulate(
1043
+ new_sharded_grad: torch.Tensor,
1044
+ accumulated_grad: torch.Tensor,
1045
+ ) -> None:
1046
+ _p_assert(
1047
+ accumulated_grad.shape == new_sharded_grad.shape,
1048
+ "Shape mismatch when accumulating gradients: "
1049
+ f"existing gradient shape={accumulated_grad.shape} "
1050
+ f"new gradient shape={new_sharded_grad.shape}",
1051
+ )
1052
+ _p_assert(
1053
+ accumulated_grad.device == new_sharded_grad.device,
1054
+ "Device mismatch when accumulating gradients: "
1055
+ f"existing gradient device={accumulated_grad.device} "
1056
+ f"new gradient device={new_sharded_grad.device}",
1057
+ )
1058
+
1059
+
1060
+ @no_type_check
1061
+ def _low_precision_hook_enabled(state: _FSDPState) -> bool:
1062
+ return state._comm_hook in LOW_PRECISION_HOOKS
1063
+
1064
+
1065
+ @no_type_check
1066
+ @torch.no_grad()
1067
+ def _post_backward_final_callback(
1068
+ state: _FSDPState,
1069
+ module: nn.Module,
1070
+ ):
1071
+ """
1072
+ This waits for the post-backward to finish and performs some final cleanup.
1073
+ This runs at the end of the entire backward pass and should only be called
1074
+ on the root FSDP instance.
1075
+ """
1076
+ _p_assert(
1077
+ state._is_root,
1078
+ "The post-backward callback should only be called on the root FSDP instance",
1079
+ )
1080
+ root_state = state
1081
+
1082
+ if root_state._sync_gradients:
1083
+ current_stream = state._device_handle.current_stream()
1084
+ # TODO (rohan-varma): this also waits for the overlapped optimizer step to finish
1085
+ # since it currently runs in the post-backward stream. That can be
1086
+ # pushed to the next forward if run in a different stream
1087
+ current_stream.wait_stream(root_state._post_backward_stream)
1088
+ if root_state._all_reduce_stream is not current_stream: # uses HSDP
1089
+ current_stream.wait_stream(root_state._all_reduce_stream)
1090
+ if root_state.cpu_offload.offload_params:
1091
+ # Wait for non-blocking GPU -> CPU sharded gradient copies from the
1092
+ # post-backward hooks to finish explicitly since CPU gradients do
1093
+ # not automatically synchronize with the GPU
1094
+ state._device_handle.current_stream().synchronize()
1095
+ root_state._exec_order_data.next_iter()
1096
+
1097
+ for fsdp_state in state._all_fsdp_states:
1098
+ _catch_all_reshard(fsdp_state)
1099
+ _finalize_params(fsdp_state)
1100
+ fsdp_state.training_state = TrainingState.IDLE
1101
+ handle = fsdp_state._handle
1102
+ if handle:
1103
+ handle._ran_pre_backward_hook = False
1104
+ handle._needs_pre_backward_unshard = False
1105
+ handle._post_forward_index = None
1106
+ handle._training_state = HandleTrainingState.IDLE
1107
+ handle._prefetched = False
1108
+ # Reset for cases like one forward and multiple backwards
1109
+ root_state._post_backward_callback_queued = False
1110
+
1111
+
1112
+ @no_type_check
1113
+ def _catch_all_reshard(
1114
+ state: _FSDPState,
1115
+ ) -> None:
1116
+ """
1117
+ Reshards the parameters that may not have been resharded in the
1118
+ post-backward hook. This can happen when a module's output is used in the
1119
+ forward pass, meaning that its pre-backward hook runs (unsharding the
1120
+ parameter), but the post-backward hook does not run because the output was
1121
+ not used in the loss computation corresponding to this backward pass.
1122
+ """
1123
+ # Wrap with a try-except to provide a more informative traceback if an
1124
+ # error is raised
1125
+ try:
1126
+ if state._handle:
1127
+ # TODO: This already-resharded check is brittle:
1128
+ # https://github.com/pytorch/pytorch/issues/83956
1129
+ already_resharded = (
1130
+ state._handle.flat_param.data_ptr()
1131
+ == state._handle.flat_param._local_shard.data_ptr()
1132
+ # If FSDP skipped using sharded views, then the flat parameter
1133
+ # still points to the sharded data, so we need to reshard to
1134
+ # use sharded views
1135
+ and not state._handle._skipped_use_sharded_views
1136
+ )
1137
+ if already_resharded:
1138
+ return
1139
+ free_unsharded_flat_param = _should_free_in_backward(state, state._handle)
1140
+ _reshard(state, state._handle, free_unsharded_flat_param)
1141
+ except Exception as e:
1142
+ _p_assert(
1143
+ False,
1144
+ f"Got exception in the catch-all reshard for {state}: {str(e)}",
1145
+ raise_assertion_error=False,
1146
+ )
1147
+ raise e
1148
+
1149
+
1150
+ @no_type_check
1151
+ def _finalize_params(
1152
+ state: _FSDPState,
1153
+ ) -> None:
1154
+ """Finalizes the parameters before the next iteration."""
1155
+ handle = state._handle
1156
+ if not handle:
1157
+ return
1158
+ flat_param = handle.flat_param
1159
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1160
+ if hasattr(flat_param, "_post_backward_hook_handle"):
1161
+ pbhs_handle = flat_param._post_backward_hook_handle
1162
+ pbhs_handle.remove()
1163
+ del flat_param._post_backward_hook_handle
1164
+ else:
1165
+ if hasattr(flat_param, "_post_backward_hook_state"):
1166
+ post_backward_hook_state_len = len(flat_param._post_backward_hook_state)
1167
+ expected_post_backward_hook_state_len = int(flat_param.requires_grad) + 1
1168
+ _p_assert(
1169
+ post_backward_hook_state_len == expected_post_backward_hook_state_len,
1170
+ f"Invalid: ``_post_backward_hook_state``: {flat_param._post_backward_hook_state}",
1171
+ )
1172
+ flat_param._post_backward_hook_state[-1].remove()
1173
+ delattr(flat_param, "_post_backward_hook_state")
1174
+ if flat_param.requires_grad:
1175
+ if not state._sync_gradients:
1176
+ # Preserve the gradient accumulation state if not synchronizing
1177
+ # gradients: `.grad` remains the unsharded gradient from prior
1178
+ # `no_sync()` iterations, and `_saved_grad_shard` remains the
1179
+ # sharded gradient from the last synchronized iteration
1180
+ return
1181
+ if not handle._has_optim_in_backward:
1182
+ handle.prepare_gradient_for_optim()
1183
+ _p_assert(
1184
+ hasattr(flat_param, "_post_backward_called"),
1185
+ "Expects `_post_backward_called` to be set on the `FlatParameter`",
1186
+ )
1187
+ flat_param._post_backward_called = False
1188
+
1189
+
1190
+ @no_type_check
1191
+ def _prefetch_handle(
1192
+ state: _FSDPState,
1193
+ current_handle: Optional[FlatParamHandle],
1194
+ prefetch_mode: _PrefetchMode,
1195
+ ) -> None:
1196
+ """
1197
+ Prefetches the next handle if needed (without synchronization). If there is
1198
+ no current handle, then there is nothing to prefetch.
1199
+ """
1200
+ if not current_handle:
1201
+ return
1202
+ handle = _get_handle_to_prefetch(state, current_handle)
1203
+ if not handle:
1204
+ return
1205
+ # Temporarily emulate the training state while calling `_unshard` to
1206
+ # ensure the correct `as_params` for `_use_unsharded_views()`
1207
+ prev_training_state = handle._training_state
1208
+ if prefetch_mode == _PrefetchMode.BACKWARD:
1209
+ handle._training_state = HandleTrainingState.BACKWARD_PRE
1210
+ elif prefetch_mode == _PrefetchMode.FORWARD:
1211
+ handle._training_state = HandleTrainingState.FORWARD
1212
+ else:
1213
+ raise ValueError(f"Invalid prefetch mode on rank {state.rank}: {prefetch_mode}")
1214
+ # Prefetch the next set of handles without synchronizing to allow
1215
+ # the sync to happen as late as possible to maximize overlap
1216
+ _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)
1217
+ handle._training_state = prev_training_state
1218
+ handle._prefetched = True
1219
+
1220
+
1221
+ @no_type_check
1222
+ def _get_handle_to_prefetch(
1223
+ state: _FSDPState,
1224
+ current_handle: FlatParamHandle,
1225
+ ) -> FlatParamHandle:
1226
+ """
1227
+ Returns the handle to prefetch for the next module (or ``None`` if there is
1228
+ nothing to prefetch), where ``current_handle`` represents the current module.
1229
+
1230
+ "Prefetching" refers to running the unshard logic early (without
1231
+ synchronization), and the "next" modules depend on the recorded execution
1232
+ order and the current training state.
1233
+ """
1234
+ training_state = _get_training_state(current_handle)
1235
+ valid_training_states = (
1236
+ HandleTrainingState.BACKWARD_PRE,
1237
+ HandleTrainingState.BACKWARD_POST,
1238
+ HandleTrainingState.FORWARD,
1239
+ )
1240
+ _p_assert(
1241
+ training_state in valid_training_states,
1242
+ f"Prefetching is only supported in {valid_training_states} but "
1243
+ f"currently in {training_state}",
1244
+ )
1245
+ eod = state._exec_order_data
1246
+ target_handle: Optional[FlatParamHandle] = None
1247
+ if (
1248
+ training_state == HandleTrainingState.BACKWARD_PRE
1249
+ and state.backward_prefetch == BackwardPrefetch.BACKWARD_PRE
1250
+ ) or (
1251
+ training_state == HandleTrainingState.BACKWARD_POST
1252
+ and state.backward_prefetch == BackwardPrefetch.BACKWARD_POST
1253
+ ):
1254
+ target_handle_candidate = eod.get_handle_to_backward_prefetch(current_handle)
1255
+ if (
1256
+ target_handle_candidate
1257
+ and target_handle_candidate._needs_pre_backward_unshard
1258
+ and not target_handle_candidate._prefetched
1259
+ ):
1260
+ target_handle = target_handle_candidate
1261
+ else:
1262
+ target_handle = None
1263
+ elif training_state == HandleTrainingState.FORWARD and state.forward_prefetch:
1264
+ target_handle_candidate = eod.get_handle_to_forward_prefetch(current_handle)
1265
+ if (
1266
+ target_handle_candidate
1267
+ and target_handle_candidate._needs_pre_forward_unshard
1268
+ and not target_handle_candidate._prefetched
1269
+ ):
1270
+ target_handle = target_handle_candidate
1271
+ else:
1272
+ target_handle = None
1273
+
1274
+ return target_handle
1275
+
1276
+
1277
+ def _get_training_state(
1278
+ handle: FlatParamHandle,
1279
+ ) -> HandleTrainingState:
1280
+ """Returns the training state of the handles in ``handle``."""
1281
+ _p_assert(handle, "Expects a non-empty handle")
1282
+ return handle._training_state
1283
+
1284
+
1285
+ @no_type_check
1286
+ def _register_pre_forward_hook(
1287
+ state: _FSDPState,
1288
+ module: nn.Module,
1289
+ ) -> None:
1290
+ """
1291
+ Registers a pre-forward hook on ``module``.
1292
+ """
1293
+ for forward_handle in state._pre_forward_handles:
1294
+ forward_handle.remove()
1295
+ state._pre_forward_handles.clear()
1296
+ module_param_handle = state._fully_sharded_module_to_handle.get(module, None)
1297
+ hook = functools.partial(
1298
+ _pre_forward, state, module_param_handle, _pre_forward_unshard
1299
+ )
1300
+ state._pre_forward_handles.append(
1301
+ module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True)
1302
+ )
1303
+
1304
+
1305
+ @no_type_check
1306
+ def _register_post_forward_hook(
1307
+ state: _FSDPState,
1308
+ module: nn.Module,
1309
+ ) -> None:
1310
+ """
1311
+ Registers a post-forward hook on ``module``. Even if the module has no
1312
+ handles, we should register the hook since it will register the module's
1313
+ pre-backward hook.
1314
+ """
1315
+ for forward_handle in state._post_forward_handles:
1316
+ forward_handle.remove()
1317
+ state._post_forward_handles.clear()
1318
+ module_param_handle = state._fully_sharded_module_to_handle.get(module, None)
1319
+ hook = functools.partial(
1320
+ _post_forward,
1321
+ state,
1322
+ module_param_handle,
1323
+ _post_forward_reshard,
1324
+ )
1325
+ state._post_forward_handles.append(module.register_forward_hook(hook))
1326
+
1327
+
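The registration pattern above (binding FSDP state with ``functools.partial`` and letting ``nn.Module`` supply the standard hook arguments) can be seen on a plain module. A minimal sketch with a hypothetical ``post_forward`` hook; only the ``functools.partial`` + ``register_forward_hook`` mechanics are the point here:

import functools
import torch
import torch.nn as nn

def post_forward(tag, module, inputs, output):
    # `register_forward_hook` supplies (module, inputs, output); `tag` is bound below.
    print(f"{tag}: forward ran on {type(module).__name__}")
    return output

linear = nn.Linear(2, 2)
hook = functools.partial(post_forward, "demo")   # bind extra state up front
handle = linear.register_forward_hook(hook)
linear(torch.randn(1, 2))                        # prints once
handle.remove()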
1328
+ @no_type_check
1329
+ def _register_root_pre_forward_hook(
1330
+ state: _FSDPState,
1331
+ module: nn.Module,
1332
+ ):
1333
+ """
1334
+ Registers root pre-forward hook on ``module``, which should be the local
1335
+ FSDP root.
1336
+
1337
+ NOTE: For the current composable FSDP design, each application of
1338
+ ``fully_shard()`` to a module indicates that that module is the local
1339
+ FSDP root. We may remove this assumption in the future, in which case we
1340
+ will need to register this root pre-forward hook on any candidate module
1341
+ that may be the local FSDP root.
1342
+ """
1343
+ for forward_handle in state._root_pre_forward_handles:
1344
+ forward_handle.remove()
1345
+ state._root_pre_forward_handles.clear()
1346
+ hook = functools.partial(_root_pre_forward, state)
1347
+ state._root_pre_forward_handles.append(
1348
+ module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True)
1349
+ )
1350
+
1351
+
1352
+ @no_type_check
1353
+ def _register_pre_backward_hooks(
1354
+ state: _FSDPState,
1355
+ module: nn.Module,
1356
+ outputs: Any,
1357
+ handle: FlatParamHandle,
1358
+ ) -> None:
1359
+ """
1360
+ Registers pre-backward hooks on the tensors that require gradients in the
1361
+ forward pass outputs ``outputs``, which were computed using the
1362
+ ``FlatParameter`` of ``handle``.
1363
+
1364
+ Args:
1365
+ module (nn.Module): Fully sharded module (see [Note: Fully Sharded
1366
+ Module]).
1367
+
1368
+ Returns:
1369
+ Forward pass outputs with pre-backward hooks registered to tensors that
1370
+ require gradients.
1371
+ """
1372
+ # If there is no gradient computation, then there is no need for
1373
+ # pre-backward logic
1374
+ if not torch.is_grad_enabled():
1375
+ return outputs
1376
+ if state._is_root:
1377
+ state._post_backward_callback_queued = False # only defined on the root
1378
+
1379
+ if handle:
1380
+ handle._needs_pre_backward_unshard = False
1381
+ # Since these handles' `FlatParameter`s participated in a forward, we
1382
+ # conservatively assume that they will be used in the backward
1383
+ handle._ran_pre_backward_hook = False
1384
+
1385
+ def _register_hook(t: torch.Tensor) -> torch.Tensor:
1386
+ if t.requires_grad:
1387
+ t.register_hook(
1388
+ functools.partial(_pre_backward_hook, state, module, handle)
1389
+ )
1390
+ if handle:
1391
+ handle._needs_pre_backward_unshard = True
1392
+ return t
1393
+
1394
+ return _apply_to_tensors(_register_hook, outputs)
1395
+
1396
+
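The per-tensor hooks registered above are ordinary autograd tensor hooks: they fire when the gradient with respect to a forward output is about to flow backward, which is the signal FSDP uses to unshard just in time. A standalone sketch with a hypothetical printing hook:

import torch

def print_hook(grad):
    # Runs right before the gradient for this tensor propagates further backward.
    print("pre-backward fired")
    return grad

out = torch.randn(3, requires_grad=True) * 2  # non-leaf output that requires grad
out.register_hook(print_hook)
out.sum().backward()                          # prints once during backward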
1397
+ def _register_post_backward_hook(
1398
+ state: _FSDPState,
1399
+ handle: Optional[FlatParamHandle],
1400
+ ) -> None:
1401
+ """
1402
+ Registers post-backward hooks on the ``FlatParameter`` s'
1403
+ ``AccumulateGrad`` objects to reshard and to reduce-scatter gradients.
1404
+
1405
+ The ``AccumulateGrad`` object represents the last function that finalizes
1406
+ the ``FlatParameter`` 's gradient, so it only runs after its entire
1407
+ gradient computation has finished.
1408
+
1409
+ We register the post-backward hook only once in the *first* forward that a
1410
+ ``FlatParameter`` participates in. This relies on the ``AccumulateGrad``
1411
+ object being preserved through multiple forwards.
1412
+
1413
+ NOTE: We follow this heuristic to prefer the *first* forward to target the
1414
+ parameter mixed precision case, where there are *separate*
1415
+ ``AccumulateGrad`` objects across the different forwards. (Without
1416
+ parameter mixed precision, the ``AccumulateGrad`` objects are the same.) If
1417
+ we instead prefer the *last* forward, then the hook runs early.
1418
+ """
1419
+ # If there is no gradient computation, then there is no need for
1420
+ # post-backward logic
1421
+ if not torch.is_grad_enabled():
1422
+ return
1423
+ if not handle:
1424
+ return
1425
+ flat_param = handle.flat_param
1426
+
1427
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1428
+ already_registered = hasattr(flat_param, "_post_backward_hook_handle")
1429
+ if already_registered or not flat_param.requires_grad:
1430
+ return
1431
+ hook = functools.partial(_post_backward_hook, state, handle)
1432
+ hook_handle = flat_param.register_post_accumulate_grad_hook(hook)
1433
+ flat_param._post_backward_hook_handle = hook_handle # type: ignore[attr-defined]
1434
+ else:
1435
+ already_registered = hasattr(flat_param, "_post_backward_hook_state")
1436
+ if already_registered or not flat_param.requires_grad:
1437
+ return
1438
+ # Get the `AccumulateGrad` object
1439
+ temp_flat_param = flat_param.expand_as(flat_param)
1440
+ _p_assert(
1441
+ temp_flat_param.grad_fn is not None,
1442
+ "The `grad_fn` is needed to access the `AccumulateGrad` and "
1443
+ "register the post-backward hook",
1444
+ )
1445
+ acc_grad = temp_flat_param.grad_fn.next_functions[0][0] # type: ignore[union-attr]
1446
+ assert acc_grad is not None
1447
+ hook_handle = acc_grad.register_hook(
1448
+ functools.partial(_post_backward_hook, state, handle)
1449
+ )
1450
+ flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined]
1451
+
1452
+
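The ``expand_as`` trick in the non-compiled branch above is the standard way to reach a leaf tensor's ``AccumulateGrad`` node in eager PyTorch: the expanded view's ``grad_fn.next_functions`` points at the node that finalizes the leaf's ``.grad``. A minimal standalone sketch (the tensor and the printing hook are illustrative, not FSDP internals):

import torch

p = torch.randn(4, requires_grad=True)

# `p` is a leaf and has no `grad_fn`; expanding it creates a non-leaf view whose
# graph edge leads back to the `AccumulateGrad` node for `p`.
acc_grad = p.expand_as(p).grad_fn.next_functions[0][0]
hook_handle = acc_grad.register_hook(lambda *unused: print("post-backward for p"))

(p * 2).sum().backward()  # prints once, after `p.grad` has been accumulated
hook_handle.remove()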
1453
+ def _register_post_backward_reshard_only_hook(
1454
+ state: _FSDPState,
1455
+ handle: Optional[FlatParamHandle],
1456
+ args: Tuple[Any, ...],
1457
+ kwargs: Dict[str, Any],
1458
+ ) -> None:
1459
+ """
1460
+ Registers post-backward hooks to reshard flat parameters that do not
1461
+ require gradient. We register these using multi-post-grad hooks on the
1462
+ input activations to ensure that all gradients that may depend on the
1463
+ parameters have been computed before resharding.
1464
+ """
1465
+ # If there is no gradient computation, then there is no need for
1466
+ # post-backward logic
1467
+ if not torch.is_grad_enabled():
1468
+ return
1469
+ # Construct `inp_tensors` lazily to avoid CPU overhead in typical case
1470
+ # where each flat parameter requires gradient
1471
+ inp_tensors: Optional[List[torch.Tensor]] = None
1472
+ if not handle:
1473
+ return
1474
+ flat_param = handle.flat_param
1475
+
1476
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1477
+ already_registered = hasattr(flat_param, "_post_backward_hook_handle")
1478
+ else:
1479
+ already_registered = hasattr(flat_param, "_post_backward_hook_state")
1480
+
1481
+ if already_registered or flat_param.requires_grad:
1482
+ return
1483
+ if inp_tensors is None:
1484
+ args_flat = pytree.arg_tree_leaves(*args, **kwargs)
1485
+ inp_tensors = [
1486
+ obj for obj in args_flat if torch.is_tensor(obj) and obj.requires_grad
1487
+ ]
1488
+ assert inp_tensors is not None # mypy
1489
+ hook_handle = register_multi_grad_hook(
1490
+ inp_tensors, functools.partial(_post_backward_reshard_only_hook, state, handle)
1491
+ )
1492
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1493
+ flat_param._post_backward_hook_handle = hook_handle # type: ignore[attr-defined, assignment]
1494
+ else:
1495
+ flat_param._post_backward_hook_state = (hook_handle,) # type: ignore[attr-defined, assignment]
1496
+
1497
+
1498
+ @no_type_check
1499
+ def _register_post_backward_final_callback(
1500
+ state: _FSDPState, module: nn.Module
1501
+ ) -> None:
1502
+ """
1503
+ Registers the post-backward final callback that runs at the end of the
1504
+ backward pass. This should be called from the root FSDP instance at the
1505
+ beginning of the pre-backward.
1506
+ """
1507
+ _p_assert(
1508
+ state._is_root,
1509
+ "Only the root FSDP instance should register the post-backward callback",
1510
+ )
1511
+ if state._post_backward_callback_queued:
1512
+ return
1513
+ _assert_in_training_states(state, [TrainingState.IDLE])
1514
+ # Trace does not need this callback
1515
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
1516
+ state._post_backward_callback_queued = True
1517
+ Variable._execution_engine.queue_callback(
1518
+ functools.partial(_post_backward_final_callback, state, module)
1519
+ )
1520
+
1521
+
1522
+ def _wait_for_computation_stream(
1523
+ computation_stream: torch.Stream,
1524
+ unshard_stream: torch.Stream,
1525
+ pre_unshard_stream: torch.Stream,
1526
+ ):
1527
+ """
1528
+ Has the unshard and pre-unshard streams wait for the computation stream.
1529
+ For example, this should be called in the FSDP root's pre-forward to
1530
+ respect optimizer step computation.
1531
+ """
1532
+ # Tracing does not need to wait
1533
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1534
+ return
1535
+ unshard_stream.wait_stream(computation_stream) # type: ignore[attr-defined]
1536
+ # Having the pre-all-gather stream wait for the current stream even if we
1537
+ # do not leverage the pre-all-gather stream is tolerable since this only
1538
+ # runs once per iteration
1539
+ pre_unshard_stream.wait_stream(computation_stream) # type: ignore[attr-defined]
1540
+
1541
+
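``wait_stream`` above is the usual CUDA stream-ordering primitive: work enqueued on the waiting stream after the call does not start until the waited-on stream reaches that point. A small sketch, guarded so it is a no-op on a CPU-only build; the stream names are illustrative:

import torch

if torch.cuda.is_available():
    computation_stream = torch.cuda.current_stream()
    unshard_stream = torch.cuda.Stream()
    pre_unshard_stream = torch.cuda.Stream()
    # Anything enqueued on the side streams after these calls waits for the
    # computation stream, mirroring the helper above.
    unshard_stream.wait_stream(computation_stream)
    pre_unshard_stream.wait_stream(computation_stream)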
1542
+ def _reset_flat_param_grad_info_if_needed(
1543
+ handles: List[FlatParamHandle],
1544
+ ):
1545
+ """
1546
+ Clears the original parameters' gradients if needed. This method's CPU
1547
+ overhead is minimal, so we may call it throughout FSDP methods, which serve
1548
+ as callsites to free the gradient memory earlier.
1549
+ """
1550
+ if not isinstance(handles, list):
1551
+ handles = [handles]
1552
+ for handle in handles:
1553
+ if handle._use_orig_params:
1554
+ handle._reset_flat_param_grad_info_if_needed()
1555
+
1556
+
1557
+ @no_type_check
1558
+ def _get_buffers_and_dtypes_for_computation(
1559
+ state: _FSDPState,
1560
+ root_module: nn.Module,
1561
+ ) -> Tuple[List[torch.Tensor], List[Optional[torch.dtype]]]:
1562
+ """
1563
+ Returns all buffers in the module tree rooted at ``root_module`` and a
1564
+ corresponding list of the buffer dtypes for computation. Each buffer dtype
1565
+ is either ``None`` if buffer mixed precision is not enabled or the buffer
1566
+ low precision dtype otherwise.
1567
+ """
1568
+ _p_assert(state._is_root, "Expects the root to cast buffers")
1569
+ buffers: List[torch.Tensor] = []
1570
+ buffer_dtypes: List[Optional[torch.dtype]] = []
1571
+ visited_buffers: Set[torch.Tensor] = set()
1572
+ # Traverse the FSDP states bottom-up so that we prefer the owning FSDP
1573
+ # instance's mixed precision setting for each buffer
1574
+ fsdp_states, fsdp_modules = traversal_utils._get_fsdp_states_with_modules(
1575
+ root_module
1576
+ )
1577
+ for fsdp_state, fsdp_module in zip(reversed(fsdp_states), reversed(fsdp_modules)):
1578
+ for buffer_name, buffer in fsdp_module.named_buffers():
1579
+ if buffer in visited_buffers:
1580
+ continue
1581
+ visited_buffers.add(buffer)
1582
+ if clean_tensor_name(buffer_name) in fsdp_state._ignored_buffer_names:
1583
+ continue
1584
+ buffers.append(buffer)
1585
+ buffer_dtypes.append(fsdp_state.mixed_precision.buffer_dtype)
1586
+ assert len(buffers) == len(buffer_dtypes), f"{len(buffers)} {len(buffer_dtypes)}"
1587
+ return buffers, buffer_dtypes
1588
+
1589
+
1590
+ @no_type_check
1591
+ def _get_orig_buffer_dtypes(
1592
+ state: _FSDPState,
1593
+ buffer_names: List[str],
1594
+ ) -> List[torch.dtype]:
1595
+ """
1596
+ Returns the original buffer types of the given buffer names.
1597
+ """
1598
+ buffer_dtypes: List[torch.dtype] = []
1599
+ for buffer_name in buffer_names:
1600
+ _p_assert(
1601
+ buffer_name in state._buffer_name_to_orig_dtype,
1602
+ f"{buffer_name} is missing from pre-computed dict on rank "
1603
+ f"{state.rank}, which only has keys "
1604
+ f"{state._buffer_name_to_orig_dtype.keys()}",
1605
+ )
1606
+ buffer_dtypes.append(state._buffer_name_to_orig_dtype[buffer_name])
1607
+ return buffer_dtypes
1608
+
1609
+
1610
+ def _cast_buffers_to_dtype_and_device(
1611
+ buffers: List[torch.Tensor],
1612
+ buffer_dtypes: List[Optional[torch.dtype]],
1613
+ device: torch.device,
1614
+ ) -> None:
1615
+ """
1616
+ Casts ``buffers`` to the dtypes given by ``buffer_dtypes`` and moves them
1617
+ to ``device``. If an element in ``buffer_dtypes`` is ``None``, then the
1618
+ corresponding buffer is only moved to ``device``.
1619
+ """
1620
+ _p_assert(
1621
+ buffer_dtypes is None or len(buffers) == len(buffer_dtypes),
1622
+ f"Expects `buffers` and `buffer_dtypes` to have the same length if "
1623
+ f"`buffer_dtypes` is specified but got {len(buffers)} and "
1624
+ f"{len(buffer_dtypes)}",
1625
+ )
1626
+ for buffer, buffer_dtype in zip(buffers, buffer_dtypes):
1627
+ if not torch.is_floating_point(buffer) or buffer_dtype is None:
1628
+ buffer.data = buffer.to(device=device)
1629
+ else:
1630
+ buffer.data = buffer.to(device=device, dtype=buffer_dtype)
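A small usage sketch of the buffer-casting convention above, with hypothetical buffers: a floating-point buffer that gets a low-precision dtype, and an integer buffer whose dtype entry is ``None`` so it is only moved:

import torch

buffers = [torch.zeros(4), torch.zeros(4, dtype=torch.int64)]
buffer_dtypes = [torch.float16, None]   # `None` means "move only, keep the dtype"
device = torch.device("cpu")            # stand-in for the FSDP compute device

for buf, dtype in zip(buffers, buffer_dtypes):
    if not torch.is_floating_point(buf) or dtype is None:
        buf.data = buf.to(device=device)
    else:
        buf.data = buf.to(device=device, dtype=dtype)

assert buffers[0].dtype == torch.float16 and buffers[1].dtype == torch.int64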
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py ADDED
@@ -0,0 +1,127 @@
1
+ import copy
2
+ import itertools
3
+ import math
4
+ from typing import Optional
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+ from torch.distributed import distributed_c10d
9
+ from torch.distributed._shard.sharded_tensor import (
10
+ Shard,
11
+ ShardedTensor,
12
+ ShardedTensorMetadata,
13
+ TensorProperties,
14
+ )
15
+ from torch.distributed._shard.sharding_spec import ShardMetadata
16
+ from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard as DShard
17
+
18
+
19
+ def _get_remote_device_str(rank, device_type, num_devices_per_node):
20
+ if device_type.lower() == "cpu":
21
+ return f"rank:{rank}/{device_type}"
22
+ else:
23
+ return f"rank:{rank}/{device_type}:{rank % num_devices_per_node}"
24
+
25
+
26
+ def _create_chunk_sharded_tensor(
27
+ tensor: torch.Tensor,
28
+ rank: int,
29
+ world_size: int,
30
+ num_devices_per_node: int,
31
+ pg: dist.ProcessGroup,
32
+ device: Optional[torch.device] = None,
33
+ ) -> ShardedTensor:
34
+ """
35
+ Shard a tensor into chunks along the first dimension. The local rank gets its
36
+ corresponding chunk as the local shard to create a ShardedTensor.
37
+ """
38
+ chunks = tensor.chunk(world_size, dim=0)
39
+ if len(chunks) > rank:
40
+ local_shard = chunks[rank].clone()
41
+ offsets = [0 for _ in tensor.size()]
42
+ offsets[0] = math.ceil(tensor.size()[0] / world_size) * rank
43
+ local_shards = [Shard.from_tensor_and_offsets(local_shard, offsets, rank)]
44
+ else:
45
+ local_shards = []
46
+
47
+ # Create a ShardedTensor without invoking communication.
48
+ chunk_sizes = [list(chunk.size()) for chunk in chunks]
49
+ dim0_offsets = [0] + list(
50
+ itertools.accumulate([chunk_size[0] for chunk_size in chunk_sizes])
51
+ )[:-1]
52
+ offsets = [0] * (len(chunk_sizes[0]) - 1)
53
+ chunk_offsets = [[d0] + offsets for d0 in dim0_offsets]
54
+ device_type = (
55
+ distributed_c10d._get_pg_default_device(pg).type
56
+ if device is None
57
+ else device.type
58
+ )
59
+ placements = [
60
+ _get_remote_device_str(r, device_type, num_devices_per_node)
61
+ for r in range(len(chunk_sizes))
62
+ ]
63
+ assert len(chunk_sizes) == len(chunk_offsets) == len(placements)
64
+ shard_metadata = [
65
+ ShardMetadata(offset, size, placement)
66
+ for offset, size, placement in zip(chunk_offsets, chunk_sizes, placements)
67
+ ]
68
+ sharded_tensor_metadata = ShardedTensorMetadata(
69
+ shards_metadata=shard_metadata,
70
+ size=tensor.size(),
71
+ tensor_properties=TensorProperties(
72
+ dtype=tensor.dtype,
73
+ layout=tensor.layout,
74
+ requires_grad=False,
75
+ memory_format=torch.contiguous_format,
76
+ pin_memory=tensor.is_pinned(),
77
+ ),
78
+ )
79
+ return ShardedTensor._init_from_local_shards_and_global_metadata(
80
+ local_shards, sharded_tensor_metadata=sharded_tensor_metadata, process_group=pg
81
+ )
82
+
83
+
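The dim-0 offset bookkeeping in ``_create_chunk_sharded_tensor`` can be exercised without any process group. A minimal sketch with a hypothetical 10x2 tensor and a world size of 4:

import itertools
import torch

tensor = torch.zeros(10, 2)
world_size = 4

chunks = tensor.chunk(world_size, dim=0)                 # dim-0 sizes [3, 3, 3, 1]
chunk_sizes = [list(c.size()) for c in chunks]
dim0_offsets = [0] + list(
    itertools.accumulate(size[0] for size in chunk_sizes)
)[:-1]                                                    # [0, 3, 6, 9]
chunk_offsets = [[d0] + [0] * (len(chunk_sizes[0]) - 1) for d0 in dim0_offsets]

assert chunk_offsets == [[0, 0], [3, 0], [6, 0], [9, 0]]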
84
+ def _create_chunk_dtensor(
85
+ tensor: torch.Tensor,
86
+ rank: int,
87
+ device_mesh: DeviceMesh,
88
+ ) -> DTensor:
89
+ """
90
+ Shard a tensor into chunks along the first dimension. The local rank gets its
91
+ corresponding chunk as the local tensor to create a DTensor.
92
+ """
93
+ # We need to explicitly call .detach() to return a new tensor detached from the current graph.
94
+ tensor = tensor.clone().detach()
95
+
96
+ # FSDP placements: [Shard(0)]
97
+ # HSDP placements: [Replicate(), Shard(0)]
98
+ replicate_placements = [Replicate() for _ in range(device_mesh.ndim)]
99
+ shard_placements = [Replicate() for _ in range(device_mesh.ndim)]
100
+ shard_placements[-1] = DShard(0) # type: ignore[call-overload]
101
+
102
+ return DTensor.from_local(
103
+ tensor, device_mesh, replicate_placements, run_check=False
104
+ ).redistribute(
105
+ placements=shard_placements,
106
+ )
107
+
108
+
109
+ def _all_gather_dtensor(
110
+ tensor: DTensor,
111
+ parent_mesh: Optional[DeviceMesh],
112
+ ) -> torch.Tensor:
113
+ """
114
+ All gather a DTensor in its sharded dimension and return the local tensor.
115
+ """
116
+ assert parent_mesh is None
117
+
118
+ placements = list(copy.deepcopy(tensor.placements))
119
+ # FSDP placements: [Shard(0)] -> [Replicate()]
120
+ # HSDP placements: [Replicate(), Shard(0)] -> [Replicate(), Replicate()]
121
+ placements[-1] = Replicate()
122
+ tensor = tensor.redistribute(
123
+ device_mesh=tensor.device_mesh,
124
+ placements=placements,
125
+ )
126
+
127
+ return tensor.to_local()
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py ADDED
@@ -0,0 +1,928 @@
1
+ import contextlib
2
+ import logging
3
+ import math
4
+ import warnings
5
+ from typing import (
6
+ Any,
7
+ Callable,
8
+ cast,
9
+ Dict,
10
+ Generator,
11
+ Iterator,
12
+ List,
13
+ no_type_check,
14
+ Tuple,
15
+ )
16
+
17
+ import torch
18
+ import torch.distributed as dist
19
+
20
+ import torch.distributed.algorithms._checkpoint.checkpoint_wrapper as checkpoint_wrapper
21
+
22
+ import torch.nn as nn
23
+ import torch.nn.functional as F
24
+ from torch.distributed._shard.sharded_tensor import (
25
+ init_from_local_shards,
26
+ Shard,
27
+ ShardedTensor,
28
+ )
29
+ from torch.distributed._tensor import DTensor
30
+ from torch.distributed.device_mesh import _mesh_resources
31
+
32
+ from torch.distributed.fsdp._common_utils import (
33
+ _FSDPState,
34
+ _get_module_fsdp_state_if_fully_sharded_module,
35
+ _has_fsdp_params,
36
+ _is_composable,
37
+ _module_handle,
38
+ clean_tensor_name,
39
+ FSDP_PREFIX,
40
+ FSDP_WRAPPED_MODULE,
41
+ )
42
+ from torch.distributed.fsdp._debug_utils import SimpleProfiler
43
+ from torch.distributed.fsdp._runtime_utils import (
44
+ _cast_buffers_to_dtype_and_device,
45
+ _get_orig_buffer_dtypes,
46
+ _lazy_init,
47
+ _reset_flat_param_grad_info_if_needed,
48
+ )
49
+ from torch.distributed.fsdp.api import (
50
+ FullStateDictConfig,
51
+ ShardingStrategy,
52
+ StateDictType,
53
+ )
54
+ from torch.distributed.utils import _replace_by_prefix
55
+
56
+ from ._fsdp_extensions import (
57
+ _ext_all_gather_dtensor,
58
+ _ext_chunk_dtensor,
59
+ _ext_chunk_tensor,
60
+ _ext_post_unflatten_transform,
61
+ _ext_pre_load_state_dict_transform,
62
+ )
63
+ from ._unshard_param_utils import _unshard_fsdp_state_params, FLAT_PARAM
64
+
65
+
66
+ logger = logging.getLogger(__name__)
67
+
68
+
69
+ def _should_unshard_params(fsdp_state: _FSDPState) -> bool:
70
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD and (
71
+ _is_composable(fsdp_state) or fsdp_state._use_orig_params
72
+ ):
73
+ return False
74
+ else:
75
+ return True
76
+
77
+
78
+ def _convert_to_wrapped_module_name(module_name: str) -> str:
79
+ module_name = module_name.replace(f"{FSDP_PREFIX}", "")
80
+ module_name = module_name.replace(f"{FSDP_WRAPPED_MODULE}", "")
81
+ if module_name:
82
+ module_name = f"{module_name}."
83
+ # `CheckpointWrapper` adds a prefix that has to be removed as well.
84
+ module_name = module_name.replace(checkpoint_wrapper._CHECKPOINT_PREFIX, "")
85
+ return module_name
86
+
87
+
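For intuition, the name cleaning above strips FSDP's wrapper prefixes so that state_dict keys line up with the original module's FQNs. A sketch that imports the real constants rather than hard-coding their values; the wrapped name itself is hypothetical:

from torch.distributed.fsdp._common_utils import FSDP_PREFIX, FSDP_WRAPPED_MODULE

wrapped_name = f"{FSDP_WRAPPED_MODULE}.layers.0"   # hypothetical wrapped submodule name

cleaned = wrapped_name.replace(FSDP_PREFIX, "").replace(FSDP_WRAPPED_MODULE, "")
if cleaned:
    cleaned = f"{cleaned}."
# `cleaned` can now be prepended to a parameter name, e.g. f"{cleaned}weight".
print(cleaned)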
88
+ def _param_name_infos(
89
+ module: nn.Module, fsdp_state: _FSDPState
90
+ ) -> Iterator[Tuple[str, str, str]]:
91
+ if not _has_fsdp_params(fsdp_state, module):
92
+ return
93
+ for param_name, module_name in _module_handle(
94
+ fsdp_state, module
95
+ ).param_module_names():
96
+ module_name = _convert_to_wrapped_module_name(module_name)
97
+ fqn = f"{module_name}{param_name}"
98
+ yield fqn, param_name, module_name
99
+
100
+
101
+ def _shared_param_name_infos(
102
+ module: nn.Module, fsdp_state
103
+ ) -> Iterator[Tuple[str, str, str]]:
104
+ for param_name, module_name in _module_handle(
105
+ fsdp_state, module
106
+ ).shared_param_module_names():
107
+ module_name = _convert_to_wrapped_module_name(module_name)
108
+ fqn = f"{module_name}{param_name}"
109
+ yield fqn, param_name, module_name
110
+
111
+
112
+ @no_type_check
113
+ def _enter_unshard_params_ctx(
114
+ module: nn.Module,
115
+ fsdp_state: _FSDPState,
116
+ writeback: bool = False,
117
+ rank0_only: bool = False,
118
+ offload_to_cpu: bool = False,
119
+ with_grads: bool = False,
120
+ ) -> None:
121
+ """
122
+ state_dict hooks cannot use the pure context call as the checkpoint flow
123
+ requires entering the context in the pre-hook but leaving the context in the
124
+ post-hook. This API enters the context of ``_unshard_fsdp_state_params``.
125
+ """
126
+ assert module not in fsdp_state._unshard_params_ctx, (
127
+ "Entering the ``_unshard_fsdp_state_params`` context but _unshard_params_ctx[module] "
128
+ "is not None."
129
+ )
130
+ fsdp_state._unshard_params_ctx[module] = _unshard_fsdp_state_params(
131
+ module,
132
+ fsdp_state,
133
+ writeback=writeback,
134
+ rank0_only=rank0_only,
135
+ offload_to_cpu=offload_to_cpu,
136
+ with_grads=with_grads,
137
+ )
138
+ fsdp_state._unshard_params_ctx[module].__enter__()
139
+
140
+
141
+ @no_type_check
142
+ def _exit_unshard_params_ctx(module: nn.Module, fsdp_state: _FSDPState) -> None:
143
+ """A helper function to exit ``_unshard_fsdp_state_params`` context."""
144
+ fsdp_state._unshard_params_ctx[module].__exit__(None, None, None)
145
+ fsdp_state._unshard_params_ctx.pop(module)
146
+
147
+
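The explicit ``__enter__``/``__exit__`` calls above exist because the unshard context has to be opened in a pre-state_dict hook and closed in the matching post-state_dict hook, so a plain ``with`` block cannot span the two. A toy sketch of the same split, using a stand-in context manager:

import contextlib

@contextlib.contextmanager
def unshard_params():
    print("unsharded")       # stand-in for all-gathering the flat parameters
    try:
        yield
    finally:
        print("resharded")   # stand-in for freeing the unsharded data

ctx = unshard_params()
ctx.__enter__()                  # what the pre-hook does (and stashes `ctx`)
# ... state_dict collection happens between the two hooks ...
ctx.__exit__(None, None, None)   # what the post-hook does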
148
+ def _common_pre_state_dict_hook(
149
+ module: nn.Module,
150
+ fsdp_state: _FSDPState,
151
+ ) -> None:
152
+ """Performs the pre-state_dict tasks shared by all state_dict types."""
153
+ if fsdp_state._device_handle.is_available():
154
+ fsdp_state._device_handle.synchronize()
155
+ # TODO: need to check if this is always correct for composable FSDP.
156
+ _lazy_init(fsdp_state, module)
157
+ if fsdp_state._is_root:
158
+ _reset_flat_param_grad_info_if_needed(fsdp_state._all_handles)
159
+
160
+
161
+ def _common_unshard_pre_state_dict_hook(
162
+ module: nn.Module,
163
+ fsdp_state: _FSDPState,
164
+ offload_to_cpu: bool,
165
+ rank0_only: bool,
166
+ ) -> None:
167
+ """
168
+ Performs the pre-state_dict tasks shared by all state_dict types that require
169
+ ``_unshard_fsdp_state_params()``. FULL_STATE_DICT and SHARDED_STATE_DICT use this hook.
170
+ """
171
+ # For composable `fully_shard`, it does not need to unshard parameters for `NO_SHARD` cases.
172
+ if not _should_unshard_params(fsdp_state):
173
+ return
174
+ _enter_unshard_params_ctx(
175
+ module,
176
+ fsdp_state,
177
+ writeback=False,
178
+ offload_to_cpu=offload_to_cpu,
179
+ rank0_only=rank0_only,
180
+ )
181
+
182
+
183
+ @no_type_check
184
+ def _common_unshard_post_state_dict_hook(
185
+ module: nn.Module,
186
+ fsdp_state: _FSDPState,
187
+ state_dict: Dict[str, Any],
188
+ prefix: str,
189
+ param_hook: Callable,
190
+ ) -> Dict[str, Any]:
191
+ """
192
+ The post-state_dict flow shared by all state_dict types that require
193
+ ``_unshard_fsdp_state_params()``. FULL_STATE_DICT and SHARDED_STATE_DICT use this
194
+ hook.
195
+ """
196
+ _replace_by_prefix(state_dict, prefix + f"{FSDP_PREFIX}", prefix)
197
+ # Return early for trivial cases
198
+ if not state_dict or not _has_fsdp_params(fsdp_state, module):
199
+ if _should_unshard_params(fsdp_state):
200
+ _exit_unshard_params_ctx(module, fsdp_state)
201
+ return state_dict
202
+
203
+ # If a rank does not have unsharded parameters (when `rank0_only=True`
204
+ # and `rank != 0`), then the rank only needs to participate in the
205
+ # all-gather and does not need to save the state dict. We simply check
206
+ # rank0_only to handle this case.
207
+ rank0_only = (
208
+ fsdp_state._state_dict_type == StateDictType.FULL_STATE_DICT
209
+ and cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only
210
+ )
211
+ # no_fsdp_return means the state_dict returned by this rank should contain
212
+ # only non-FSDP controlled parameters and buffers.
213
+ no_fsdp_return = rank0_only and fsdp_state.rank != 0
214
+ if no_fsdp_return and not fsdp_state._use_orig_params:
215
+ for clean_key in fsdp_state._buffer_names:
216
+ # This is a hack to support activation checkpoint.
217
+ clean_key = clean_key.replace(
218
+ f"{checkpoint_wrapper._CHECKPOINT_PREFIX}.", ""
219
+ )
220
+ state_dict.pop(f"{prefix}{clean_key}", None)
221
+ # Non-zero ranks have flat_param key when rank0_only=True, because rank0_only=True is
222
+ # passed in to unshard context, but nonzero ranks reshard early, causing this flat_param
223
+ # to appear in state_dict.
224
+ state_dict.pop(f"{prefix}{FLAT_PARAM}")
225
+ _exit_unshard_params_ctx(module, fsdp_state)
226
+ return state_dict
227
+
228
+ # Loop only the parameters saved in this instance's wrapped module to
229
+ # avoid processing buffers.
230
+ for fqn, param_name, module_name in _param_name_infos(module, fsdp_state):
231
+ fqn = f"{prefix}{fqn}"
232
+ if no_fsdp_return:
233
+ state_dict.pop(fqn)
234
+ continue
235
+ assert fqn in state_dict, (
236
+ f"FSDP assumes {fqn} is in the state_dict but the state_dict only "
237
+ f"has {state_dict.keys()}. "
238
+ f"prefix={prefix}, module_name={module_name}, "
239
+ f"param_name={param_name} rank={fsdp_state.rank}."
240
+ )
241
+
242
+ param_hook(state_dict, prefix, fqn)
243
+
244
+ if _should_unshard_params(fsdp_state):
245
+ _exit_unshard_params_ctx(module, fsdp_state)
246
+
247
+ cpu_device = torch.device("cpu")
248
+ buffer_clean_fqns = []
249
+ buffers = []
250
+ for clean_key in fsdp_state._buffer_names:
251
+ # This is a hack to support activation checkpoint.
252
+ clean_key = clean_tensor_name(clean_key)
253
+ fqn = f"{prefix}{clean_key}"
254
+ if fqn not in state_dict:
255
+ # A buffer can be registered as non-persistent.
256
+ continue
257
+ if no_fsdp_return:
258
+ state_dict.pop(fqn)
259
+ else:
260
+ buffer = state_dict[fqn]
261
+ if (
262
+ fsdp_state._state_dict_config.offload_to_cpu
263
+ and buffer.device != cpu_device
264
+ ):
265
+ state_dict[fqn] = buffer.to(cpu_device)
266
+ # skip upcasting for ignored buffers
267
+ if clean_key not in fsdp_state._ignored_buffer_names:
268
+ buffer_clean_fqns.append(clean_key)
269
+ buffers.append(state_dict[fqn])
270
+
271
+ if buffers:
272
+ mixed_precision_enabled_for_buffers = (
273
+ fsdp_state._mixed_precision_enabled_for_buffers()
274
+ if not _is_composable(fsdp_state)
275
+ else (fsdp_state.mixed_precision.buffer_dtype is not None)
276
+ )
277
+ if mixed_precision_enabled_for_buffers:
278
+ buffer_dtypes = _get_orig_buffer_dtypes(fsdp_state, buffer_clean_fqns)
279
+ _cast_buffers_to_dtype_and_device(
280
+ buffers, buffer_dtypes, fsdp_state.compute_device
281
+ )
282
+ for buffer, clean_fqn in zip(buffers, buffer_clean_fqns):
283
+ fqn = f"{prefix}{clean_fqn}"
284
+ logger.info("FSDP is casting the dtype of %s to %s", fqn, buffer.dtype)
285
+ state_dict[fqn] = buffer.clone()
286
+ return state_dict
287
+
288
+
289
+ @no_type_check
290
+ def _full_pre_state_dict_hook(
291
+ fsdp_state: _FSDPState,
292
+ module: nn.Module,
293
+ *args,
294
+ **kwargs,
295
+ ) -> None:
296
+ """
297
+ Hook that runs before model.state_dict() is called. pre-state_dict hook is
298
+ not actually supported by ``nn.Module``. As a result, this API is called
299
+ from ``_full_post_state_dict_hook()`` to simulate the case. Once pre-state_dict
300
+ is supported in ``nn.Module``, this hook will be registered as a hook in
301
+ ``nn.Module``.
302
+ """
303
+ if getattr(fsdp_state, "_device_mesh", False):
304
+ parent_mesh = _mesh_resources.get_parent_mesh(fsdp_state._device_mesh)
305
+
306
+ _common_pre_state_dict_hook(module, fsdp_state)
307
+ _common_unshard_pre_state_dict_hook(
308
+ module,
309
+ fsdp_state,
310
+ offload_to_cpu=fsdp_state._state_dict_config.offload_to_cpu,
311
+ rank0_only=cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only,
312
+ )
313
+
314
+
315
+ @no_type_check
316
+ def _full_post_state_dict_hook(
317
+ module: nn.Module,
318
+ fsdp_state: _FSDPState,
319
+ state_dict: Dict[str, Any],
320
+ prefix: str,
321
+ ) -> Dict[str, Any]:
322
+ """
323
+ Hook that runs after model.state_dict() is called, before returning the result to the
324
+ user. For FSDP, we may have to clone the tensors in state_dict as params go
325
+ back to sharded version after _unshard_fsdp_state_params ends, and also remove
326
+ the ``FSDP_WRAPPED_MODULE`` prefix.
327
+ """
328
+
329
+ def param_hook(
330
+ state_dict: Dict[str, Any],
331
+ prefix: str,
332
+ fqn: str,
333
+ ) -> None:
334
+ clean_key = fqn
335
+ clean_prefix = clean_tensor_name(prefix)
336
+ # Strip the prefix from the key if needed, since buffer and param names
337
+ # do not include the prefix because they are not computed in the `state_dict`
338
+ # call.
339
+ if clean_key.startswith(clean_prefix):
340
+ clean_key = clean_key[len(clean_prefix) :]
341
+
342
+ # Clone parameters before exiting the `_unshard_fsdp_state_params()` context.
343
+ if not getattr(state_dict[fqn], "_has_been_cloned", False):
344
+ try:
345
+ state_dict[fqn] = state_dict[fqn].clone().detach()
346
+ state_dict[fqn]._has_been_cloned = True # type: ignore[attr-defined]
347
+ except BaseException as e:
348
+ warnings.warn(
349
+ f"Failed to clone() tensor with name {fqn} on rank {fsdp_state.rank}. "
350
+ "This may mean that this state_dict entry could point to invalid "
351
+ "memory regions after returning from state_dict() call if this "
352
+ "parameter is managed by FSDP. Please check clone "
353
+ f"implementation of {fqn}. Error: {str(e)}"
354
+ )
355
+
356
+ return _common_unshard_post_state_dict_hook(
357
+ module, fsdp_state, state_dict, prefix, param_hook
358
+ )
359
+
360
+
361
+ def _full_pre_load_state_dict_hook(
362
+ module: nn.Module,
363
+ fsdp_state: _FSDPState,
364
+ state_dict: Dict[str, Any],
365
+ prefix: str,
366
+ ) -> None:
367
+ _lazy_init(fsdp_state, module)
368
+ if _should_unshard_params(fsdp_state):
369
+ with SimpleProfiler.profile("_enter_unshard_params_ctx"):
370
+ _enter_unshard_params_ctx(module, fsdp_state, writeback=True)
371
+ # Add FSDP_PREFIX only for wrapper-based FSDP.
372
+ if not _is_composable(fsdp_state):
373
+ _replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_PREFIX}")
374
+
375
+
376
+ def _full_post_load_state_dict_hook(
377
+ module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
378
+ ) -> None:
379
+ if _should_unshard_params(fsdp_state):
380
+ with SimpleProfiler.profile("_exit_unshard_params_ctx"):
381
+ _exit_unshard_params_ctx(module, fsdp_state)
382
+
383
+
384
+ def _local_pre_state_dict_hook(
385
+ fsdp_state: _FSDPState,
386
+ module: nn.Module,
387
+ *args,
388
+ **kwargs,
389
+ ) -> None:
390
+ """
391
+ Hook that runs before model.state_dict() is called. Right now, pre-state_dict
392
+ hook is not supported by the PyTorch core. So this API is called from
393
+ `_local_post_state_dict_hook()` to simulate the case.
394
+ """
395
+ if (
396
+ _has_fsdp_params(fsdp_state, module)
397
+ and not _module_handle(fsdp_state, module).uses_sharded_strategy
398
+ ):
399
+ raise RuntimeError(
400
+ "``local_state_dict`` can only be used when parameters are flatten "
401
+ "and sharded."
402
+ )
403
+ _common_pre_state_dict_hook(module, fsdp_state)
404
+
405
+
406
+ @no_type_check
407
+ def _local_post_state_dict_hook(
408
+ module: nn.Module,
409
+ fsdp_state: _FSDPState,
410
+ state_dict: Dict[str, Any],
411
+ prefix: str,
412
+ ) -> Dict[str, Any]:
413
+ """
414
+ This hook creates a ShardedTensor from the local flat_param and replaces
415
+ state_dict[f"{prefix}{FLAT_PARAM}"] with the ShardedTensor. No copy
416
+ happens; the underlying storage is the same.
417
+ """
418
+
419
+ _replace_by_prefix(state_dict, f"{prefix}{FSDP_PREFIX}", prefix)
420
+ if not _has_fsdp_params(fsdp_state, module):
421
+ return state_dict
422
+
423
+ # state_dict[f"{prefix}{FLAT_PARAM}"] exists and has the same tensor
424
+ # value as the flat_param but it is a pure Tensor because
425
+ # nn.Module.state_dict() will detach the parameter. Therefore, we need
426
+ # to get flat_param to get the metadata.
427
+ assert _module_handle(fsdp_state, module), "Should have returned early"
428
+ flat_param = _module_handle(fsdp_state, module).flat_param
429
+ # Constructs a ShardedTensor from the flat_param "without" padding.
430
+ # Removing the padding allows users to change the number of ranks
431
+ # when loading the local_state_dict.
432
+ full_numel = flat_param._unpadded_unsharded_size.numel() # type: ignore[attr-defined]
433
+ shard_offset = flat_param.numel() * fsdp_state.rank
434
+ valid_data_size = flat_param.numel() - flat_param._shard_numel_padded
435
+ if valid_data_size > 0:
436
+ # If FlatParameter is returned, FlatParameter._local_shard causes a
437
+ # pickling issue (torch.save works but torch.load does not). Since there
438
+ # is no benefit for state_dict to return the actual FlatParameter class,
439
+ # a view (which is a tensor) of the FlatParameter will be returned.
440
+ flat_param = flat_param[:valid_data_size].view(valid_data_size)
441
+ local_shards = [
442
+ Shard.from_tensor_and_offsets(flat_param, [shard_offset], fsdp_state.rank)
443
+ ]
444
+ else:
445
+ local_shards = []
446
+ sharded_tensor = init_from_local_shards(
447
+ local_shards, full_numel, process_group=fsdp_state.process_group
448
+ ) # type: ignore[assignment]
449
+ # TODO: Add DTensor state_dict support for LOCAL_STATE_DICT.
450
+ if fsdp_state._state_dict_config.offload_to_cpu:
451
+ sharded_tensor = sharded_tensor.cpu()
452
+ state_dict[f"{prefix}{FLAT_PARAM}"] = sharded_tensor
453
+ return state_dict
454
+
455
+
456
+ def _local_post_load_state_dict_hook(
457
+ module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
458
+ ) -> None:
459
+ pass
460
+
461
+
462
+ def _local_pre_load_state_dict_hook(
463
+ module: nn.Module,
464
+ fsdp_state: _FSDPState,
465
+ state_dict: Dict[str, Any],
466
+ prefix: str,
467
+ ) -> None:
468
+ """
469
+ This hook finds the local flat_param for this FSDP module from the
470
+ state_dict. The flat_param should be a ShardedTensor. This hook converts
471
+ the ShardedTensor to a tensor. No copy happens unless padding is required.
472
+ """
473
+ _lazy_init(fsdp_state, module)
474
+ _replace_by_prefix(state_dict, prefix, f"{prefix}{FSDP_PREFIX}")
475
+ fqn = f"{prefix}{FSDP_PREFIX}{FLAT_PARAM}"
476
+ if fqn not in state_dict:
477
+ assert not _has_fsdp_params(fsdp_state, module), (
478
+ "No `FlatParameter` in `state_dict` for this FSDP instance "
479
+ "but it has parameters"
480
+ )
481
+ return
482
+ load_tensor = state_dict[fqn]
483
+ assert isinstance(
484
+ load_tensor, ShardedTensor
485
+ ), "Tensors in local_state_dict should be ShardedTensor."
486
+
487
+ # Convert the ShardedTensor to a Tensor.
488
+ flat_param = _module_handle(fsdp_state, module).flat_param
489
+ assert flat_param is not None
490
+ valid_data_size = flat_param.numel() - flat_param._shard_numel_padded
491
+ shards = load_tensor.local_shards()
492
+ if valid_data_size > 0:
493
+ assert len(shards), "load_local_state_dict assume one shard per ShardedTensor."
494
+ load_tensor = shards[0].tensor
495
+
496
+ # Get the metadata of the flat_param to decide whether to pad the loaded
497
+ # tensor.
498
+ if flat_param._shard_numel_padded > 0:
499
+ assert load_tensor.numel() < flat_param.numel(), (
500
+ f"Local shard size = {flat_param.numel()} and the tensor in "
501
+ f"the state_dict is {load_tensor.numel()}."
502
+ )
503
+ load_tensor = F.pad(load_tensor, [0, flat_param._shard_numel_padded])
504
+ else:
505
+ load_tensor = flat_param
506
+ # TODO: Add DTensor state_dict support for LOCAL_STATE_DICT.
507
+ state_dict[fqn] = load_tensor
508
+
509
+
510
+ def _sharded_pre_state_dict_hook(
511
+ fsdp_state: _FSDPState,
512
+ module: nn.Module,
513
+ *args,
514
+ **kwargs,
515
+ ) -> None:
516
+ """
517
+ Hook that runs before model.state_dict() is called. Check
518
+ ``_full_pre_state_dict_hook`` for details.
519
+ """
520
+ if (
521
+ _has_fsdp_params(fsdp_state, module)
522
+ and not _module_handle(fsdp_state, module).uses_sharded_strategy
523
+ ):
524
+ raise RuntimeError(
525
+ "``sharded_state_dict`` can only be used when parameters are flatten "
526
+ "and sharded."
527
+ )
528
+ _common_pre_state_dict_hook(module, fsdp_state)
529
+ # Setting offload_to_cpu here does not work even if offload_to_cpu is True.
530
+ # We have to create the ShardedTensor first and then move it to CPU.
531
+ _common_unshard_pre_state_dict_hook(
532
+ module,
533
+ fsdp_state,
534
+ offload_to_cpu=False,
535
+ rank0_only=False,
536
+ )
537
+
538
+
539
+ @no_type_check
540
+ def _sharded_post_state_dict_hook(
541
+ module: nn.Module,
542
+ fsdp_state: _FSDPState,
543
+ state_dict: Dict[str, Any],
544
+ prefix: str,
545
+ ) -> Dict[str, Any]:
546
+ """
547
+ The hook replaces the unflattened, unsharded parameter in the state_dict
548
+ with an unflattened, sharded parameter (a ShardedTensor).
549
+ """
550
+
551
+ def param_hook(state_dict: Dict[str, Any], prefix: str, fqn: str):
552
+ param = state_dict[fqn]
553
+ if not fsdp_state._state_dict_config._use_dtensor:
554
+ sharded_tensor = _ext_chunk_tensor(
555
+ tensor=param,
556
+ rank=fsdp_state.rank,
557
+ world_size=fsdp_state.world_size,
558
+ num_devices_per_node=fsdp_state._device_handle.device_count(),
559
+ pg=fsdp_state.process_group,
560
+ fsdp_extension=fsdp_state._fsdp_extension,
561
+ )
562
+ else:
563
+ sharded_tensor = _ext_chunk_dtensor(
564
+ tensor=param,
565
+ rank=fsdp_state.rank,
566
+ device_mesh=fsdp_state._device_mesh,
567
+ fsdp_extension=fsdp_state._fsdp_extension,
568
+ )
569
+ if fsdp_state._state_dict_config.offload_to_cpu:
570
+ sharded_tensor = sharded_tensor.cpu()
571
+ state_dict[fqn] = sharded_tensor
572
+
573
+ return _common_unshard_post_state_dict_hook(
574
+ module, fsdp_state, state_dict, prefix, param_hook
575
+ )
576
+
577
+
578
+ @no_type_check
579
+ def _sharded_post_load_state_dict_hook(
580
+ module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
581
+ ) -> None:
582
+ if _has_fsdp_params(fsdp_state, module):
583
+ with SimpleProfiler.profile("_exit_unshard_params_ctx"):
584
+ _exit_unshard_params_ctx(module, fsdp_state)
585
+
586
+
587
+ @no_type_check
588
+ def _sharded_pre_load_state_dict_hook(
589
+ module: nn.Module,
590
+ fsdp_state: _FSDPState,
591
+ state_dict: Dict[str, Any],
592
+ prefix: str,
593
+ ) -> None:
594
+ """
595
+ The hook combines the unflattened, sharded parameters (ShardedTensor) into
596
+ a new FlatParameter and shards the new FlatParameter to the local chunk.
597
+ """
598
+ _lazy_init(fsdp_state, module)
599
+ if not _is_composable(fsdp_state):
600
+ _replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_PREFIX}")
601
+ if not _has_fsdp_params(fsdp_state, module):
602
+ return
603
+
604
+ handle = _module_handle(fsdp_state, module)
605
+ if not handle.uses_sharded_strategy:
606
+ raise RuntimeError(
607
+ "load_sharded_state_dict can only be called when parameters "
608
+ "are flattened and sharded."
609
+ )
610
+ fqn_to_param_ext = dict(
611
+ zip(handle.flat_param._fqns, handle.flat_param._param_extensions)
612
+ )
613
+
614
+ for fqn, _, _ in _param_name_infos(module, fsdp_state):
615
+ if not _is_composable(fsdp_state):
616
+ fqn_from_global_root = f"{prefix}{FSDP_PREFIX}{fqn}"
617
+ else:
618
+ fqn_from_global_root = f"{prefix}{fqn}"
619
+ try:
620
+ param = state_dict.pop(fqn_from_global_root)
621
+ except KeyError:
622
+ logger.warning(
623
+ f"Did not find param with FQN {fqn_from_global_root}, skipping it. " # noqa: G004
624
+ "The weight will not be filled if you expect it to be."
625
+ )
626
+ continue # TODO: Improve unittesting for state_dict finetuning
627
+ # cases: https://github.com/pytorch/pytorch/issues/109134
628
+
629
+ if not fsdp_state._state_dict_config._use_dtensor:
630
+ # All-gather the param (ShardedTensor)
631
+ param, shards = _ext_pre_load_state_dict_transform(
632
+ param, fsdp_state._fsdp_extension
633
+ )
634
+
635
+ assert len(shards) < 2, (
636
+ "Expects 0 or 1 shard per rank "
637
+ f"but got {len(shards)} shards on rank {fsdp_state.rank}."
638
+ )
639
+ param_numel = param.size().numel()
640
+ dim_0_size = param.size()[0]
641
+ chunk_size = (
642
+ math.ceil(dim_0_size / fsdp_state.world_size)
643
+ * param_numel
644
+ // dim_0_size
645
+ )
646
+ if len(shards) == 1:
647
+ local_tensor = shards[0].tensor.flatten()
648
+ with SimpleProfiler.profile(SimpleProfiler.Type.H2D):
649
+ local_tensor = local_tensor.to(fsdp_state.compute_device)
650
+ num_padding = chunk_size - local_tensor.numel()
651
+ if num_padding > 0:
652
+ local_tensor = F.pad(local_tensor, [0, num_padding])
653
+ else:
654
+ local_tensor = torch.zeros(
655
+ chunk_size, dtype=param.dtype, device=fsdp_state.compute_device
656
+ )
657
+ tensor = torch.empty(
658
+ chunk_size * fsdp_state.world_size,
659
+ dtype=local_tensor.dtype,
660
+ device=fsdp_state.compute_device,
661
+ )
662
+ with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
663
+ dist.all_gather_into_tensor(
664
+ tensor, local_tensor, group=fsdp_state.process_group
665
+ )
666
+ tensor = tensor.narrow(0, 0, param_numel).reshape(param.size())
667
+ state_dict[fqn_from_global_root] = tensor
668
+ else:
669
+ if param.device != fsdp_state._device_mesh.device_type:
670
+ param = param.to(fsdp_state._device_mesh.device_type)
671
+
672
+ parent_mesh = _mesh_resources.get_parent_mesh(fsdp_state._device_mesh)
673
+ local_tensor = _ext_all_gather_dtensor(
674
+ param, parent_mesh, fsdp_state._fsdp_extension
675
+ )
676
+
677
+ if fqn_to_param_ext.get(fqn) is not None:
678
+ ext = fqn_to_param_ext[fqn]
679
+ local_tensor = _ext_post_unflatten_transform(
680
+ local_tensor, ext, fsdp_state._fsdp_extension
681
+ )
682
+ state_dict[fqn_from_global_root] = local_tensor
683
+
684
+ with SimpleProfiler.profile("_enter_unshard_params_ctx"):
685
+ _enter_unshard_params_ctx(module, fsdp_state, writeback=True)
686
+
687
+
688
+ @contextlib.contextmanager
689
+ def _replace_with_full_state_dict_type(fsdp_state: _FSDPState) -> Generator:
690
+ old_state_dict_config = fsdp_state._state_dict_config
691
+ old_state_dict_type = fsdp_state._state_dict_type
692
+ fsdp_state._state_dict_config = FullStateDictConfig()
693
+ fsdp_state._state_dict_type = StateDictType.FULL_STATE_DICT
694
+ yield
695
+ fsdp_state._state_dict_config = old_state_dict_config
696
+ fsdp_state._state_dict_type = old_state_dict_type
697
+
698
+
699
+ @no_type_check
700
+ @torch.no_grad()
701
+ def _post_state_dict_hook(
702
+ module: nn.Module,
703
+ state_dict: Dict[str, Any],
704
+ prefix: str,
705
+ *args: Any,
706
+ ) -> Dict[str, Any]:
707
+ """
708
+ _post_state_dict_hook() is called after the state_dict() of this
709
+ FSDP module is executed. ``fsdp_state._state_dict_type`` is used to decide
710
+ what postprocessing will be done.
711
+ """
712
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
713
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
714
+ context = _replace_with_full_state_dict_type(fsdp_state)
715
+ warnings.warn(
716
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will"
717
+ "be returned."
718
+ )
719
+ else:
720
+ context = contextlib.nullcontext()
721
+
722
+ with context:
723
+ _post_state_dict_hook_fn = {
724
+ StateDictType.FULL_STATE_DICT: _full_post_state_dict_hook,
725
+ StateDictType.LOCAL_STATE_DICT: _local_post_state_dict_hook,
726
+ StateDictType.SHARDED_STATE_DICT: _sharded_post_state_dict_hook,
727
+ }
728
+ processed_state_dict = _post_state_dict_hook_fn[fsdp_state._state_dict_type](
729
+ module, fsdp_state, state_dict, prefix
730
+ )
731
+
732
+ if fsdp_state._is_root:
733
+ logger.info("FSDP finished processing state_dict(), prefix=%s", prefix)
734
+ for key, tensor in sorted(processed_state_dict.items()):
735
+ if key.startswith(prefix) and isinstance(tensor, torch.Tensor):
736
+ local_shape = tensor.shape
737
+ if isinstance(tensor, ShardedTensor):
738
+ local_shape = None
739
+ shards = tensor.local_shards()
740
+ if shards:
741
+ local_shape = shards[0].tensor.shape
742
+ elif isinstance(tensor, DTensor):
743
+ local_shape = tensor.to_local().shape
744
+ logger.info(
745
+ "FQN=%s: type=%s, shape=%s, local_shape=%s, dtype=%s, device=%s",
746
+ key,
747
+ type(tensor),
748
+ tensor.shape,
749
+ local_shape,
750
+ tensor.dtype,
751
+ tensor.device,
752
+ )
753
+
754
+ return processed_state_dict
755
+
756
+
757
+ @no_type_check
758
+ @torch.no_grad()
759
+ def _pre_state_dict_hook(
760
+ module: nn.Module,
761
+ *args,
762
+ **kwargs,
763
+ ) -> None:
764
+ """
765
+ This is called before the core state dict saving logic of ``module``.
766
+ ``fsdp_state._state_dict_type`` is used to decide what preprocessing will
767
+ be done.
768
+ """
769
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
770
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
771
+ context = _replace_with_full_state_dict_type(fsdp_state)
772
+ warnings.warn(
773
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will"
774
+ "be returned."
775
+ )
776
+ else:
777
+ _set_use_dtensor(fsdp_state)
778
+ context = contextlib.nullcontext()
779
+
780
+ with context:
781
+ _pre_state_dict_hook_fn = {
782
+ StateDictType.FULL_STATE_DICT: _full_pre_state_dict_hook,
783
+ StateDictType.LOCAL_STATE_DICT: _local_pre_state_dict_hook,
784
+ StateDictType.SHARDED_STATE_DICT: _sharded_pre_state_dict_hook,
785
+ }
786
+ _pre_state_dict_hook_fn[fsdp_state._state_dict_type](
787
+ fsdp_state,
788
+ module,
789
+ *args,
790
+ **kwargs,
791
+ )
792
+
793
+
794
+ @no_type_check
795
+ def _set_use_dtensor(fsdp_state: _FSDPState) -> None:
796
+ # If device_mesh is passed in when initializing FSDP, we automatically set the
797
+ # _use_dtensor flag to True for ShardedStateDictConfig().
798
+ if getattr(fsdp_state, "_device_mesh", None):
799
+ state_dict_type = fsdp_state._state_dict_type
800
+ if state_dict_type == StateDictType.LOCAL_STATE_DICT:
801
+ raise RuntimeError(
802
+ "Found state_dict_type LOCAL_STATE_DICT",
803
+ "DeviceMesh is not compatible with LOCAL_STATE_DICT.",
804
+ "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.",
805
+ )
806
+ else:
807
+ fsdp_state._state_dict_config._use_dtensor = True
808
+
809
+
810
+ @no_type_check
811
+ @torch.no_grad()
812
+ def _pre_load_state_dict_hook(
813
+ module: nn.Module,
814
+ state_dict: Dict[str, Any],
815
+ prefix: str,
816
+ *args: Any,
817
+ ) -> None:
818
+ """
819
+ This is called before ``module._load_from_state_dict()``.
820
+ ``fsdp_state._state_dict_type`` is used to decide what preprocessing will
821
+ be done.
822
+ """
823
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
824
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
825
+ context = _replace_with_full_state_dict_type(fsdp_state)
826
+ warnings.warn(
827
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will"
828
+ "be returned."
829
+ )
830
+ else:
831
+ _set_use_dtensor(fsdp_state)
832
+ context = contextlib.nullcontext()
833
+
834
+ _lazy_init(fsdp_state, module)
835
+ if fsdp_state._is_root:
836
+ SimpleProfiler.reset()
837
+
838
+ with context:
839
+ _pre_load_state_dict_hook_fn = {
840
+ StateDictType.FULL_STATE_DICT: _full_pre_load_state_dict_hook,
841
+ StateDictType.LOCAL_STATE_DICT: _local_pre_load_state_dict_hook,
842
+ StateDictType.SHARDED_STATE_DICT: _sharded_pre_load_state_dict_hook,
843
+ }
844
+ # Code that is common for all state_dict impls
845
+ if fsdp_state._device_handle.is_available():
846
+ fsdp_state._device_handle.synchronize()
847
+ # Dispatch into state_dict specific implementation of pre-hook.
848
+ _pre_load_state_dict_hook_fn[fsdp_state._state_dict_type](
849
+ module, fsdp_state, state_dict, prefix
850
+ )
851
+
852
+
853
+ @no_type_check
854
+ @torch.no_grad()
855
+ def _post_load_state_dict_hook(
856
+ module: nn.Module,
857
+ incompatible_keys: Tuple[List[str], List[str]],
858
+ *args: Any,
859
+ ) -> None:
860
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
861
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
862
+ context = _replace_with_full_state_dict_type(fsdp_state)
863
+ warnings.warn(
864
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will"
865
+ "be returned."
866
+ )
867
+ else:
868
+ context = contextlib.nullcontext()
869
+
870
+ with context:
871
+ _post_load_state_dict_hook_fn = {
872
+ StateDictType.FULL_STATE_DICT: _full_post_load_state_dict_hook,
873
+ StateDictType.LOCAL_STATE_DICT: _local_post_load_state_dict_hook,
874
+ StateDictType.SHARDED_STATE_DICT: _sharded_post_load_state_dict_hook,
875
+ }
876
+ # Code that is common for all state_dict impls
877
+ # Dispatch into state_dict type specific implementation of post-hook for
878
+ # loading state_dict.
879
+ _post_load_state_dict_hook_fn[fsdp_state._state_dict_type](module, fsdp_state)
880
+
881
+ # When reporting incompatible keys, trim FSDP prefixes.
882
+ missing_keys = incompatible_keys[0]
883
+ unexpected_keys = incompatible_keys[1]
884
+ for i in range(len(missing_keys)):
885
+ missing_keys[i] = clean_tensor_name(missing_keys[i])
886
+
887
+ for i in range(len(unexpected_keys)):
888
+ unexpected_keys[i] = clean_tensor_name(unexpected_keys[i])
889
+
890
+ if fsdp_state._is_root:
891
+ SimpleProfiler.dump_and_reset("FSDP model load_state_dict profiling: ")
892
+
893
+
894
+ def _register_all_state_dict_hooks(state: _FSDPState):
895
+ """
896
+ Registers pre-save, post-save, pre-load, and post-load state dict hooks.
897
+ """
898
+ for hook_registration_fn_str, hook, hook_registration_fn_kwargs in (
899
+ ("register_state_dict_pre_hook", _pre_state_dict_hook, {}),
900
+ ("_register_state_dict_hook", _post_state_dict_hook, {}),
901
+ (
902
+ "_register_load_state_dict_pre_hook",
903
+ _pre_load_state_dict_hook,
904
+ {"with_module": True},
905
+ ),
906
+ ("register_load_state_dict_post_hook", _post_load_state_dict_hook, {}),
907
+ ):
908
+ _register_state_dict_hooks_base(
909
+ state, hook_registration_fn_str, hook, hook_registration_fn_kwargs
910
+ )
911
+
912
+
913
+ @no_type_check
914
+ def _register_state_dict_hooks_base(
915
+ state: _FSDPState,
916
+ hook_registration_fn_name: str,
917
+ hook: Callable,
918
+ hook_registration_fn_kwargs: Dict[str, Any],
919
+ ) -> None:
920
+ """Registers ``hook`` using ``hook_registration_fn``."""
921
+ if not _is_composable(state):
922
+ getattr(state, hook_registration_fn_name)(hook, **hook_registration_fn_kwargs)
923
+ else:
924
+ handle = state._handle
925
+ if handle:
926
+ getattr(handle._fully_sharded_module, hook_registration_fn_name)(
927
+ hook, **hook_registration_fn_kwargs
928
+ )
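
The hooks above are dispatched through ``fsdp_state._state_dict_type`` and are normally exercised indirectly via the public ``FSDP.state_dict_type`` context manager rather than called directly. Below is a minimal, illustrative sketch of that flow; it assumes ``torch.distributed`` is already initialized and that ``model`` is an ordinary ``nn.Module`` on this rank (the function name is made up for illustration and is not part of the file above).

# Illustrative sketch only: triggers the _full_* pre/post state_dict hooks
# defined above through the public FSDP API.
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import FullStateDictConfig, StateDictType

def save_full_state_dict(model: torch.nn.Module) -> dict:
    fsdp_model = FSDP(model)
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    # Entering this context sets _state_dict_type/_state_dict_config, so the
    # _pre_state_dict_hook/_post_state_dict_hook above run inside state_dict().
    with FSDP.state_dict_type(fsdp_model, StateDictType.FULL_STATE_DICT, cfg):
        return fsdp_model.state_dict()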
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py ADDED
@@ -0,0 +1,237 @@
1
+ import functools
2
+ from contextlib import contextmanager
3
+ from dataclasses import dataclass, field
4
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+
9
+
10
+ @dataclass
11
+ class TracingConfig:
12
+ """
13
+ This represents a symbolic tracing configuration.
14
+
15
+ Args:
16
+ tracer (torch.fx.Tracer): An instance of :class:`torch.fx.Tracer` to
17
+ use for symbolic tracing. The default value is the native
18
+ :class:`torch.fx.Tracer` constructed with default arguments.
19
+ However, the user may want to pass a different value such as the
20
+ ``HFTracer`` for models in the HuggingFace Transformers_ library.
21
+ .. _Transformers: https://huggingface.co/docs/transformers/index
22
+ concrete_args (Optional[Dict[str, Any]]): Concrete arguments that
23
+ should not be treated as ``torch.fx.Proxy`` when tracing the
24
+ module ``forward()``. Passing ``concrete_args`` allows partially
25
+ specializing the forward, e.g. to remove control flow or data
26
+ structures. This ``concrete_args`` here is the same argument used
27
+ in :meth:`~torch.fx.Tracer.trace`.
28
+ """
29
+
30
+ tracer: torch.fx.Tracer = field(default_factory=torch.fx.Tracer)
31
+ concrete_args: Optional[Dict[str, Any]] = None
32
+
33
+
34
+ class _ParamUsageInfo(NamedTuple):
35
+ """
36
+ This is used for ``_ExecutionInfo.module_to_param_usage_infos`` to record
37
+ execution information. The ``dict`` maps modules to a list of these
38
+ ``_ParamUsageInfo`` instances, where each instance represents a group of
39
+ parameters used together.
40
+
41
+ Specifically, for each module key in the ``dict``, each instance of this
42
+ class represents either:
43
+ (1) the module and some sublist of its ``named_parameters()`` used
44
+ together in execution (see ``_patched_create_proxy()``), or
45
+ (2) a submodule and all of ``submodule.named_parameters()`` (see
46
+ ``_patched_call_module()``).
47
+
48
+ Type (1) corresponds to directly using parameters in ops without calling
49
+ ``forward()``, and type (2) corresponds to calling ``forward()``. The
50
+ mapped-to lists in the ``dict`` follow the execution order.
51
+ """
52
+
53
+ module: nn.Module
54
+ named_params: List[Tuple[str, nn.Parameter]]
55
+
56
+
57
+ class _ExecutionInfo:
58
+ """
59
+ This represents the execution order information from the forward pass.
60
+
61
+ Attributes:
62
+ curr_module (nn.Module): Current module being traced.
63
+ module_forward_order (List[nn.Module]): The modules in (pre-)forward
64
+ order, i.e. the order in which their ``forward()`` methods are
65
+ called. Each call to a module's ``forward()`` corresponds to one
66
+ element in the list.
67
+ module_to_param_usage_infos (Dict[nn.Module, List[_ParamUsageInfo]]):
68
+ Maps a module to a list of module execution infos. See
69
+ :class:`_ParamUsageInfo` for details.
70
+ param_forward_order (List[nn.Parameter]): The parameters in forward
71
+ execution order, where only a parameter's first participation is
72
+ included.
73
+ visited_params (Set[nn.Parameter]): The parameters visited so far
74
+ during the trace. This is only used during tracing for fast
75
+ membership check. Invariant: The parameters in
76
+ ``param_forward_order`` are exactly those in ``visited_params``.
77
+ """
78
+
79
+ def __init__(self, root_module: nn.Module) -> None:
80
+ self.curr_module: nn.Module = root_module
81
+ self.module_forward_order: List[nn.Module] = [root_module]
82
+ self.module_to_param_usage_infos: Dict[nn.Module, List[_ParamUsageInfo]] = {
83
+ root_module: []
84
+ }
85
+ self.param_forward_order: List[nn.Parameter] = []
86
+ self.visited_params: Set[nn.Parameter] = set()
87
+
88
+
89
+ class _ExecOrderTracer:
90
+ def __init__(self) -> None:
91
+ self.exec_info: Optional[_ExecutionInfo] = None
92
+
93
+ @contextmanager
94
+ def patch_tracer(self, tracer: torch.fx.Tracer, root_module: nn.Module):
95
+ self.exec_info = _ExecutionInfo(root_module)
96
+ orig_call_module = tracer.call_module
97
+ orig_create_proxy = tracer.create_proxy
98
+ tracer.call_module = functools.partial(
99
+ self._patched_call_module, orig_call_module, self.exec_info
100
+ )
101
+ fqn_to_param = dict(root_module.named_parameters())
102
+ tracer.create_proxy = functools.partial(
103
+ self._patched_create_proxy,
104
+ orig_create_proxy,
105
+ self.exec_info,
106
+ fqn_to_param,
107
+ )
108
+ try:
109
+ yield
110
+ finally:
111
+ tracer.call_module = orig_call_module
112
+ tracer.create_proxy = orig_create_proxy
113
+
114
+ def _patched_call_module(
115
+ self,
116
+ call_module: Callable,
117
+ exec_info: _ExecutionInfo,
118
+ # Below are the expected arguments to `call_module()`
119
+ module: nn.Module,
120
+ forward: Callable,
121
+ args: Tuple[Any, ...],
122
+ kwargs: Dict[str, Any],
123
+ ) -> Any:
124
+ """
125
+ Overrides ``call_module`` to save execution information to
126
+ ``exec_info``. Note that ``call_module`` is called during symbolic
127
+ tracing for each non-root module.
128
+
129
+ Args:
130
+ call_module (Callable): Original ``call_module`` to override.
131
+ exec_info (_ExecutionInfo): Used to record execution information.
132
+ module (nn.Module): Module corresponding to this ``call_module``.
133
+ forward (Callable): ``forward()`` method of ``module`` to be called
134
+ for this ``call_module``.
135
+ args (Tuple[Any, ...]): Positional arguments for ``forward``.
136
+ kwargs (Dict[str, Any]): Keyword arguments for ``forward``.
137
+
138
+ Returns:
139
+ Same return value as ``call_module``.
140
+ """
141
+ exec_info.module_forward_order.append(module)
142
+ named_params = list(module.named_parameters())
143
+ curr_module = exec_info.curr_module
144
+ if named_params:
145
+ assert (
146
+ curr_module in exec_info.module_to_param_usage_infos
147
+ ), "The current module should have already been processed by a patched `call_module`"
148
+ exec_info.module_to_param_usage_infos[exec_info.curr_module].append(
149
+ _ParamUsageInfo(module, named_params)
150
+ )
151
+ prev_curr_module = curr_module
152
+ exec_info.curr_module = module
153
+ exec_info.module_to_param_usage_infos[module] = []
154
+ output = call_module(module, forward, args, kwargs)
155
+ exec_info.curr_module = prev_curr_module
156
+ return output
157
+
158
+ def _patched_create_proxy(
159
+ self,
160
+ create_proxy: Callable,
161
+ exec_info: _ExecutionInfo,
162
+ fqn_to_param: Dict[str, nn.Parameter],
163
+ # Below are the expected arguments to `create_proxy()`
164
+ kind: str,
165
+ target: torch.fx.node.Target,
166
+ args: Tuple[Any, ...],
167
+ kwargs: Dict[str, Any],
168
+ name: Optional[str] = None,
169
+ type_expr: Optional[Any] = None,
170
+ proxy_factory_fn: Optional[Callable[[torch.fx.Node], torch.fx.Proxy]] = None,
171
+ ) -> torch.fx.Proxy:
172
+ """
173
+ Overrides ``create_proxy`` to save execution information to
174
+ ``exec_info``. Note that ``create_proxy`` is called during symbolic
175
+ tracing for each leaf function/method/module.
176
+
177
+ Args:
178
+ create_proxy (Callable): Original ``create_proxy`` to override.
179
+ exec_info (_ExecutionInfo): Used to record execution information.
180
+ fqn_to_param (Dict[str, nn.Parameter]): ``dict`` version of the
181
+ root module's ``named_parameters()`` with FQN as key and
182
+ parameter as value.
183
+ kind (str): Kind of the target method ('call_function',
184
+ 'call_method', 'get_attr', 'call_module', 'placeholder', or
185
+ 'output'). See :class:`torch.fx.Graph` for details. This is
186
+ passed to ``create_proxy``.
187
+ target (torch.fx.node.Target): Contains the string name of the
188
+ function/method/module. This is passed to ``create_proxy``.
189
+ args (Tuple[Any, ...]): Positional arguments for the function/
190
+ method/module. This is passed to ``create_proxy``.
191
+ kwargs (Dict[str, Any]): Keyword arguments for the function/method/
192
+ module. This is passed to ``create_proxy``
193
+ name (Optional[str]): An optional string name for the ``Node``
194
+ created in ``create_proxy``. This is passed to
195
+ ``create_proxy``.
196
+ type_expr (Optional[Any]): An optional type annotation representing
197
+ the Python type that the output of the node has. This is passed
198
+ to ``create_proxy``.
199
+ proxy_factory_fn (Callable[[torch.fx.Node], torch.fx.Proxy]):
200
+ An alternative proxy constructor used in ``create_proxy``. This
201
+ is passed to ``create_proxy``.
202
+
203
+ Returns:
204
+ torch.fx.Proxy: Created ``Node`` wrapped in a ``Proxy`` object.
205
+ """
206
+ proxy = create_proxy(
207
+ kind, target, args, kwargs, name, type_expr, proxy_factory_fn
208
+ )
209
+ curr_module = exec_info.curr_module
210
+ if kind in ("call_function", "call_method"):
211
+ if args is not None:
212
+ named_params: List[Tuple[str, nn.Parameter]] = []
213
+ for arg in args:
214
+ if (
215
+ isinstance(arg, torch.fx.Proxy)
216
+ and arg.node.target in fqn_to_param
217
+ ):
218
+ param = fqn_to_param[arg.node.target]
219
+ named_params.append((arg.node.target, param))
220
+ if param not in exec_info.visited_params:
221
+ exec_info.visited_params.add(param)
222
+ exec_info.param_forward_order.append(param)
223
+ if named_params:
224
+ exec_info.module_to_param_usage_infos[curr_module].append(
225
+ _ParamUsageInfo(curr_module, named_params)
226
+ )
227
+ elif kind == "call_module":
228
+ named_params = list(curr_module.named_parameters())
229
+ if named_params:
230
+ exec_info.module_to_param_usage_infos[curr_module].append(
231
+ _ParamUsageInfo(curr_module, named_params)
232
+ )
233
+ for _, param in named_params:
234
+ if param not in exec_info.visited_params:
235
+ exec_info.visited_params.add(param)
236
+ exec_info.param_forward_order.append(param)
237
+ return proxy
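
The tracer patching above records forward execution order during a ``torch.fx`` symbolic trace. The following is a hedged usage sketch; the toy model and variable names are hypothetical, and only the patching flow mirrors the classes above.

# Illustrative sketch only: drives _ExecOrderTracer on a toy model.
import torch.nn as nn
from torch.distributed.fsdp._trace_utils import _ExecOrderTracer, TracingConfig

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
config = TracingConfig()  # default torch.fx.Tracer, no concrete_args
exec_order_tracer = _ExecOrderTracer()
with exec_order_tracer.patch_tracer(config.tracer, model):
    config.tracer.trace(model, config.concrete_args)
exec_info = exec_order_tracer.exec_info
# Parameters appear in first-use order, e.g. 0.weight, 0.bias, 2.weight, 2.bias.
print(len(exec_info.param_forward_order), len(exec_info.module_forward_order))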
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py ADDED
@@ -0,0 +1,113 @@
1
+ """
2
+ NOTE: This file must be imported like
3
+ ``import torch.distributed.fsdp._traversal_utils`` and not like
4
+ ``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular
5
+ imports. For brevity, we may import the file as ``traversal_utils``.
6
+ """
7
+
8
+ import collections
9
+ from typing import Deque, List, Set, Tuple
10
+
11
+ import torch.nn as nn
12
+ from torch.distributed._composable.contract import _get_registry
13
+ from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
14
+
15
+
16
+ """
17
+ [Note: FSDP State Traversal]
18
+ For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
19
+ module wrapping a fully sharded module, and for the non-wrapper code path,
20
+ ``_FSDPState`` is an object that gets embedded on a fully sharded module.
21
+ See [Note: Fully Sharded Module] for the definition.
22
+
23
+ There are three common traversal idioms: Given a root module,
24
+ - ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
25
+ - ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
26
+ tree (i.e. those with ``_is_root == True``).
27
+ - ``_get_fsdp_handles()`` returns all ``FlatParamHandle`` s in the tree.
28
+
29
+ All of these methods must take in the root module (i.e. an ``nn.Module``) and
30
+ not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
31
+ traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
32
+ """
33
+
34
+
35
+ def _composable(module: nn.Module) -> bool:
36
+ """
37
+ Returns whether ``module`` can compose with ``fully_shard``.
38
+ """
39
+ # TODO: Add any other composable APIs that are mutually exclusive.
40
+ registry = _get_registry(module)
41
+ if registry is None:
42
+ return True
43
+ return "replicate" not in registry
44
+
45
+
46
+ # TODO (awgu): We may be able to remove this function if we retired the
47
+ # `use_orig_params=False` code path since so far we only need the module for
48
+ # `FlatParameter` registration, which is not needed for `use_orig_params=True`.
49
+ def _get_fsdp_states_with_modules(
50
+ module: nn.Module,
51
+ ) -> Tuple[List[_FSDPState], List[nn.Module]]:
52
+ """
53
+ Returns a tuple containing:
54
+ 1. A list of the ``_FSDPState`` instances in the module tree rooted at
55
+ ``module`` without any duplicates and following the ``module.modules()``
56
+ traversal order (which is assumed to be depth-first).
57
+ 2. A corresponding list of the modules owning the states in the first list.
58
+
59
+ For the wrapper code path, both returned lists are the same, each
60
+ containing all ``FullyShardedDataParallel`` instances. For the composable
61
+ code path, this returns a list of all composable state instances and a list
62
+ of the corresponding fully sharded modules. See [Note: Fully Sharded
63
+ Module].
64
+
65
+ NOTE: The traversal does not proceed into any module annotated by an
66
+ incompatible API (e.g. ``replicate``).
67
+ """
68
+ fsdp_states: List[_FSDPState] = []
69
+ fsdp_modules: List[nn.Module] = []
70
+ # Track the visited FSDP states since multiple modules may share the same
71
+ # one and we want to return a de-duplicated list
72
+ visited_fsdp_states: Set[_FSDPState] = set()
73
+ # Track the visited modules in case of shared modules, which implies the
74
+ # module graph is no longer a tree
75
+ visited_modules: Set[nn.Module] = set()
76
+
77
+ # Perform depth-first search from `module` to ensure that we do not
78
+ # traverse into an incompatible API's subtree (use DFS instead of BFS to
79
+ # match `.modules()` order)
80
+ deque: Deque[nn.Module] = collections.deque([module])
81
+ while deque:
82
+ submodule = deque.popleft()
83
+ visited_modules.add(submodule)
84
+ if not _composable(submodule):
85
+ continue
86
+ for child_module in reversed(list(submodule.children())):
87
+ if child_module not in visited_modules:
88
+ deque.appendleft(child_module)
89
+ optional_state = _get_module_fsdp_state(submodule)
90
+ if optional_state is not None and optional_state not in visited_fsdp_states:
91
+ visited_fsdp_states.add(optional_state)
92
+ fsdp_states.append(optional_state)
93
+ fsdp_modules.append(submodule)
94
+ return fsdp_states, fsdp_modules
95
+
96
+
97
+ def _get_fsdp_states(module: nn.Module) -> List[_FSDPState]:
98
+ """See :func:`_get_fsdp_states_with_modules`."""
99
+ fsdp_states, _ = _get_fsdp_states_with_modules(module)
100
+ return fsdp_states
101
+
102
+
103
+ def _get_fsdp_handles(module: nn.Module) -> List:
104
+ """
105
+ Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
106
+ following the rules in :func:`_get_fsdp_state`.
107
+ """
108
+ handles = [
109
+ fsdp_state._handle
110
+ for fsdp_state in _get_fsdp_states(module)
111
+ if fsdp_state._handle is not None
112
+ ]
113
+ return handles
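
As the note above explains, these helpers traverse the ``nn.Module`` tree rather than the FSDP states themselves. A small sketch, assuming an already-constructed FSDP-wrapped model (the function name is illustrative):

# Illustrative sketch only: summarize the FSDP states/handles under a root module.
import torch.nn as nn
import torch.distributed.fsdp._traversal_utils as traversal_utils

def summarize_fsdp_tree(root: nn.Module) -> None:
    states = traversal_utils._get_fsdp_states(root)
    handles = traversal_utils._get_fsdp_handles(root)
    # Each state contributes at most one handle (its _handle may be None).
    print(f"{len(states)} FSDP states, {len(handles)} flat-param handles")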
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py ADDED
@@ -0,0 +1,357 @@
1
+ import contextlib
2
+ import warnings
3
+ from typing import cast, Generator
4
+
5
+ import torch
6
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
7
+ import torch.nn as nn
8
+ from torch.distributed.fsdp._common_utils import (
9
+ _FSDPState,
10
+ _has_fsdp_params,
11
+ _module_handle,
12
+ HandleTrainingState,
13
+ TrainingState,
14
+ )
15
+ from torch.distributed.fsdp._runtime_utils import (
16
+ _get_fsdp_root_states_with_modules,
17
+ _lazy_init,
18
+ _reset_flat_param_grad_info_if_needed,
19
+ _reshard,
20
+ _reshard_grads,
21
+ _unshard,
22
+ _unshard_grads,
23
+ )
24
+ from torch.distributed.utils import _p_assert
25
+
26
+ from ._flat_param import FlatParamHandle
27
+
28
+ FLAT_PARAM = "_flat_param"
29
+
30
+
31
+ @torch.no_grad()
32
+ def _writeback_to_local_shard(
33
+ handle: FlatParamHandle,
34
+ writeback_grad: bool,
35
+ ):
36
+ """
37
+ For the handle, writes back this rank's shard of the unsharded
38
+ flattened parameter to the sharded flattened parameter. If
39
+ ``writeback_grad=True``, then writes back to the sharded gradient as
40
+ well.
41
+
42
+ Precondition: The handle's ``FlatParameter`` 's data points to the
43
+ padded unsharded flattened parameter.
44
+ """
45
+
46
+ def _get_shard(flat_param_or_grad: torch.Tensor) -> torch.Tensor:
47
+ if handle.uses_sharded_strategy:
48
+ # For sharded strategies, get the *unpadded* shard instead of
49
+ # the *padded* shard to persist user changes to the padding
50
+ # (though FSDP does not explicitly support this)
51
+ shard, _ = FlatParamHandle._get_unpadded_shard(
52
+ flat_param_or_grad,
53
+ handle.rank,
54
+ handle.world_size,
55
+ )
56
+ return shard
57
+ # For `NO_SHARD`, the `flat_param` or its gradient may be modified,
58
+ # so we write it back directly
59
+ return flat_param_or_grad
60
+
61
+ param_shard = _get_shard(handle.flat_param)
62
+ handle.flat_param._local_shard[: param_shard.numel()].copy_(param_shard) # type: ignore[attr-defined]
63
+ if writeback_grad:
64
+ existing_grad = handle.sharded_grad
65
+ if existing_grad is not None:
66
+ assert handle.flat_param.grad is not None
67
+ grad_shard = _get_shard(handle.flat_param.grad)
68
+ existing_grad[: grad_shard.numel()].copy_(grad_shard)
69
+
70
+
71
+ def _deregister_flat_param(state: _FSDPState, module: nn.Module) -> None:
72
+ """
73
+ De-registers the flattened parameter from the wrapped module, hiding it
74
+ from ``nn.Module`` methods.
75
+
76
+ We do not use ``del`` because we want ``FLAT_PARAM`` to always be an
77
+ attribute but dynamically change whether it is visible to ``nn.Module``
78
+ methods.
79
+ """
80
+ if _has_fsdp_params(state, module):
81
+ # TODO: figure out the case for the composable APIs.
82
+ cast(nn.Module, module.module)._parameters.pop(FLAT_PARAM, None)
83
+
84
+
85
+ def _register_flat_param(state: _FSDPState, module: nn.Module) -> None:
86
+ """
87
+ Registers the flattened parameter to the wrapped module, making it
88
+ visible to ``nn.Module`` methods.
89
+
90
+ We do not use :meth:`nn.Module.register_parameter` because we want
91
+ ``FLAT_PARAM`` to always be an attribute but dynamically change whether
92
+ it is visible to ``nn.Module`` methods.
93
+ """
94
+ handle = _module_handle(state, module)
95
+ if _has_fsdp_params(state, module):
96
+ # TODO: figure out the case for the composable APIs.
97
+ cast(nn.Module, module.module)._parameters[FLAT_PARAM] = handle.flat_param
98
+
99
+
100
+ @contextlib.contextmanager
101
+ def _unflatten_as_params(state: _FSDPState, module: nn.Module) -> Generator:
102
+ """
103
+ Assumes that the flattened parameter is unsharded. When in the context,
104
+ de-registers the flattened parameter and unflattens the original
105
+ parameters as ``nn.Parameter`` views into the flattened parameter.
106
+ After the context, re-registers the flattened parameter and restores
107
+ the original parameters as ``Tensor`` views into the flattened
108
+ parameter.
109
+ """
110
+ handle = _module_handle(state, module)
111
+ if not handle:
112
+ yield
113
+ else:
114
+ _deregister_flat_param(state, module)
115
+ try:
116
+ with handle.unflatten_as_params():
117
+ yield
118
+ finally:
119
+ if not handle._use_orig_params:
120
+ _register_flat_param(state, module)
121
+
122
+
123
+ def _validate_unshard_params_args(
124
+ state: _FSDPState,
125
+ writeback: bool,
126
+ rank0_only: bool,
127
+ offload_to_cpu: bool,
128
+ with_grads: bool,
129
+ ) -> None:
130
+ if with_grads and (offload_to_cpu or not state._use_orig_params):
131
+ raise NotImplementedError(
132
+ f"with_grads={with_grads}, "
133
+ f"use_orig_params={state._use_orig_params}, "
134
+ f"offload_to_cpu={offload_to_cpu} "
135
+ f"is not supported yet"
136
+ )
137
+ if offload_to_cpu and state._handle and (not state._handle.uses_sharded_strategy):
138
+ raise NotImplementedError(
139
+ "offload_to_cpu=True and NO_SHARD is not supported yet"
140
+ )
141
+ if writeback and rank0_only:
142
+ # TODO: Rank 0 can broadcast the `FlatParameter` to allow all ranks to
143
+ # persist the changes.
144
+ raise NotImplementedError(
145
+ "writeback=True and rank0_only=True is not supported yet"
146
+ )
147
+ if offload_to_cpu and not rank0_only:
148
+ warnings.warn(
149
+ "offload_to_cpu=True and rank0_only=False may result in the"
150
+ "unsharded parameters being redundantly copied to CPU memory for "
151
+ "GPUs sharing the same CPU memory, which risks CPU OOM. We "
152
+ "recommend using offload_to_cpu=True with rank0_only=True."
153
+ )
154
+
155
+
156
+ @contextlib.contextmanager
157
+ def _unshard_fsdp_state_params(
158
+ module: nn.Module,
159
+ state: _FSDPState,
160
+ writeback: bool,
161
+ rank0_only: bool,
162
+ offload_to_cpu: bool,
163
+ with_grads: bool,
164
+ ):
165
+ """
166
+ This unshards the parameters for a single FSDP state ``state`` that
167
+ corresponds to ``module``.
168
+ """
169
+ _validate_unshard_params_args(
170
+ state, writeback, rank0_only, offload_to_cpu, with_grads
171
+ )
172
+ state._device_handle.synchronize()
173
+ # If handles are shared by other module(s), the handle may be already unsharded.
174
+ maybe_handle = _module_handle(state, module)
175
+ handle = None
176
+ if (
177
+ maybe_handle
178
+ and maybe_handle._training_state != HandleTrainingState.SUMMON_FULL_PARAMS
179
+ ):
180
+ handle = maybe_handle
181
+ if not handle:
182
+ yield
183
+ return
184
+
185
+ assert (
186
+ handle._training_state == HandleTrainingState.IDLE
187
+ ), f"Expects the handle training to be IDLE but got {handle._training_state}"
188
+
189
+ handle._training_state = HandleTrainingState.SUMMON_FULL_PARAMS
190
+
191
+ _reset_flat_param_grad_info_if_needed(handle)
192
+ free_unsharded_flat_param = handle.needs_unshard()
193
+ # No need to call `wait_stream()` since we unshard in the computation
194
+ # stream directly
195
+ computation_stream = state._device_handle.current_stream()
196
+ _unshard(state, handle, computation_stream, computation_stream)
197
+ if with_grads:
198
+ _unshard_grads(handle)
199
+
200
+ if rank0_only and state.rank != 0:
201
+ # Free the unsharded flattened parameter early
202
+ _reshard(state, handle, free_unsharded_flat_param)
203
+ if with_grads:
204
+ _reshard_grads(handle)
205
+ try:
206
+ yield
207
+ finally:
208
+ handle._training_state = HandleTrainingState.IDLE
209
+ else:
210
+ # Unflatten the unsharded flattened parameters
211
+ with contextlib.ExitStack() as stack:
212
+ # Invariant: rank == 0 or !rank0_only
213
+ if offload_to_cpu and handle.uses_sharded_strategy:
214
+ stack.enter_context(handle.to_cpu())
215
+ # NOTE: Since PyTorch enforces that a parameter and its
216
+ # gradients need to match metadata (e.g. device), we must
217
+ # move gradients to CPU *after* we move parameters.
218
+ # NOTE: This assumes 1 `FlatParameter`
219
+ if not state._use_orig_params:
220
+ stack.enter_context(_unflatten_as_params(state, module))
221
+ try:
222
+ yield
223
+ finally:
224
+ stack.close()
225
+ if writeback:
226
+ _writeback_to_local_shard(handle, with_grads)
227
+ _reshard(state, handle, free_unsharded_flat_param)
228
+ if with_grads:
229
+ _reshard_grads(handle)
230
+ handle._training_state = HandleTrainingState.IDLE
231
+
232
+
233
+ @contextlib.contextmanager
234
+ def _unshard_params_recurse(
235
+ module: nn.Module,
236
+ state: _FSDPState,
237
+ recurse: bool,
238
+ writeback: bool,
239
+ rank0_only: bool,
240
+ offload_to_cpu: bool,
241
+ with_grads: bool,
242
+ ):
243
+ """
244
+ This is a helper for :func:`_unshard_params` that recursively calls
245
+ :func:`_unshard_fsdp_state_params` on FSDP states if ``recurse=True``.
246
+ NOTE: This runs lazy initialization.
247
+ """
248
+ _validate_unshard_params_args(
249
+ state, writeback, rank0_only, offload_to_cpu, with_grads
250
+ )
251
+ if recurse:
252
+ with contextlib.ExitStack() as stack:
253
+ # TODO (awgu): The traversal function does not traverse through
254
+ # incompatible composable APIs. Verify if this is the desired
255
+ # behavior for this function.
256
+ for state, fsdp_module in zip(
257
+ *traversal_utils._get_fsdp_states_with_modules(module)
258
+ ):
259
+ stack.enter_context(
260
+ _unshard_params_recurse(
261
+ module=fsdp_module,
262
+ state=state,
263
+ recurse=False,
264
+ writeback=writeback,
265
+ rank0_only=rank0_only,
266
+ offload_to_cpu=offload_to_cpu,
267
+ with_grads=with_grads,
268
+ )
269
+ )
270
+ yield
271
+ return
272
+ _lazy_init(state, module)
273
+ if state.training_state == TrainingState.FORWARD_BACKWARD:
274
+ raise AssertionError(
275
+ "Cannot manually unshard parameters during forward/backward"
276
+ )
277
+ elif state.training_state == TrainingState.SUMMON_FULL_PARAMS:
278
+ raise AssertionError(
279
+ "Cannot manually unshard parameters when already unsharding parameters"
280
+ )
281
+ with _unshard_fsdp_state_params(
282
+ module=module,
283
+ state=state,
284
+ writeback=writeback,
285
+ rank0_only=rank0_only,
286
+ offload_to_cpu=offload_to_cpu,
287
+ with_grads=with_grads,
288
+ ):
289
+ try:
290
+ state.training_state = TrainingState.SUMMON_FULL_PARAMS
291
+ yield
292
+ finally:
293
+ state.training_state = TrainingState.IDLE
294
+
295
+
296
+ @contextlib.contextmanager
297
+ def _unshard_params(
298
+ module: nn.Module,
299
+ recurse: bool,
300
+ writeback: bool,
301
+ rank0_only: bool,
302
+ offload_to_cpu: bool,
303
+ with_grads: bool,
304
+ ):
305
+ """
306
+ This unshards FSDP-managed parameters for all modules with FSDP applied in
307
+ the module tree rooted at ``module``.
308
+ """
309
+ root_fsdp_states, root_fsdp_modules = _get_fsdp_root_states_with_modules(module)
310
+ with contextlib.ExitStack() as stack:
311
+ for root_fsdp_state, root_fsdp_module in zip(
312
+ root_fsdp_states, root_fsdp_modules
313
+ ):
314
+ stack.enter_context(
315
+ _unshard_params_recurse(
316
+ module=root_fsdp_module,
317
+ state=root_fsdp_state,
318
+ recurse=recurse,
319
+ writeback=writeback,
320
+ rank0_only=rank0_only,
321
+ offload_to_cpu=offload_to_cpu,
322
+ with_grads=with_grads,
323
+ )
324
+ )
325
+ yield
326
+ return
327
+
328
+
329
+ def _deregister_orig_params(state: _FSDPState, module: nn.Module) -> None:
330
+ """
331
+ Deregisters the original parameters; registers the ``FlatParameter``.
332
+ """
333
+ handle = _module_handle(state, module)
334
+ if not handle:
335
+ return
336
+ _p_assert(
337
+ handle._use_orig_params,
338
+ f"Inconsistent `_use_orig_params` -- FSDP: {state._use_orig_params} "
339
+ f"handle: {handle._use_orig_params}",
340
+ )
341
+ handle._deregister_orig_params()
342
+ _register_flat_param(state, module)
343
+
344
+
345
+ def _register_orig_params(state: _FSDPState, module: nn.Module) -> None:
346
+ """
347
+ Deregisters the ``FlatParameter``; registers the original parameters.
348
+ """
349
+ handle = _module_handle(state, module)
350
+ if not handle:
351
+ return
352
+ _deregister_flat_param(state, module)
353
+ if handle.is_sharded(handle.flat_param):
354
+ handle._use_sharded_views()
355
+ handle._use_sharded_grad_views()
356
+ else:
357
+ handle._use_unsharded_views(as_params=True)
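The helpers above back FSDP's public ``summon_full_params`` context manager, which routes through ``_unshard_params``. The following is a minimal sketch of how user code exercises this path; it is not part of this diff and assumes ``torch.distributed`` is already initialized and the model has been wrapped with FSDP on a CUDA device.

import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

def inspect_full_params(fsdp_model: FSDP) -> None:
    # Unshard on every rank, keep tensors on GPU, and discard any edits made
    # inside the context (writeback=False). Internally this drives
    # _unshard_params() -> _unshard_params_recurse() -> _unshard_fsdp_state_params().
    with FSDP.summon_full_params(fsdp_model, writeback=False, rank0_only=False):
        for name, param in fsdp_model.named_parameters():
            print(name, tuple(param.shape))
    # On exit, parameters are resharded and the training state returns to IDLE.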
venv/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py ADDED
@@ -0,0 +1,262 @@
1
+ import collections
2
+ import functools
3
+ import inspect
4
+ import warnings
5
+ from functools import partial
6
+ from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union
7
+
8
+ import torch.nn as nn
9
+ from torch.distributed.fsdp._common_utils import (
10
+ _get_module_fsdp_state,
11
+ _override_module_mixed_precision,
12
+ )
13
+
14
+ from torch.distributed.fsdp.wrap import (
15
+ _construct_wrap_fn,
16
+ _or_policy,
17
+ _Policy,
18
+ _post_order_apply,
19
+ _recursive_wrap,
20
+ _run_mixed_precision_override_policy,
21
+ _wrap_module_cls_individually,
22
+ )
23
+
24
+
25
+ def _auto_wrap(
26
+ root_module: nn.Module,
27
+ policy: Union[Callable, _Policy],
28
+ ignored_modules: Set[nn.Module],
29
+ ignored_params: Set[nn.Parameter],
30
+ root_kwargs: Dict[str, Any],
31
+ fsdp_fn: Callable, # e.g. `FullyShardedDataParallel` or `fully_shard`
32
+ ):
33
+ """
34
+ Auto wraps modules in ``root_module`` 's tree according to ``policy``
35
+ following a post-order traversal.
36
+
37
+ Precondition: ``root_kwargs`` should contain all arguments except
38
+ ``module``. This function accepts the kwargs dict directly since it gets
39
+ forwarded into the post-order traversal function.
40
+ """
41
+ mixed_precision = root_kwargs["mixed_precision"]
42
+ is_wrapper = inspect.isclass(fsdp_fn)
43
+ # TODO: We may relax this no-nested-wrapping constraint to support manual
44
+ # wrapping followed by auto wrapping.
45
+ _check_nested_wrapping(root_module)
46
+
47
+ if isinstance(policy, _Policy):
48
+ root_kwargs["auto_wrap_policy" if is_wrapper else "policy"] = None
49
+ target_module_to_kwargs = policy._run_policy(
50
+ root_module, ignored_modules, root_kwargs
51
+ )
52
+ if mixed_precision is not None:
53
+ target_module_to_kwargs = _run_mixed_precision_override_policy(
54
+ root_module,
55
+ mixed_precision._module_classes_to_ignore,
56
+ ignored_modules,
57
+ root_kwargs,
58
+ target_module_to_kwargs,
59
+ )
60
+ overridden_module_classes = _override_module_mixed_precision(
61
+ root_module, mixed_precision._module_classes_to_ignore
62
+ )
63
+ _warn_on_overridden_mixed_precision(overridden_module_classes)
64
+ use_orig_params = root_kwargs.get("use_orig_params", False)
65
+ _validate_frozen_params(
66
+ root_module,
67
+ set(target_module_to_kwargs.keys()),
68
+ ignored_params,
69
+ use_orig_params,
70
+ )
71
+ wrap_fn = _construct_wrap_fn(root_module, target_module_to_kwargs, fsdp_fn)
72
+ _post_order_apply(root_module, wrap_fn)
73
+ return
74
+
75
+ recursive_wrap_kwargs = {
76
+ "module": root_module,
77
+ "auto_wrap_policy": policy,
78
+ "wrapper_cls": fsdp_fn,
79
+ "ignored_modules": ignored_modules,
80
+ "ignored_params": ignored_params,
81
+ "only_wrap_children": True,
82
+ }
83
+ if mixed_precision is not None:
84
+ # Wrap modules of the ignored types separately and register forward
85
+ # hooks to cast to fp32 and back to the original dtype, respectively
86
+ overridden_module_classes = _override_module_mixed_precision(
87
+ root_module, mixed_precision._module_classes_to_ignore
88
+ )
89
+ policy = functools.partial(
90
+ _or_policy,
91
+ policies=[
92
+ policy,
93
+ partial(
94
+ _wrap_module_cls_individually,
95
+ module_classes=mixed_precision._module_classes_to_ignore,
96
+ ),
97
+ ],
98
+ )
99
+ recursive_wrap_kwargs["auto_wrap_policy"] = policy
100
+ _warn_on_overridden_mixed_precision(overridden_module_classes)
101
+ _recursive_wrap(**recursive_wrap_kwargs, **root_kwargs) # type: ignore[arg-type]
102
+
103
+
104
+ def _check_nested_wrapping(root_module: nn.Module):
105
+ for module_name, module in root_module.named_modules():
106
+ if _get_module_fsdp_state(module) is not None:
107
+ raise ValueError(
108
+ "FSDP auto wrapping requires modules to not already have "
109
+ f"FSDP applied but found {module_name} in\n{root_module}"
110
+ )
111
+
112
+
113
+ def _warn_on_overridden_mixed_precision(
114
+ overridden_module_classes: Set[Type[nn.Module]],
115
+ ):
116
+ if len(overridden_module_classes) == 0:
117
+ return
118
+ warnings.warn(
119
+ "Both mixed precision and an auto_wrap_policy were specified to FSDP, "
120
+ f"where the wrapped module has submodules of type:\n{overridden_module_classes}\n"
121
+ "These modules will be wrapped as separate FSDP instacnes with mixed "
122
+ "precision disabled."
123
+ )
124
+
125
+
126
+ def _validate_frozen_params(
127
+ root_module: nn.Module,
128
+ modules_to_wrap: Set[nn.Module],
129
+ ignored_params: Set[nn.Parameter],
130
+ use_orig_params: bool,
131
+ ):
132
+ """
133
+ This checks that, given ``modules_to_wrap``, each module would manage
134
+ parameters that are uniformly frozen or non-frozen. This uniformity
135
+ requirement is strict for ``use_orig_params=False`` (hard error) and highly
136
+ recommended for ``use_orig_params=True`` (user warning).
137
+ """
138
+ post_order_named_modules = _get_post_order_named_modules(root_module)
139
+ visited_modules: Set[nn.Module] = set()
140
+ for module_name, module in post_order_named_modules:
141
+ if module in modules_to_wrap:
142
+ param_to_fqn = _get_managed_param_to_fqn(
143
+ module, ignored_params, visited_modules, module_name
144
+ )
145
+ frozen_param_fqns: List[str] = []
146
+ frozen_param_numel = 0
147
+ nonfrozen_param_fqns: List[str] = []
148
+ nonfrozen_param_numel = 0
149
+ for param, fqn in param_to_fqn.items():
150
+ if param.requires_grad:
151
+ nonfrozen_param_fqns.append(fqn)
152
+ nonfrozen_param_numel += param.numel()
153
+ else:
154
+ frozen_param_fqns.append(fqn)
155
+ frozen_param_numel += param.numel()
156
+ if len(frozen_param_fqns) > 0 and len(nonfrozen_param_fqns) > 0:
157
+ msg = f"{module_name} has both parameters with requires_grad=True and False."
158
+ if use_orig_params:
159
+ total_param_numel = frozen_param_numel + nonfrozen_param_numel
160
+ msg += (
161
+ " We do not recommend wrapping such modules since "
162
+ "the gradient memory usage will be higher than expected "
163
+ f"({total_param_numel} numel instead of {nonfrozen_param_numel} numel "
164
+ "before sharding via reduce-scatter). "
165
+ )
166
+ else:
167
+ msg += " FSDP does not support wrapping such modules when use_orig_params=False. "
168
+ msg += "If possible, wrap the frozen parameters with FSDP separately.\n"
169
+ msg += (
170
+ f"The following parameters have requires_grad=True:\n{nonfrozen_param_fqns}\n"
171
+ f"The following parameters have requires_grad=False:\n{frozen_param_fqns}"
172
+ )
173
+ if use_orig_params:
174
+ warnings.warn(msg)
175
+ else:
176
+ raise ValueError(msg)
177
+
178
+
179
+ def _get_post_order_named_modules(
180
+ root_module: nn.Module,
181
+ ) -> List[Tuple[str, nn.Module]]:
182
+ """
183
+ This returns the named modules following a post-order traversal, which is a
184
+ valid reverse topological sort. We achieve this using the reverse of a
185
+ stack-based DFS order instead of reversing ``root_module.named_modules()``
186
+ since the former gives the modules in registration order at each level in
187
+ the module tree (as opposed to the reverse), which allows us to error/warn
188
+ on the first registered module that violates the condition.
189
+
190
+ For example, consider the following module structure:
191
+ M(
192
+ S1(),
193
+ S2(
194
+ SS1(),
195
+ SS2(),
196
+ ),
197
+ S3(),
198
+ )
199
+ The reverse DFS order is [S1, SS1, SS2, S2, S3, M], while the reverse
200
+ ``named_modules()`` order is [S3, SS2, SS1, S2, S1, M].
201
+ """
202
+ visited_modules = {root_module}
203
+ stack = [("", root_module)]
204
+ # Append and reverse at the end for linear-time algorithm
205
+ reverse_post_order_named_modules: List[Tuple[str, nn.Module]] = []
206
+ while stack:
207
+ module_name, module = stack.pop()
208
+ reverse_post_order_named_modules.append((module_name, module))
209
+ for child_module_name, child_module in module.named_children():
210
+ if child_module is None: # only for overrides of `named_children()`
211
+ continue
212
+ if child_module not in visited_modules:
213
+ visited_modules.add(child_module)
214
+ if module_name != "":
215
+ child_module_name = module_name + "." + child_module_name
216
+ stack.append((child_module_name, child_module))
217
+ post_order_named_modules = list(reversed(reverse_post_order_named_modules))
218
+ return post_order_named_modules
219
+
220
+
221
+ def _get_managed_param_to_fqn(
222
+ module_to_wrap: nn.Module,
223
+ ignored_params: Set[nn.Parameter],
224
+ visited_modules: Set[nn.Module],
225
+ root_prefix: str,
226
+ ) -> Dict[nn.Parameter, str]:
227
+ """
228
+ This returns a dict that maps managed parameter to its FQN for the given
229
+ ``module_to_wrap``. The dict's keys are exactly the parameters that would
230
+ be managed by the module, where this is achieved by calling this function
231
+ on the modules to wrap in reverse topological order, destructively updating
232
+ ``visited_modules``, and not traversing into those modules. The FQNs are
233
+ prefixed from the root (via ``root_prefix``) to be more informative.
234
+
235
+ NOTE: This function is meant to be called pre-wrapping and iteratively in
236
+ reverse topological order to cover the full module tree. This differs from
237
+ the ``_get_param_to_fqn()`` function meant to be called post-wrapping and
238
+ on the full module tree in one shot. Given those differences, we do not try
239
+ to unify the two.
240
+ """
241
+ param_to_fqn: Dict[nn.Parameter, str] = {}
242
+ # Run BFS (or any tree traversal works)
243
+ queue = collections.deque([(module_to_wrap, root_prefix)])
244
+ visited_modules.add(module_to_wrap)
245
+ while queue:
246
+ module, prefix = queue.popleft()
247
+ for param_name, param in module.named_parameters(recurse=False):
248
+ if param not in ignored_params:
249
+ fqn = param_name if prefix == "" else prefix + "." + param_name
250
+ param_to_fqn[param] = fqn
251
+ for child_module_name, child_module in module.named_children():
252
+ if child_module is None: # only for overrides of `named_children()`
253
+ continue
254
+ if child_module not in visited_modules:
255
+ visited_modules.add(child_module)
256
+ child_prefix = (
257
+ child_module_name
258
+ if prefix == ""
259
+ else prefix + "." + child_module_name
260
+ )
261
+ queue.append((child_module, child_prefix))
262
+ return param_to_fqn
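``_auto_wrap`` is reached from the ``FullyShardedDataParallel`` constructor whenever ``auto_wrap_policy`` is provided. A minimal usage sketch follows; it is not part of this diff and assumes ``torch.distributed`` has been initialized with a GPU-capable backend.

import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy

model = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=256, nhead=8), num_layers=4
)
# ModuleWrapPolicy is a _Policy, so _auto_wrap() takes the _run_policy branch:
# each TransformerEncoderLayer becomes its own FSDP instance, applied in
# post-order via _post_order_apply().
fsdp_model = FSDP(
    model,
    auto_wrap_policy=ModuleWrapPolicy({nn.TransformerEncoderLayer}),
    use_orig_params=True,
)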
venv/lib/python3.10/site-packages/torch/distributed/fsdp/api.py ADDED
@@ -0,0 +1,410 @@
1
+ """
2
+ This file includes public APIs for FSDP such as the classes used for the
3
+ constructor arguments.
4
+ """
5
+
6
+ from dataclasses import dataclass
7
+ from enum import auto, Enum
8
+
9
+ from typing import Optional, Sequence, Type
10
+
11
+ import torch
12
+ from torch.nn.modules.batchnorm import _BatchNorm
13
+
14
+ __all__ = [
15
+ "ShardingStrategy",
16
+ "BackwardPrefetch",
17
+ "MixedPrecision",
18
+ "CPUOffload",
19
+ "StateDictType",
20
+ "StateDictConfig",
21
+ "FullStateDictConfig",
22
+ "LocalStateDictConfig",
23
+ "ShardedStateDictConfig",
24
+ "OptimStateDictConfig",
25
+ "FullOptimStateDictConfig",
26
+ "LocalOptimStateDictConfig",
27
+ "ShardedOptimStateDictConfig",
28
+ "StateDictSettings",
29
+ ]
30
+
31
+
32
+ class ShardingStrategy(Enum):
33
+ """
34
+ This specifies the sharding strategy to be used for distributed training by
35
+ :class:`FullyShardedDataParallel`.
36
+
37
+ - ``FULL_SHARD``: Parameters, gradients, and optimizer states are sharded.
38
+ For the parameters, this strategy unshards (via all-gather) before the
39
+ forward, reshards after the forward, unshards before the backward
40
+ computation, and reshards after the backward computation. For gradients,
41
+ it synchronizes and shards them (via reduce-scatter) after the backward
42
+ computation. The sharded optimizer states are updated locally per rank.
43
+ - ``SHARD_GRAD_OP``: Gradients and optimizer states are sharded during
44
+ computation, and additionally, parameters are sharded outside
45
+ computation. For the parameters, this strategy unshards before the
46
+ forward, does not reshard them after the forward, and only reshards them
47
+ after the backward computation. The sharded optimizer states are updated
48
+ locally per rank. Inside ``no_sync()``, the parameters are not resharded
49
+ after the backward computation.
50
+ - ``NO_SHARD``: Parameters, gradients, and optimizer states are not sharded
51
+ but instead replicated across ranks similar to PyTorch's
52
+ :class:`DistributedDataParallel` API. For gradients, this strategy
53
+ synchronizes them (via all-reduce) after the backward computation. The
54
+ unsharded optimizer states are updated locally per rank.
55
+ - ``HYBRID_SHARD``: Apply ``FULL_SHARD`` within a node, and replicate parameters across
56
+ nodes. This results in reduced communication volume as expensive all-gathers and
57
+ reduce-scatters are only done within a node, which can be more performant for
58
+ medium-sized models.
59
+ - ``_HYBRID_SHARD_ZERO2``: Apply ``SHARD_GRAD_OP`` within a node, and replicate parameters across
60
+ nodes. This is like ``HYBRID_SHARD``, except this may provide even higher throughput
61
+ since the unsharded parameters are not freed after the forward pass, saving the
62
+ all-gathers in the pre-backward.
63
+ """
64
+
65
+ FULL_SHARD = auto()
66
+ SHARD_GRAD_OP = auto()
67
+ NO_SHARD = auto()
68
+ HYBRID_SHARD = auto()
69
+ _HYBRID_SHARD_ZERO2 = auto()
70
+
71
+
72
+ class BackwardPrefetch(Enum):
73
+ """
74
+ This configures explicit backward prefetching, which improves throughput by
75
+ enabling communication and computation overlap in the backward pass at the
76
+ cost of slightly increased memory usage.
77
+
78
+ - ``BACKWARD_PRE``: This enables the most overlap but increases memory
79
+ usage the most. This prefetches the next set of parameters *before* the
80
+ current set of parameters' gradient computation. This overlaps the *next
81
+ all-gather* and the *current gradient computation*, and at the peak, it
82
+ holds the current set of parameters, next set of parameters, and current
83
+ set of gradients in memory.
84
+ - ``BACKWARD_POST``: This enables less overlap but requires less memory
85
+ usage. This prefetches the next set of parameters *after* the current
86
+ set of parameters' gradient computation. This overlaps the *current
87
+ reduce-scatter* and the *next gradient computation*, and it frees the
88
+ current set of parameters before allocating memory for the next set of
89
+ parameters, only holding the next set of parameters and current set of
90
+ gradients in memory at the peak.
91
+ - FSDP's ``backward_prefetch`` argument accepts ``None``, which disables
92
+ the backward prefetching altogether. This has no overlap and does not
93
+ increase memory usage. In general, we do not recommend this setting since
94
+ it may degrade throughput significantly.
95
+
96
+ For more technical context: For a single process group using NCCL backend,
97
+ any collectives, even if issued from different streams, contend for the
98
+ same per-device NCCL stream, which implies that the relative order in which
99
+ the collectives are issued matters for overlapping. The two backward
100
+ prefetching values correspond to different issue orders.
101
+ """
102
+
103
+ # NOTE: For both modes, the ordering that defines "current" and "next" is
104
+ # not always exact in the current implementation. A mistargeted prefetch
105
+ # simply means that the parameter memory is allocated earlier than needed,
106
+ # possibly increasing peak memory usage, but does not affect correctness.
107
+ BACKWARD_PRE = auto()
108
+ BACKWARD_POST = auto()
109
+
110
+
111
+ @dataclass
112
+ class MixedPrecision:
113
+ """
114
+ This configures FSDP-native mixed precision training.
115
+
116
+ Attributes:
117
+ param_dtype (Optional[torch.dtype]): This specifies the dtype for model
118
+ parameters during forward and backward and thus the dtype for
119
+ forward and backward computation. Outside forward and backward, the
120
+ *sharded* parameters are kept in full precision (e.g. for the
121
+ optimizer step), and for model checkpointing, the parameters are
122
+ always saved in full precision. (Default: ``None``)
123
+ reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
124
+ gradient reduction (i.e. reduce-scatter or all-reduce). If this is
125
+ ``None`` but ``param_dtype`` is not ``None``, then this takes on
126
+ the ``param_dtype`` value, still running gradient reduction in low
127
+ precision. This is permitted to differ from ``param_dtype``, e.g.
128
+ to force gradient reduction to run in full precision. (Default:
129
+ ``None``)
130
+ buffer_dtype (Optional[torch.dtype]): This specifies the dtype for
131
+ buffers. FSDP does not shard buffers. Rather, FSDP casts them to
132
+ ``buffer_dtype`` in the first forward pass and keeps them in that
133
+ dtype thereafter. For model checkpointing, the buffers are saved
134
+ in full precision except for ``LOCAL_STATE_DICT``. (Default:
135
+ ``None``)
136
+ keep_low_precision_grads (bool): If ``False``, then FSDP upcasts
137
+ gradients to full precision after the backward pass in preparation
138
+ for the optimizer step. If ``True``, then FSDP keeps the gradients
139
+ in the dtype used for gradient reduction, which can save memory if
140
+ using a custom optimizer that supports running in low precision.
141
+ (Default: ``False``)
142
+ cast_forward_inputs (bool): If ``True``, then this FSDP module casts
143
+ its forward args and kwargs to ``param_dtype``. This is to ensure
144
+ that parameter and input dtypes match for forward computation, as
145
+ required by many ops. This may need to be set to ``True`` when only
146
+ applying mixed precision to some but not all FSDP modules, in which
147
+ case a mixed-precision FSDP submodule needs to recast its inputs.
148
+ (Default: ``False``)
149
+ cast_root_forward_inputs (bool): If ``True``, then the root FSDP module
150
+ casts its forward args and kwargs to ``param_dtype``, overriding
151
+ the value of ``cast_forward_inputs``. For non-root FSDP modules,
152
+ this does not do anything. (Default: ``True``)
153
+ _module_classes_to_ignore (Sequence[Type[nn.Module]]): This specifies
154
+ module classes to ignore for mixed precision when using an
155
+ ``auto_wrap_policy``: Modules of these classes will have FSDP
156
+ applied to them separately with mixed precision disabled (meaning
157
+ that the final FSDP construction would deviate from the specified
158
+ policy). If ``auto_wrap_policy`` is not specified, then this does
159
+ not do anything. This API is experimental and subject to change.
160
+ (Default: ``(_BatchNorm,)``)
161
+
162
+ .. note:: This API is experimental and subject to change.
163
+
164
+ .. note:: Only floating point tensors are cast to their specified dtypes.
165
+
166
+ .. note:: In ``summon_full_params``, parameters are forced to full
167
+ precision, but buffers are not.
168
+
169
+ .. note:: Layer norm and batch norm accumulate in ``float32`` even when
170
+ their inputs are in a low precision like ``float16`` or ``bfloat16``.
171
+ Disabling FSDP's mixed precision for those norm modules only means that
172
+ the affine parameters are kept in ``float32``. However, this incurs
173
+ separate all-gathers and reduce-scatters for those norm modules, which
174
+ may be inefficient, so if the workload permits, the user should prefer
175
+ to still apply mixed precision to those modules.
176
+
177
+ .. note:: By default, if the user passes a model with any ``_BatchNorm``
178
+ modules and specifies an ``auto_wrap_policy``, then the batch norm
179
+ modules will have FSDP applied to them separately with mixed precision
180
+ disabled. See the ``_module_classes_to_ignore`` argument.
181
+
182
+ .. note:: ``MixedPrecision`` has ``cast_root_forward_inputs=True`` and
183
+ ``cast_forward_inputs=False`` by default. For the root FSDP instance,
184
+ its ``cast_root_forward_inputs`` takes precedence over its
185
+ ``cast_forward_inputs``. For non-root FSDP instances, their
186
+ ``cast_root_forward_inputs`` values are ignored. The default setting is
187
+ sufficient for the typical case where each FSDP instance has the same
188
+ ``MixedPrecision`` configuration and only needs to cast inputs to the
189
+ ``param_dtype`` at the beginning of the model's forward pass.
190
+
191
+ .. note:: For nested FSDP instances with different ``MixedPrecision``
192
+ configurations, we recommend setting individual ``cast_forward_inputs``
193
+ values to configure casting inputs or not before each instance's
194
+ forward. In such a case, since the casts happen before each FSDP
195
+ instance's forward, a parent FSDP instance should have its non-FSDP
196
+ submodules run before its FSDP submodules to avoid the activation dtype
197
+ being changed due to a different ``MixedPrecision`` configuration.
198
+
199
+ Example::
200
+
201
+ >>> # xdoctest: +SKIP("undefined variables")
202
+ >>> model = nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3))
203
+ >>> model[1] = FSDP(
204
+ >>> model[1],
205
+ >>> mixed_precision=MixedPrecision(param_dtype=torch.float16, cast_forward_inputs=True),
206
+ >>> )
207
+ >>> model = FSDP(
208
+ >>> model,
209
+ >>> mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, cast_forward_inputs=True),
210
+ >>> )
211
+
212
+ The above shows a working example. On the other hand, if ``model[1]``
213
+ were replaced with ``model[0]``, meaning that the submodule using
214
+ different ``MixedPrecision`` ran its forward first, then ``model[1]``
215
+ would incorrectly see ``float16`` activations instead of ``bfloat16``
216
+ ones.
217
+
218
+ """
219
+
220
+ param_dtype: Optional[torch.dtype] = None
221
+ reduce_dtype: Optional[torch.dtype] = None
222
+ buffer_dtype: Optional[torch.dtype] = None
223
+ keep_low_precision_grads: bool = False
224
+ cast_forward_inputs: bool = False
225
+ cast_root_forward_inputs: bool = True
226
+ _module_classes_to_ignore: Sequence[Type[torch.nn.Module]] = (_BatchNorm,)
227
+
228
+
229
+ @dataclass
230
+ class CPUOffload:
231
+ """
232
+ This configures CPU offloading.
233
+
234
+ Attributes:
235
+ offload_params (bool): This specifies whether to offload parameters to
236
+ CPU when not involved in computation. If ``True``, then this
237
+ offloads gradients to CPU as well, meaning that the optimizer step
238
+ runs on CPU.
239
+ """
240
+
241
+ offload_params: bool = False
242
+
243
+
244
+ class StateDictType(Enum):
245
+ """
246
+ This enum indicates which type of ``state_dict`` the FSDP module is
247
+ currently processing (returning or loading).
248
+ The default value is FULL_STATE_DICT to comply with the PyTorch convention.
249
+ .. note::
250
+ FSDP currently supports three types of ``state_dict``:
251
+ 1. ``state_dict/load_state_dict``: this pair of APIs return and load
252
+ the non-sharded, unflattened parameters. The semantics are the
253
+ same as with DDP.
254
+ 2. ``_local_state_dict/_load_local_state_dict``: this pair of APIs return
255
+ and load local sharded, flattened parameters. The values returned
256
+ by ``_local_state_dict`` can be directly used by FSDP and are only
257
+ meaningful to FSDP (because parameters are flattened). Note that
258
+ these APIs are meant for use via the :func:`state_dict_type`
259
+ context manager as follows:
260
+ >>> # xdoctest: +SKIP("undefined variables")
261
+ >>> with fsdp.state_dict_type(StateDictType.LOCAL_STATE_DICT):
262
+ ... state = fsdp.state_dict() # loads local state dict
263
+ 3. ``_sharded_state_dict/_load_sharded_state_dict``: this pair of APIs
264
+ return and load sharded, unflattened parameters. The ``state_dict``
265
+ returned by ``sharded_state_dict`` can be used by all other parallel
266
+ schemes (resharding may be required).
267
+ """
268
+
269
+ FULL_STATE_DICT = auto()
270
+ LOCAL_STATE_DICT = auto()
271
+ SHARDED_STATE_DICT = auto()
272
+
273
+
274
+ @dataclass
275
+ class StateDictConfig:
276
+ """
277
+ ``StateDictConfig`` is the base class for all ``state_dict`` configuration
278
+ classes. Users should instantiate a child class (e.g.
279
+ ``FullStateDictConfig``) in order to configure settings for the
280
+ corresponding ``state_dict`` type supported by FSDP.
281
+
282
+ Attributes:
283
+ offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict
284
+ values to CPU, and if ``False``, then FSDP keeps them on GPU.
285
+ (Default: ``False``)
286
+ """
287
+
288
+ offload_to_cpu: bool = False
289
+
290
+
291
+ @dataclass
292
+ class FullStateDictConfig(StateDictConfig):
293
+ """
294
+ ``FullStateDictConfig`` is a config class meant to be used with
295
+ ``StateDictType.FULL_STATE_DICT``. We recommend enabling both
296
+ ``offload_to_cpu=True`` and ``rank0_only=True`` when saving full state
297
+ dicts to save GPU memory and CPU memory, respectively. This config class
298
+ is meant to be used via the :func:`state_dict_type` context manager as
299
+ follows:
300
+
301
+ >>> # xdoctest: +SKIP("undefined variables")
302
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
303
+ >>> fsdp = FSDP(model, auto_wrap_policy=...)
304
+ >>> cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
305
+ >>> with FSDP.state_dict_type(fsdp, StateDictType.FULL_STATE_DICT, cfg):
306
+ >>> state = fsdp.state_dict()
307
+ >>> # `state` will be empty on non rank 0 and contain CPU tensors on rank 0.
308
+ >>> # To reload checkpoint for inference, finetuning, transfer learning, etc:
309
+ >>> model = model_fn() # Initialize model in preparation for wrapping with FSDP
310
+ >>> if dist.get_rank() == 0:
311
+ >>> # Load checkpoint only on rank 0 to avoid memory redundancy
312
+ >>> state_dict = torch.load("my_checkpoint.pt")
313
+ >>> model.load_state_dict(state_dict)
314
+ >>> # All ranks initialize FSDP module as usual. `sync_module_states` argument
315
+ >>> # communicates loaded checkpoint states from rank 0 to rest of the world.
316
+ >>> fsdp = FSDP(model, device_id=torch.cuda.current_device(), auto_wrap_policy=..., sync_module_states=True)
317
+ >>> # After this point, all ranks have FSDP model with loaded checkpoint.
318
+
319
+ Attributes:
320
+ rank0_only (bool): If ``True``, then only rank 0 saves the full state
321
+ dict, and nonzero ranks save an empty dict. If ``False``, then all
322
+ ranks save the full state dict. (Default: ``False``)
323
+ """
324
+
325
+ rank0_only: bool = False
326
+
327
+
328
+ @dataclass
329
+ class LocalStateDictConfig(StateDictConfig):
330
+ pass
331
+
332
+
333
+ @dataclass
334
+ class ShardedStateDictConfig(StateDictConfig):
335
+ """
336
+ ``ShardedStateDictConfig`` is a config class meant to be used with
337
+ ``StateDictType.SHARDED_STATE_DICT``.
338
+
339
+ Attributes:
340
+ _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
341
+ as ``DTensor``, and if ``False``, then FSDP saves them as
342
+ ``ShardedTensor``. (Default: ``False``)
343
+
344
+ .. warning:: ``_use_dtensor`` is a private field of :class:`ShardedStateDictConfig`
345
+ and it is used by FSDP to determine the type of state dict values. Users should not
346
+ manually modify ``_use_dtensor``.
347
+ """
348
+
349
+ _use_dtensor: bool = False
350
+
351
+
352
+ @dataclass
353
+ class OptimStateDictConfig:
354
+ """
355
+ ``OptimStateDictConfig`` is the base class for all ``optim_state_dict``
356
+ configuration classes. Users should instantiate a child class (e.g.
357
+ ``FullOptimStateDictConfig``) in order to configure settings for the
358
+ corresponding ``optim_state_dict`` type supported by FSDP.
359
+
360
+ Attributes:
361
+ offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict's
362
+ tensor values to CPU, and if ``False``, then FSDP keeps them on the
363
+ original device (which is GPU unless parameter CPU offloading is
364
+ enabled). (Default: ``True``)
365
+ """
366
+
367
+ offload_to_cpu: bool = True
368
+
369
+
370
+ @dataclass
371
+ class FullOptimStateDictConfig(OptimStateDictConfig):
372
+ """
373
+ Attributes:
374
+ rank0_only (bool): If ``True``, then only rank 0 saves the full state
375
+ dict, and nonzero ranks save an empty dict. If ``False``, then all
376
+ ranks save the full state dict. (Default: ``False``)
377
+ """
378
+
379
+ rank0_only: bool = False
380
+
381
+
382
+ @dataclass
383
+ class LocalOptimStateDictConfig(OptimStateDictConfig):
384
+ offload_to_cpu: bool = False
385
+
386
+
387
+ @dataclass
388
+ class ShardedOptimStateDictConfig(OptimStateDictConfig):
389
+ """
390
+ ``ShardedOptimStateDictConfig`` is a config class meant to be used with
391
+ ``StateDictType.SHARDED_STATE_DICT``.
392
+
393
+ Attributes:
394
+ _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
395
+ as ``DTensor``, and if ``False``, then FSDP saves them as
396
+ ``ShardedTensor``. (Default: ``False``)
397
+
398
+ .. warning:: ``_use_dtensor`` is a private field of :class:`ShardedOptimStateDictConfig`
399
+ and it is used by FSDP to determine the type of state dict values. Users should not
400
+ manually modify ``_use_dtensor``.
401
+ """
402
+
403
+ _use_dtensor: bool = False
404
+
405
+
406
+ @dataclass
407
+ class StateDictSettings:
408
+ state_dict_type: StateDictType
409
+ state_dict_config: StateDictConfig
410
+ optim_state_dict_config: OptimStateDictConfig
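The classes in ``api.py`` are plain configuration objects consumed by the ``FullyShardedDataParallel`` constructor defined in the next file. Below is a minimal sketch composing them; it is not part of this diff and assumes a multi-GPU process group is already initialized.

import torch
import torch.nn as nn
from torch.distributed.fsdp import (
    BackwardPrefetch,
    CPUOffload,
    FullyShardedDataParallel as FSDP,
    MixedPrecision,
    ShardingStrategy,
)

model = nn.Sequential(nn.Linear(1024, 1024), nn.Linear(1024, 1024)).cuda()
fsdp_model = FSDP(
    model,
    sharding_strategy=ShardingStrategy.FULL_SHARD,    # shard params, grads, and optimizer state
    backward_prefetch=BackwardPrefetch.BACKWARD_PRE,  # overlap next all-gather with grad compute
    mixed_precision=MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.float32,  # keep gradient reduction in full precision
    ),
    cpu_offload=CPUOffload(offload_params=False),
)

Keeping these options as dataclasses rather than loose keyword arguments lets the same configuration objects be reused across FSDP instances.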
venv/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py ADDED
@@ -0,0 +1,2075 @@
1
+ # mypy: ignore-errors
2
+
3
+ import contextlib
4
+ import copy
5
+ import functools
6
+ import math
7
+ import traceback
8
+ import warnings
9
+ from contextlib import contextmanager
10
+ from enum import auto, Enum
11
+ from typing import (
12
+ Any,
13
+ Callable,
14
+ Dict,
15
+ Generator,
16
+ Iterable,
17
+ Iterator,
18
+ List,
19
+ Optional,
20
+ Tuple,
21
+ Union,
22
+ )
23
+
24
+ import torch
25
+ import torch.distributed as dist
26
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
27
+ import torch.nn as nn
28
+ from torch.distributed._tensor import DeviceMesh
29
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
30
+ _CHECKPOINT_WRAPPED_MODULE,
31
+ ActivationWrapper,
32
+ )
33
+ from torch.distributed.algorithms._comm_hooks import LOW_PRECISION_HOOKS
34
+ from torch.distributed.fsdp._common_utils import (
35
+ _FSDPState,
36
+ _get_param_to_fqns,
37
+ FSDP_PREFIX,
38
+ FSDP_WRAPPED_MODULE,
39
+ TrainingState,
40
+ )
41
+ from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo
42
+ from torch.distributed.fsdp._init_utils import (
43
+ _check_orig_params_flattened,
44
+ _init_buffer_state,
45
+ _init_core_state,
46
+ _init_device_handle,
47
+ _init_extension,
48
+ _init_ignored_module_states,
49
+ _init_param_handle_from_module,
50
+ _init_prefetching_state,
51
+ _init_process_group_state,
52
+ _init_runtime_state,
53
+ _init_state_dict_state,
54
+ HYBRID_SHARDING_STRATEGIES,
55
+ ProcessGroupType,
56
+ )
57
+ from torch.distributed.fsdp._runtime_utils import (
58
+ _get_fsdp_root_states,
59
+ _is_fsdp_root,
60
+ _lazy_init,
61
+ _post_forward,
62
+ _post_forward_reshard,
63
+ _pre_forward,
64
+ _pre_forward_unshard,
65
+ _root_pre_forward,
66
+ )
67
+ from torch.distributed.fsdp._wrap_utils import _auto_wrap
68
+ from torch.distributed.fsdp.api import (
69
+ BackwardPrefetch,
70
+ CPUOffload,
71
+ FullOptimStateDictConfig,
72
+ FullStateDictConfig,
73
+ LocalOptimStateDictConfig,
74
+ LocalStateDictConfig,
75
+ MixedPrecision,
76
+ OptimStateDictConfig,
77
+ ShardedOptimStateDictConfig,
78
+ ShardedStateDictConfig,
79
+ ShardingStrategy,
80
+ StateDictConfig,
81
+ StateDictSettings,
82
+ StateDictType,
83
+ )
84
+ from torch.distributed.utils import _p_assert
85
+ from ._flat_param import FlatParameter
86
+
87
+ from ._optim_utils import (
88
+ _flatten_optim_state_dict,
89
+ _get_param_id_to_param_from_optim_input,
90
+ _get_param_key_to_param,
91
+ _get_param_to_param_id_from_optim_input,
92
+ _get_param_to_param_key,
93
+ _optim_state_dict,
94
+ _rekey_sharded_optim_state_dict,
95
+ _set_optim_use_dtensor,
96
+ )
97
+ from ._state_dict_utils import _register_all_state_dict_hooks
98
+ from ._unshard_param_utils import (
99
+ _deregister_orig_params,
100
+ _register_flat_param,
101
+ _register_orig_params,
102
+ _unshard_params,
103
+ _unshard_params_recurse,
104
+ )
105
+ from .wrap import CustomPolicy, ModuleWrapPolicy
106
+
107
+
108
+ __all__ = [
109
+ "FullyShardedDataParallel",
110
+ "OptimStateKeyType",
111
+ ]
112
+
113
+
114
+ FLAT_PARAM = "_flat_param"
115
+
116
+
117
+ class OptimStateKeyType(Enum):
118
+ """Represents the type of key in an optimizer state-dict."""
119
+
120
+ PARAM_NAME = auto()
121
+ PARAM_ID = auto()
122
+
123
+
124
+ class FullyShardedDataParallel(nn.Module, _FSDPState):
125
+ """A wrapper for sharding module parameters across data parallel workers.
126
+
127
+ This is inspired by `Xu et al.`_ as well as the ZeRO Stage 3 from DeepSpeed_.
128
+ FullyShardedDataParallel is commonly shortened to FSDP.
129
+
130
+ .. _`Xu et al.`: https://arxiv.org/abs/2004.13336
131
+ .. _DeepSpeed: https://www.deepspeed.ai/
132
+
133
+ For advanced notes please refer to :ref:`fsdp_notes`.
134
+
135
+ Example::
136
+
137
+ >>> # xdoctest: +SKIP("undefined variables")
138
+ >>> import torch
139
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
140
+ >>> torch.cuda.set_device(device_id)
141
+ >>> sharded_module = FSDP(my_module)
142
+ >>> optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
143
+ >>> x = sharded_module(x, y=3, z=torch.Tensor([1]))
144
+ >>> loss = x.sum()
145
+ >>> loss.backward()
146
+ >>> optim.step()
147
+
148
+ .. warning::
149
+ The optimizer must be initialized *after* the module has been wrapped
150
+ with FSDP since FSDP will shard and transform the module's parameters
151
+ in a way that may not preserve the original parameter variables. Thus,
152
+ the previously initialized optimizer may have stale references to the
153
+ parameters.
154
+
155
+ .. warning::
156
+ If the destination CUDA device has ID ``dev_id``, either (1)
157
+ ``module`` should already be placed on that device, (2) the device
158
+ should be set using ``torch.cuda.set_device(dev_id)``, or (3)
159
+ ``dev_id`` should be passed into the ``device_id`` constructor
160
+ argument. This FSDP instance's compute device will be that destination
161
+ device. For (1) and (3), the FSDP initialization always occurs on GPU.
162
+ For (2), the FSDP initialization happens on ``module`` 's current
163
+ device, which may be CPU.
164
+
165
+ .. warning::
166
+ FSDP currently does not support gradient accumulation outside
167
+ ``no_sync()`` when using CPU offloading. Trying to do so yields
168
+ incorrect results since FSDP will use the newly-reduced gradient
169
+ instead of accumulating with any existing gradient.
170
+
171
+ .. warning::
172
+ Changing the original parameter variable names after construction will
173
+ lead to undefined behavior.
174
+
175
+ .. warning::
176
+ Passing in the ``sync_module_states=True`` flag requires ``module`` to
177
+ be on GPU or to use the ``device_id`` argument to specify a CUDA device
178
+ that FSDP will move ``module`` to in the FSDP constructor. This is
179
+ because ``sync_module_states=True`` requires GPU communication.
180
+
181
+ .. warning::
182
+ As of PyTorch 1.12, FSDP only offers limited support for shared parameters
183
+ (for example, setting one ``Linear`` layer's weight to another's). In
184
+ particular, modules that share parameters must be wrapped as part of the
185
+ same FSDP unit. If enhanced shared parameter support is needed for your
186
+ use case, please ping https://github.com/pytorch/pytorch/issues/77724
187
+
188
+ .. warning::
189
+ FSDP has some constraints on freezing parameters (i.e. setting
190
+ ``param.requires_grad=False``). For ``use_orig_params=False``, each
191
+ FSDP instance must manage parameters that are all frozen or all
192
+ non-frozen. For ``use_orig_params=True``, FSDP supports mixing frozen
193
+ and non-frozen, but we recommend not doing so since then the gradient
194
+ memory usage will be higher than expected (namely, equivalent to not
195
+ freezing those parameters). This means that ideally, frozen parameters
196
+ should be isolated into their own ``nn.Module`` s and wrapped
197
+ separately with FSDP.
198
+
199
+ .. note::
200
+ Attempting to run the forward pass of a submodule that is contained in an
201
+ FSDP instance is not supported and will result in errors. This is because the
202
+ submodule's parameters will be sharded, but it itself is not an FSDP instance,
203
+ so its forward pass will not all-gather the full parameters appropriately.
204
+ This could potentially happen when attempting to run only the encoder of a
205
+ encoder-decoder model, and the encoder is not wrapped in its own FSDP instance. To
206
+ resolve this, please wrap the submodule in its own FSDP unit.
207
+
208
+ .. note::
209
+ FSDP moves input tensors to the ``forward`` method to the GPU compute
210
+ device, so the user does not need to manually move them from CPU.
211
+
212
+ .. warning::
213
+ The user should not modify the parameters between forward and backward
214
+ without using the :meth:`summon_full_params` context since the
215
+ modifications may not persist. Moreover, for ``use_orig_params=False``,
216
+ accessing the original parameters between forward and backward may
217
+ raise an illegal memory access.
218
+
219
+ .. warning::
220
+ For ``use_orig_params=True``, ``ShardingStrategy.SHARD_GRAD_OP``
221
+ exposes the unsharded parameters, not the sharded parameters, after
222
+ forward since it does not free the unsharded ones, unlike
223
+ ``ShardingStrategy.FULL_SHARD``. One caveat is that, since gradients
224
+ are always sharded or ``None``, ``ShardingStrategy.SHARD_GRAD_OP`` will
225
+ not expose the sharded gradients with the unsharded parameters after
226
+ forward. If you want to inspect the gradients, try
227
+ :meth:`summon_full_params` with ``with_grads=True``.
228
+
229
+ .. warning::
230
+ FSDP replaces managed modules' parameters with ``torch.Tensor`` views
231
+ during forward and backward computation for autograd-related reasons.
232
+ If your module's forward relies on saved references to the parameters
233
+ instead of reacquiring the references each iteration, then it will not
234
+ see FSDP's newly created views, and autograd will not work correctly.
235
+
236
+ .. note::
237
+ With ``limit_all_gathers=True``, you may see a gap in the FSDP
238
+ pre-forward where the CPU thread is not issuing any kernels. This is
239
+ intentional and shows the rate limiter in effect. Synchronizing the CPU
240
+ thread in that way prevents over-allocating memory for subsequent
241
+ all-gathers, and it should not actually delay GPU kernel execution.
242
+
243
+ .. note::
244
+ When using ``sharding_strategy=ShardingStrategy.HYBRID_SHARD`` with the
245
+ sharding process group being intra-node and the replication process
246
+ group being inter-node, setting ``NCCL_CROSS_NIC=1`` can help improve
247
+ the all-reduce times over the replication process group for some
248
+ cluster setups.
249
+
250
+ .. warning::
251
+ FSDP does not work with double backwards due to how it registers
252
+ backward hooks.
253
+
254
+ Args:
255
+ module (nn.Module):
256
+ This is the module to be wrapped with FSDP.
257
+ process_group (Optional[Union[ProcessGroup, Tuple[ProcessGroup, ProcessGroup]]]):
258
+ This is the process group over which the model is sharded and thus
259
+ the one used for FSDP's all-gather and reduce-scatter collective
260
+ communications. If ``None``, then FSDP uses the default process
261
+ group. For hybrid sharding strategies such as
262
+ ``ShardingStrategy.HYBRID_SHARD``, users can pass in a tuple of
263
+ process groups, representing the groups over which to shard and
264
+ replicate, respectively. If ``None``, then FSDP constructs process
265
+ groups for the user to shard intra-node and replicate inter-node.
266
+ (Default: ``None``)
267
+ sharding_strategy (Optional[ShardingStrategy]):
268
+ This configures the sharding strategy, which may trade off memory
269
+ saving and communication overhead. See :class:`ShardingStrategy`
270
+ for details. (Default: ``FULL_SHARD``)
271
+ cpu_offload (Optional[CPUOffload]):
272
+ This configures CPU offloading. If this is set to ``None``, then
273
+ no CPU offloading happens. See :class:`CPUOffload` for details.
274
+ (Default: ``None``)
275
+ auto_wrap_policy (Optional[Union[Callable[[nn.Module, bool, int], bool], ModuleWrapPolicy, CustomPolicy]]):
276
+ This specifies a policy to apply FSDP to submodules of ``module``,
277
+ which is needed for communication and computation overlap and thus
278
+ affects performance. If ``None``, then FSDP only applies to
279
+ ``module``, and users should manually apply FSDP to parent modules
280
+ themselves (proceeding bottom-up). For convenience, this accepts
281
+ ``ModuleWrapPolicy`` directly, which allows users to specify the
282
+ module classes to wrap (e.g. the transformer block). Otherwise,
283
+ this should be a callable that takes in three arguments
284
+ ``module: nn.Module``, ``recurse: bool``, and
285
+ ``nonwrapped_numel: int`` and should return a ``bool`` specifying
286
+ whether the passed-in ``module`` should have FSDP applied if
287
+ ``recurse=False`` or if the traversal should continue into the
288
+ module's subtree if ``recurse=True``. Users may add additional
289
+ arguments to the callable. The ``size_based_auto_wrap_policy`` in
290
+ ``torch.distributed.fsdp.wrap.py`` gives an example callable that
291
+ applies FSDP to a module if the parameters in its subtree exceed
292
+ 100M numel. We recommend printing the model after applying FSDP
293
+ and adjusting as needed.
294
+
295
+ Example::
296
+
297
+ >>> def custom_auto_wrap_policy(
298
+ >>> module: nn.Module,
299
+ >>> recurse: bool,
300
+ >>> nonwrapped_numel: int,
301
+ >>> # Additional custom arguments
302
+ >>> min_num_params: int = int(1e8),
303
+ >>> ) -> bool:
304
+ >>> return nonwrapped_numel >= min_num_params
305
+ >>> # Configure a custom `min_num_params`
306
+ >>> my_auto_wrap_policy = functools.partial(custom_auto_wrap_policy, min_num_params=int(1e5))
307
+
308
+ backward_prefetch (Optional[BackwardPrefetch]):
309
+ This configures explicit backward prefetching of all-gathers. If
310
+ ``None``, then FSDP does not backward prefetch, and there is no
311
+ communication and computation overlap in the backward pass. See
312
+ :class:`BackwardPrefetch` for details. (Default: ``BACKWARD_PRE``)
313
+ mixed_precision (Optional[MixedPrecision]):
314
+ This configures native mixed precision for FSDP. If this is set to
315
+ ``None``, then no mixed precision is used. Otherwise, parameter,
316
+ buffer, and gradient reduction dtypes can be set. See
317
+ :class:`MixedPrecision` for details. (Default: ``None``)
318
+ ignored_modules (Optional[Iterable[torch.nn.Module]]): Modules whose
319
+ own parameters and child modules' parameters and buffers are
320
+ ignored by this instance. None of the modules directly in
321
+ ``ignored_modules`` should be :class:`FullyShardedDataParallel`
322
+ instances, and any child modules that are already-constructed
323
+ :class:`FullyShardedDataParallel` instances will not be ignored if
324
+ they are nested under this instance. This argument may be used to
325
+ avoid sharding specific parameters at module granularity when using an
326
+ ``auto_wrap_policy`` or if parameters' sharding is not managed by
327
+ FSDP. (Default: ``None``)
328
+ param_init_fn (Optional[Callable[[nn.Module], None]]):
329
+ A ``Callable[torch.nn.Module] -> None`` that
330
+ specifies how modules that are currently on the meta device should
331
+ be initialized onto an actual device. As of v1.12, FSDP detects
332
+ modules with parameters or buffers on meta device via ``is_meta``
333
+ and either applies ``param_init_fn`` if specified or calls
334
+ ``nn.Module.reset_parameters()`` otherwise. For both cases, the
335
+ implementation should *only* initialize the parameters/buffers of
336
+ the module, not those of its submodules. This is to avoid
337
+ re-initialization. In addition, FSDP also supports deferred
338
+ initialization via torchdistX's (https://github.com/pytorch/torchdistX)
339
+ ``deferred_init()`` API, where the deferred modules are initialized
340
+ by calling ``param_init_fn`` if specified or torchdistX's default
341
+ ``materialize_module()`` otherwise. If ``param_init_fn`` is
342
+ specified, then it is applied to all meta-device modules, meaning
343
+ that it should probably case on the module type. FSDP calls the
344
+ initialization function before parameter flattening and sharding.
345
+
346
+ Example::
347
+
348
+ >>> # xdoctest: +SKIP("undefined variables")
349
+ >>> module = MyModule(device="meta")
350
+ >>> def my_init_fn(module: nn.Module):
351
+ >>> # E.g. initialize depending on the module type
352
+ >>> ...
353
+ >>> fsdp_model = FSDP(module, param_init_fn=my_init_fn, auto_wrap_policy=size_based_auto_wrap_policy)
354
+ >>> print(next(fsdp_model.parameters()).device) # current CUDA device
355
+ >>> # With torchdistX
356
+ >>> module = deferred_init.deferred_init(MyModule, device="cuda")
357
+ >>> # Will initialize via deferred_init.materialize_module().
358
+ >>> fsdp_model = FSDP(module, auto_wrap_policy=size_based_auto_wrap_policy)
359
+
360
+ device_id (Optional[Union[int, torch.device]]): An ``int`` or
361
+ ``torch.device`` giving the CUDA device on which FSDP
362
+ initialization takes place, including the module initialization
363
+ if needed and the parameter sharding. This should be specified to
364
+ improve initialization speed if ``module`` is on CPU. If the
365
+ default CUDA device was set (e.g. via ``torch.cuda.set_device``),
366
+ then the user may pass ``torch.cuda.current_device`` to this.
367
+ (Default: ``None``)
368
+ sync_module_states (bool): If ``True``, then each FSDP module will
369
+ broadcast module parameters and buffers from rank 0 to ensure that
370
+ they are replicated across ranks (adding communication overhead to
371
+ this constructor). This can help load ``state_dict`` checkpoints
372
+ via ``load_state_dict`` in a memory efficient way. See
373
+ :class:`FullStateDictConfig` for an example of this. (Default:
374
+ ``False``)
375
+ forward_prefetch (bool): If ``True``, then FSDP *explicitly* prefetches
376
+ the next forward-pass all-gather before the current forward
377
+ computation. This is only useful for CPU-bound workloads, in which
378
+ case issuing the next all-gather earlier may improve overlap. This
379
+ should only be used for static-graph models since the prefetching
380
+ follows the first iteration's execution order. (Default: ``False``)
381
+ limit_all_gathers (bool): If ``True``, then FSDP explicitly
382
+ synchronizes the CPU thread to ensure GPU memory usage from only
383
+ *two* consecutive FSDP instances (the current instance running
384
+ computation and the next instance whose all-gather is prefetched).
385
+ If ``False``, then FSDP allows the CPU thread to issue all-gathers
386
+ without any extra synchronization. (Default: ``True``) We often
387
+ refer to this feature as the "rate limiter". This flag should only
388
+ be set to ``False`` for specific CPU-bound workloads with low
389
+ memory pressure in which case the CPU thread can aggressively issue
390
+ all kernels without concern for the GPU memory usage.
391
+ use_orig_params (bool): Setting this to ``True`` has FSDP use
392
+ ``module`` 's original parameters. FSDP exposes those original
393
+ parameters to the user via :meth:`nn.Module.named_parameters`
394
+ instead of FSDP's internal :class:`FlatParameter` s. This means
395
+ that the optimizer step runs on the original parameters, enabling
396
+ per-original-parameter hyperparameters. FSDP preserves the original
397
+ parameter variables and manipulates their data between unsharded
398
+ and sharded forms, where they are always views into the underlying
399
+ unsharded or sharded :class:`FlatParameter`, respectively. With the
400
+ current algorithm, the sharded form is always 1D, losing the
401
+ original tensor structure. An original parameter may have all,
402
+ some, or none of its data present for a given rank. In the none
403
+ case, its data will be like a size-0 empty tensor. Users should not
404
+ author programs relying on what data is present for a given
405
+ original parameter in its sharded form. ``True`` is required to
406
+ use ``torch.compile()``. Setting this to ``False`` exposes FSDP's
407
+ internal :class:`FlatParameter` s to the user via
408
+ :meth:`nn.Module.named_parameters`. (Default: ``False``)
409
+ ignored_states (Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]):
410
+ Ignored parameters or modules that will not be managed by this FSDP
411
+ instance, meaning that the parameters are not sharded and their
412
+ gradients are not reduced across ranks. This argument unifies with
413
+ the existing ``ignored_modules`` argument, and we may deprecate
414
+ ``ignored_modules`` soon. For backward compatibility, we keep both
415
+ ``ignored_states`` and `ignored_modules``, but FSDP only allows one
416
+ of them to be specified as not ``None``.
417
+ """
418
+
419
+ def __init__(
420
+ self,
421
+ module: nn.Module,
422
+ process_group: ProcessGroupType = None,
423
+ sharding_strategy: Optional[ShardingStrategy] = None,
424
+ cpu_offload: Optional[CPUOffload] = None,
425
+ auto_wrap_policy: Optional[
426
+ Union[Callable, ModuleWrapPolicy, CustomPolicy]
427
+ ] = None,
428
+ backward_prefetch: Optional[BackwardPrefetch] = BackwardPrefetch.BACKWARD_PRE,
429
+ mixed_precision: Optional[MixedPrecision] = None,
430
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
431
+ param_init_fn: Optional[Callable[[nn.Module], None]] = None,
432
+ device_id: Optional[Union[int, torch.device]] = None,
433
+ sync_module_states: bool = False,
434
+ forward_prefetch: bool = False,
435
+ limit_all_gathers: bool = True,
436
+ use_orig_params: bool = False,
437
+ ignored_states: Union[
438
+ Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
439
+ ] = None,
440
+ device_mesh: Optional[DeviceMesh] = None,
441
+ ):
442
+ torch._C._log_api_usage_once("torch.distributed.fsdp")
443
+ super().__init__()
444
+ _init_ignored_module_states(self, module, ignored_modules, ignored_states)
445
+ _init_device_handle(self, module, self._ignored_params, device_id)
446
+
447
+ # Add module annotations for Dynamo support (see function for details)
448
+ _annotate_modules_for_dynamo(module, self._ignored_modules, use_orig_params)
449
+
450
+ # Initializes self.process_group, along with rank and world size. This will
451
+ # also set another attribute, _inter_node_pg, to control the process group
452
+ # over which sharding occurs, if sharding_strategy is {HYBRID_SHARD, _HYBRID_SHARD_ZERO2}.
453
+ # Note that this is done before auto_wrapping, so that child FSDP modules simply pick up
454
+ # the same process group state as the root FSDP module.
455
+ self._device_mesh = device_mesh
456
+ _init_process_group_state(
457
+ self,
458
+ process_group,
459
+ sharding_strategy,
460
+ auto_wrap_policy,
461
+ device_mesh,
462
+ )
463
+ if auto_wrap_policy is not None:
464
+ root_kwargs = {
465
+ "process_group": process_group,
466
+ "sharding_strategy": sharding_strategy,
467
+ "cpu_offload": cpu_offload,
468
+ "backward_prefetch": backward_prefetch,
469
+ "mixed_precision": mixed_precision,
470
+ "param_init_fn": param_init_fn,
471
+ "device_id": device_id,
472
+ "sync_module_states": sync_module_states,
473
+ "forward_prefetch": forward_prefetch,
474
+ "limit_all_gathers": limit_all_gathers,
475
+ "use_orig_params": use_orig_params,
476
+ "ignored_states": self._ignored_params,
477
+ "device_mesh": device_mesh,
478
+ }
479
+ if sharding_strategy in HYBRID_SHARDING_STRATEGIES and device_mesh is None:
480
+ # Share root process groups with children to maintain
481
+ # the invariant that all FSDP modules will have the same
482
+ # process groups.
483
+ root_kwargs["process_group"] = (self.process_group, self._inter_node_pg)
484
+
485
+ _auto_wrap(
486
+ module,
487
+ auto_wrap_policy,
488
+ self._ignored_modules,
489
+ self._ignored_params,
490
+ root_kwargs,
491
+ FullyShardedDataParallel,
492
+ )
493
+
494
+ backward_prefetch_limit = 1
495
+ forward_prefetch_limit = 1
496
+ _init_core_state(
497
+ self,
498
+ sharding_strategy,
499
+ mixed_precision,
500
+ cpu_offload,
501
+ limit_all_gathers,
502
+ use_orig_params,
503
+ backward_prefetch_limit,
504
+ forward_prefetch_limit,
505
+ )
506
+ _init_runtime_state(self)
507
+ _init_prefetching_state(self, backward_prefetch, forward_prefetch)
508
+ _init_buffer_state(self, module)
509
+ # extension needs to be set before `_init_param_handle_from_module()`
510
+ _init_extension(self, device_mesh)
511
+ _init_param_handle_from_module(
512
+ self,
513
+ module,
514
+ device_id,
515
+ param_init_fn,
516
+ sync_module_states,
517
+ )
518
+ self._fsdp_wrapped_module = module
519
+ if not use_orig_params:
520
+ _check_orig_params_flattened(self, self._ignored_params)
521
+ _register_flat_param(self, self)
522
+
523
+ # `_state_dict_type` controls the `state_dict()` behavior, which is
524
+ # implemented using post-save and pre-load hooks
525
+ _init_state_dict_state(self)
526
+ _register_all_state_dict_hooks(self)
527
+
528
+ @property
529
+ def module(self) -> nn.Module:
530
+ """Return the wrapped module."""
531
+ # FSDP's `.module` must refer to the innermost wrapped module when
532
+ # composing with other module wrappers in order for state dict to work
533
+ if isinstance(self._fsdp_wrapped_module, ActivationWrapper):
534
+ return getattr(self._fsdp_wrapped_module, _CHECKPOINT_WRAPPED_MODULE)
535
+ return self._fsdp_wrapped_module
536
+
537
+ @property
538
+ def _has_params(self) -> bool:
539
+ """Returns whether this FSDP instance manages any parameters."""
540
+ return hasattr(self, "_handle") and self._handle is not None
541
+
542
+ @property
543
+ def _flat_param(self) -> Optional[FlatParameter]:
544
+ return self._handle.flat_param if self._handle else None
545
+
546
+ def __getattr__(self, name: str) -> Any:
547
+ """Forward missing attributes to the wrapped module."""
548
+ try:
549
+ return super().__getattr__(name) # defer to nn.Module's logic
550
+ except AttributeError:
551
+ return getattr(self._fsdp_wrapped_module, name)
552
+
553
+ def __getitem__(self, key: int) -> Any:
554
+ """Forward indexing calls in case the module is an ``nn.Sequential``."""
555
+ if hasattr(self, FSDP_WRAPPED_MODULE):
556
+ return self._fsdp_wrapped_module.__getitem__(key) # type: ignore[operator]
557
+ return super().__getitem__(key)
558
+
559
+ def check_is_root(self) -> bool:
560
+ """Check if this instance is a root FSDP module."""
561
+ return _is_fsdp_root(self, self)
562
+
563
+ @staticmethod
564
+ def fsdp_modules(
565
+ module: nn.Module,
566
+ root_only: bool = False,
567
+ ) -> List["FullyShardedDataParallel"]:
568
+ """Return all nested FSDP instances.
569
+
570
+ This possibly includes ``module`` itself and only includes FSDP root modules if ``root_only=True``.
571
+
572
+ Args:
573
+ module (torch.nn.Module): Root module, which may or may not be an
574
+ ``FSDP`` module.
575
+ root_only (bool): Whether to return only FSDP root modules.
576
+ (Default: ``False``)
577
+
578
+ Returns:
579
+ List[FullyShardedDataParallel]: FSDP modules that are nested in
580
+ the input ``module``.
581
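+
+ .. note:: A minimal usage sketch (added for illustration; ``model`` is
+ assumed to be a module that already contains FSDP-wrapped
+ submodules):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> all_fsdp = FullyShardedDataParallel.fsdp_modules(model)
+ >>> root_fsdp = FullyShardedDataParallel.fsdp_modules(model, root_only=True)
+ >>> assert set(root_fsdp).issubset(set(all_fsdp))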
+ """
582
+ if root_only:
583
+ return _get_fsdp_root_states(module)
584
+ return traversal_utils._get_fsdp_states(module)
585
+
586
+ def apply(self, fn: Callable[[nn.Module], None]) -> "FullyShardedDataParallel":
587
+ r"""Apply ``fn`` recursively to every submodule (as returned by ``.children()``) as well as self.
588
+
589
+ Typical use includes initializing the parameters of a model (see also :ref:`nn-init-doc`).
590
+
591
+ Compared to ``torch.nn.Module.apply``, this version additionally gathers
592
+ the full parameters before applying ``fn``. It should not be called from
593
+ within another ``summon_full_params`` context.
594
+
595
+ Args:
596
+ fn (:class:`Module` -> None): function to be applied to each submodule
597
+
598
+ Returns:
599
+ Module: self
600
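+
+ .. note:: A minimal usage sketch (added for illustration; ``fsdp_model``
+ is a placeholder for an already-wrapped model):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> def init_weights(m):
+ >>> if isinstance(m, nn.Linear):
+ >>> nn.init.xavier_uniform_(m.weight)
+ >>> fsdp_model.apply(init_weights) # full params gathered before ``fn`` runs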
+ """
601
+ uninitialized = self._is_root is None
602
+ self._assert_state(TrainingState.IDLE)
603
+ # Use `_unshard_params_recurse()` with `recurse=False` instead of
604
+ # `_unshard_fsdp_state_params()` directly to perform lazy
605
+ # initialization, which is needed to initialize `FlatParameter`
606
+ # parameter attributes as required by the unshard logic
607
+ with _unshard_params_recurse(
608
+ self,
609
+ self,
610
+ recurse=False,
611
+ writeback=True,
612
+ rank0_only=False,
613
+ offload_to_cpu=False,
614
+ with_grads=False,
615
+ ):
616
+ ret = super().apply(fn)
617
+
618
+ # Reset lazy init called in `_unshard_params_recurse()` since `apply()`
619
+ # may have been called on FSDP instance that is not truly a root, in
620
+ # which case it will be incorrectly marked as one.
621
+ if uninitialized and self._is_root:
622
+ for module in traversal_utils._get_fsdp_states(self):
623
+ module._reset_lazy_init()
624
+
625
+ return ret
626
+
627
+ def _mixed_precision_enabled_for_buffers(self) -> bool:
628
+ """Return whether the user explicitly enabled buffer mixed precision.
629
+
630
+ NOTE: Unlike parameters and gradient reduction, buffer mixed precision
631
+ is applied at the FSDP instance level, not the ``FlatParameter`` level,
632
+ which may be different for the composable code path.
633
+ """
634
+ return self.mixed_precision.buffer_dtype is not None
635
+
636
+ def _low_precision_hook_enabled(self) -> bool:
637
+ """Whether a low precision hook is registered or not."""
638
+ return self._comm_hook is not None and self._comm_hook in LOW_PRECISION_HOOKS
639
+
640
+ def _reset_lazy_init(self) -> None:
641
+ """Reset instance so :func:`_lazy_init` will run on the next forward."""
642
+ self._is_root: Optional[bool] = None
643
+
644
+ @staticmethod
645
+ def set_state_dict_type(
646
+ module: nn.Module,
647
+ state_dict_type: StateDictType,
648
+ state_dict_config: Optional[StateDictConfig] = None,
649
+ optim_state_dict_config: Optional[OptimStateDictConfig] = None,
650
+ ) -> StateDictSettings:
651
+ """Set the ``state_dict_type`` of all the descendant FSDP modules of the target module.
652
+
653
+ Also takes (optional) configuration for the model's and optimizer's state dict.
654
+ The target module does not have to be an FSDP module. If the target
656
+ module is an FSDP module, its ``state_dict_type`` will also be changed.
656
+
657
+ .. note:: This API should be called for only the top-level (root)
658
+ module.
659
+
660
+ .. note:: This API enables users to transparently use the conventional
661
+ ``state_dict`` API to take model checkpoints in cases where the
662
+ root FSDP module is wrapped by another ``nn.Module``. For example,
663
+ the following will ensure ``state_dict`` is called on all non-FSDP
664
+ instances, while dispatching into `sharded_state_dict` implementation
665
+ for FSDP:
666
+
667
+ Example::
668
+
669
+ >>> # xdoctest: +SKIP("undefined variables")
670
+ >>> model = DDP(FSDP(...))
671
+ >>> FSDP.set_state_dict_type(
672
+ >>> model,
673
+ >>> StateDictType.SHARDED_STATE_DICT,
674
+ >>> state_dict_config = ShardedStateDictConfig(offload_to_cpu=True),
675
+ >>> optim_state_dict_config = OptimStateDictConfig(offload_to_cpu=True),
676
+ >>> )
677
+ >>> param_state_dict = model.state_dict()
678
+ >>> optim_state_dict = FSDP.optim_state_dict(model, optim)
679
+
680
+ Args:
681
+ module (torch.nn.Module): Root module.
682
+ state_dict_type (StateDictType): the desired ``state_dict_type`` to set.
683
+ state_dict_config (Optional[StateDictConfig]): the configuration for the
684
+ target ``state_dict_type``.
685
+ optim_state_dict_config (Optional[OptimStateDictConfig]): the configuration
686
+ for the optimizer state dict.
687
+
688
+ Returns:
689
+ A StateDictSettings that include the previous state_dict type and
690
+ configuration for the module.
691
+ """
692
+ _state_dict_type_to_config = {
693
+ StateDictType.FULL_STATE_DICT: FullStateDictConfig,
694
+ StateDictType.LOCAL_STATE_DICT: LocalStateDictConfig,
695
+ StateDictType.SHARDED_STATE_DICT: ShardedStateDictConfig,
696
+ }
697
+ _optim_state_dict_type_to_config = {
698
+ StateDictType.FULL_STATE_DICT: FullOptimStateDictConfig,
699
+ StateDictType.LOCAL_STATE_DICT: LocalOptimStateDictConfig,
700
+ StateDictType.SHARDED_STATE_DICT: ShardedOptimStateDictConfig,
701
+ }
702
+
703
+ # Use the default config if a state_dict config is not set.
704
+ state_dict_config_type = _state_dict_type_to_config[state_dict_type]
705
+ optim_state_dict_config_type = _optim_state_dict_type_to_config[state_dict_type]
706
+ if state_dict_config is None:
707
+ state_dict_config = state_dict_config_type()
708
+ if optim_state_dict_config is None:
709
+ optim_state_dict_config = optim_state_dict_config_type()
710
+ if state_dict_config_type != type(state_dict_config):
711
+ raise RuntimeError(
712
+ f"Expected state_dict_config of type {state_dict_config_type} "
713
+ f"but got {type(state_dict_config)}"
714
+ )
715
+ if optim_state_dict_config_type != type(optim_state_dict_config):
716
+ raise RuntimeError(
717
+ f"Expected optim_state_dict_config of type {optim_state_dict_config_type} "
718
+ f"but got {type(optim_state_dict_config)}"
719
+ )
720
+
721
+ # Set the state_dict type and configurations.
722
+ prev_state_dict_type = None
723
+ prev_state_dict_config = None
724
+ prev_optim_state_dict_config = None
725
+ for submodule in traversal_utils._get_fsdp_states(module):
726
+ if prev_state_dict_type is None:
727
+ prev_state_dict_type = submodule._state_dict_type
728
+ else:
729
+ assert (
730
+ prev_state_dict_type == submodule._state_dict_type
731
+ ), "All FSDP modules should have the same state_dict_type."
732
+ if prev_state_dict_config is None:
733
+ prev_state_dict_config = submodule._state_dict_config
734
+ else:
735
+ assert isinstance(
736
+ submodule._state_dict_config, type(prev_state_dict_config)
737
+ ), "All FSDP modules must have the same type of state_dict_config."
738
+ if prev_optim_state_dict_config is None:
739
+ prev_optim_state_dict_config = submodule._optim_state_dict_config
740
+ else:
741
+ assert isinstance(
742
+ submodule._optim_state_dict_config,
743
+ type(prev_optim_state_dict_config),
744
+ ), "All FSDP modules must have the same type of optim_state_dict_config."
745
+
746
+ submodule._state_dict_type = state_dict_type
747
+ submodule._state_dict_config = state_dict_config
748
+ submodule._optim_state_dict_config = optim_state_dict_config
749
+
750
+ return StateDictSettings(
751
+ prev_state_dict_type, prev_state_dict_config, prev_optim_state_dict_config
752
+ )
753
+
754
+ @staticmethod
755
+ def get_state_dict_type(module: nn.Module) -> StateDictSettings:
756
+ """Get the state_dict_type and the corresponding configurations for the FSDP modules rooted at ``module``.
757
+
758
+ The target module does not have to be an FSDP module.
759
+
760
+ Returns:
761
+ A ``StateDictSettings`` containing the state_dict_type and
762
+ state_dict / optim_state_dict configs that are currently set.
763
+
764
+ Raises:
765
+ ``AssertionError`` if the ``StateDictSettings`` for different
766
+ FSDP submodules differ.
767
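+
+ .. note:: A minimal usage sketch (added for illustration; ``model`` is
+ assumed to be an FSDP-wrapped module):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> settings = FullyShardedDataParallel.get_state_dict_type(model)
+ >>> print(settings.state_dict_type, type(settings.state_dict_config))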
+ """
768
+ state_dict_settings: Optional[StateDictSettings] = None
769
+ for submodule in FullyShardedDataParallel.fsdp_modules(module):
770
+ if state_dict_settings is None:
771
+ state_dict_settings = StateDictSettings(
772
+ state_dict_type=submodule._state_dict_type,
773
+ state_dict_config=submodule._state_dict_config,
774
+ optim_state_dict_config=submodule._optim_state_dict_config,
775
+ )
776
+ _set_optim_use_dtensor(submodule, state_dict_settings)
777
+ else:
778
+ submodule_settings = StateDictSettings(
779
+ submodule._state_dict_type,
780
+ submodule._state_dict_config,
781
+ submodule._optim_state_dict_config,
782
+ )
783
+ assert state_dict_settings == submodule_settings, (
784
+ "All FSDP modules must have the same state dict settings."
785
+ f"Got {submodule_settings} and {state_dict_settings}."
786
+ )
787
+ _set_optim_use_dtensor(submodule, submodule_settings)
788
+ return state_dict_settings
789
+
790
+ @staticmethod
791
+ @contextlib.contextmanager
792
+ def state_dict_type(
793
+ module: nn.Module,
794
+ state_dict_type: StateDictType,
795
+ state_dict_config: Optional[StateDictConfig] = None,
796
+ optim_state_dict_config: Optional[OptimStateDictConfig] = None,
797
+ ) -> Generator:
798
+ """Set the ``state_dict_type`` of all the descendant FSDP modules of the target module.
799
+
800
+ This context manager provides the same functionality as :meth:`set_state_dict_type`; see the documentation of
801
+ :meth:`set_state_dict_type` for details.
802
+
803
+ Example::
804
+
805
+ >>> # xdoctest: +SKIP("undefined variables")
806
+ >>> model = DDP(FSDP(...))
807
+ >>> with FSDP.state_dict_type(
808
+ >>> model,
809
+ >>> StateDictType.SHARDED_STATE_DICT,
810
+ >>> ):
811
+ >>> checkpoint = model.state_dict()
812
+
813
+ Args:
814
+ module (torch.nn.Module): Root module.
815
+ state_dict_type (StateDictType): the desired ``state_dict_type`` to set.
816
+ state_dict_config (Optional[StateDictConfig]): the model ``state_dict``
817
+ configuration for the target ``state_dict_type``.
818
+ optim_state_dict_config (Optional[OptimStateDictConfig]): the optimizer
819
+ ``state_dict`` configuration for the target ``state_dict_type``.
820
+ """
821
+ prev_state_dict_settings = FullyShardedDataParallel.set_state_dict_type(
822
+ module,
823
+ state_dict_type,
824
+ state_dict_config,
825
+ optim_state_dict_config,
826
+ )
827
+ yield
828
+ FullyShardedDataParallel.set_state_dict_type(
829
+ module,
830
+ prev_state_dict_settings.state_dict_type,
831
+ prev_state_dict_settings.state_dict_config,
832
+ prev_state_dict_settings.optim_state_dict_config,
833
+ )
834
+
835
+ def forward(self, *args: Any, **kwargs: Any) -> Any:
836
+ """Run the forward pass for the wrapped module, inserting FSDP-specific pre- and post-forward sharding logic."""
837
+ handle = self._handle
838
+ with torch.autograd.profiler.record_function(
839
+ "FullyShardedDataParallel.forward"
840
+ ):
841
+ args, kwargs = _root_pre_forward(self, self, args, kwargs)
842
+ unused = None
843
+ args, kwargs = _pre_forward(
844
+ self,
845
+ handle,
846
+ _pre_forward_unshard,
847
+ self._fsdp_wrapped_module,
848
+ args,
849
+ kwargs,
850
+ )
851
+ if handle:
852
+ _p_assert(
853
+ handle.flat_param.device == self.compute_device,
854
+ "Expected `FlatParameter` to be on the compute device "
855
+ f"{self.compute_device} but got {handle.flat_param.device}",
856
+ )
857
+ output = self._fsdp_wrapped_module(*args, **kwargs)
858
+ return _post_forward(
859
+ self, handle, _post_forward_reshard, self, unused, output
860
+ )
861
+
862
+ @staticmethod
863
+ @contextlib.contextmanager
864
+ def summon_full_params(
865
+ module: nn.Module,
866
+ recurse: bool = True,
867
+ writeback: bool = True,
868
+ rank0_only: bool = False,
869
+ offload_to_cpu: bool = False,
870
+ with_grads: bool = False,
871
+ ) -> Generator:
872
+ r"""Expose full params for FSDP instances with this context manager.
873
+
874
+ Can be useful *after* forward/backward for a model to get
875
+ the params for additional processing or checking. It can take a non-FSDP
876
+ module and will summon full params for all contained FSDP modules as
877
+ well as their children, depending on the ``recurse`` argument.
878
+
879
+ .. note:: This can be used on inner FSDPs.
880
+ .. note:: This can *not* be used within a forward or backward pass. Nor
881
+ can forward and backward be started from within this context.
882
+ .. note:: Parameters will revert to their local shards after the context
883
+ manager exits; storage behavior is the same as in the forward pass.
884
+ .. note:: The full parameters can be modified, but only the portion
885
+ corresponding to the local param shard will persist after the
886
+ context manager exits (unless ``writeback=False``, in which case
887
+ changes will be discarded). In the case where FSDP does not shard
888
+ the parameters, currently only when ``world_size == 1`` or with the
889
+ ``NO_SHARD`` strategy, the modification persists regardless of ``writeback``.
890
+ .. note:: This method works on modules which are not FSDP themselves but
891
+ may contain multiple independent FSDP units. In that case, the given
892
+ arguments will apply to all contained FSDP units.
893
+
894
+ .. warning:: Note that ``rank0_only=True`` in conjunction with
895
+ ``writeback=True`` is not currently supported and will raise an
896
+ error. This is because model parameter shapes would be different
897
+ across ranks within the context, and writing to them can lead to
898
+ inconsistency across ranks when the context is exited.
899
+
900
+ .. warning:: Note that ``offload_to_cpu`` and ``rank0_only=False`` will
901
+ result in full parameters being redundantly copied to CPU memory for
902
+ GPUs that reside on the same machine, which may incur the risk of
903
+ CPU OOM. It is recommended to use ``offload_to_cpu`` with
904
+ ``rank0_only=True``.
905
+
906
+ Args:
907
+ recurse (bool, Optional): recursively summon all params for nested
908
+ FSDP instances (default: True).
909
+ writeback (bool, Optional): if ``False``, modifications to params are
910
+ discarded after the context manager exits;
911
+ disabling this can be slightly more efficient (default: True)
912
+ rank0_only (bool, Optional): if ``True``, full parameters are
913
+ materialized on only global rank 0. This means that within the
914
+ context, only rank 0 will have full parameters and the other
915
+ ranks will have sharded parameters. Note that setting
916
+ ``rank0_only=True`` with ``writeback=True`` is not supported,
917
+ as model parameter shapes will be different across ranks
918
+ within the context, and writing to them can lead to
919
+ inconsistency across ranks when the context is exited.
920
+ offload_to_cpu (bool, Optional): If ``True``, full parameters are
921
+ offloaded to CPU. Note that this offloading currently only
922
+ occurs if the parameter is sharded (which is only not the case
923
+ for world_size = 1 or ``NO_SHARD`` config). It is recommended
924
+ to use ``offload_to_cpu`` with ``rank0_only=True`` to avoid
925
+ redundant copies of model parameters being offloaded to the same CPU memory.
926
+ with_grads (bool, Optional): If ``True``, gradients are also
927
+ unsharded with the parameters. Currently, this is only
928
+ supported when passing ``use_orig_params=True`` to the FSDP
929
+ constructor and ``offload_to_cpu=False`` to this method.
930
+ (Default: ``False``)
931
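+
+ .. note:: A minimal usage sketch (added for illustration; ``fsdp_model``
+ is a placeholder for an already-wrapped model):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> with FullyShardedDataParallel.summon_full_params(fsdp_model, writeback=False):
+ >>> total_norm = sum(p.norm() for p in fsdp_model.parameters())
+ >>> # parameters are re-sharded once the context exits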
+ """
932
+ with _unshard_params(
933
+ module, recurse, writeback, rank0_only, offload_to_cpu, with_grads
934
+ ):
935
+ yield
936
+
937
+ @contextlib.contextmanager
938
+ def _deregister_orig_params_ctx(self):
939
+ """Deregister the original parameters and expose the :class:`FlatParameter`.
940
+
941
+ If a :class:`FlatParameter` is sharded, then
942
+ this refreshes the sharded views before exiting. This method should
943
+ only be called when using the original parameters.
944
+ """
945
+ _p_assert(
946
+ self._use_orig_params,
947
+ "`_deregister_orig_params_ctx()` should only be called when "
948
+ "`_use_orig_params=True`",
949
+ )
950
+ for fsdp_module in traversal_utils._get_fsdp_states(self):
951
+ _deregister_orig_params(fsdp_module, fsdp_module)
952
+ try:
953
+ yield
954
+ finally:
955
+ for fsdp_module in traversal_utils._get_fsdp_states(self):
956
+ _register_orig_params(fsdp_module, fsdp_module)
957
+
958
+ def _apply(self, *args, **kwargs):
959
+ """Deregister the original parameters and expose the :class:`FlatParameter` s before calling ``_apply()``."""
960
+ # When using the original parameters: Since (1) the `FlatParameter`s
961
+ # own the storage and (2) `_apply()` is the subroutine underlying the
962
+ # most common storage-changing ops like `to()` and `cuda()`, we
963
+ # override `_apply()` to have the storage change directly performed on
964
+ # the `FlatParameter`s instead of applying to the original parameters
965
+ # and then writing back to the `FlatParameter`s.
966
+ context = (
967
+ self._deregister_orig_params_ctx()
968
+ if self._use_orig_params
969
+ else contextlib.nullcontext()
970
+ )
971
+ with context:
972
+ return super()._apply(*args, **kwargs)
973
+
974
+ def named_buffers(
975
+ self,
976
+ *args,
977
+ **kwargs,
978
+ ) -> Iterator[Tuple[str, torch.Tensor]]:
979
+ """Return an iterator over module buffers, yielding both the name of the buffer and the buffer itself.
980
+
981
+ Intercepts buffer names and removes all occurrences of the FSDP-specific flattened buffer prefix
982
+ when inside the :meth:`summon_full_params` context manager.
983
+ """
984
+ should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS
985
+ for buffer_name, buffer in super().named_buffers(*args, **kwargs):
986
+ if should_clean_name:
987
+ # Remove any instances of the FSDP-specific prefix; there can
988
+ # be multiple in the case of nested FSDP modules
989
+ buffer_name = buffer_name.replace(FSDP_PREFIX, "")
990
+ yield (buffer_name, buffer)
991
+
992
+ def named_parameters(
993
+ self,
994
+ *args,
995
+ **kwargs,
996
+ ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
997
+ """Return an iterator over module parameters, yielding both the name of the parameter and the parameter itself.
998
+
999
+ Intercepts parameter names and removes all occurrences of the FSDP-specific flattened parameter prefix
1000
+ when inside the :meth:`summon_full_params` context manager.
1001
+ """
1002
+ should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS
1003
+ for param_name, param in super().named_parameters(*args, **kwargs):
1004
+ if should_clean_name:
1005
+ # Remove any instances of the FSDP-specific prefix; there can
1006
+ # be multiple in the case of nested FSDP modules
1007
+ param_name = param_name.replace(FSDP_PREFIX, "")
1008
+ yield (param_name, param)
1009
+
1010
+ def _assert_state(self, state: Union[TrainingState, List[TrainingState]]) -> None:
1011
+ """Assert we are in the given state."""
1012
+ # Since assert can be turned off and this error checking
1013
+ # is really important, we use explicit error checking
1014
+ # and raise a ValueError if needed.
1015
+ if isinstance(state, TrainingState):
1016
+ state = [state]
1017
+ if self.training_state not in state:
1018
+ msg = (
1019
+ f"expected to be in states {state} but current state "
1020
+ f"is {self.training_state}"
1021
+ )
1022
+ # In case we are failing in the context of autograd hook, asserting
1023
+ # may not generate useful msg. So, let's print it to be sure.
1024
+ if self.rank == 0:
1025
+ print(f"Asserting FSDP instance is: {self}")
1026
+ print(f"ERROR: {msg}")
1027
+ traceback.print_stack()
1028
+ raise ValueError(msg)
1029
+
1030
+ @contextmanager
1031
+ def no_sync(self) -> Generator:
1032
+ """Disable gradient synchronizations across FSDP instances.
1033
+
1034
+ Within this context, gradients will be accumulated in module
1035
+ variables, which will later be synchronized in the first
1036
+ forward-backward pass after exiting the context. This should only be
1037
+ used on the root FSDP instance and will recursively apply to all
1038
+ children FSDP instances.
1039
+
1040
+ .. note:: This likely results in higher memory usage because FSDP will
1041
+ accumulate the full model gradients (instead of gradient shards)
1042
+ until the eventual sync.
1043
+
1044
+ .. note:: When used with CPU offloading, the gradients will not be
1045
+ offloaded to CPU when inside the context manager. Instead, they
1046
+ will only be offloaded right after the eventual sync.
1047
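+
+ .. note:: A minimal gradient-accumulation sketch (added for
+ illustration; ``fsdp_model``, ``optim``, and ``batches`` are
+ placeholders):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> with fsdp_model.no_sync(): # skip gradient reduction
+ >>> for batch in batches[:-1]:
+ >>> fsdp_model(batch).sum().backward()
+ >>> fsdp_model(batches[-1]).sum().backward() # gradients sync here
+ >>> optim.step()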
+ """
1048
+ _lazy_init(self, self)
1049
+ if not self._is_root:
1050
+ raise RuntimeError(
1051
+ "`no_sync()` on inner FSDP instances is not supported. Please call `no_sync()` on root FSDP module."
1052
+ )
1053
+ self._assert_state(TrainingState.IDLE)
1054
+ old_flags = []
1055
+ for m in self.modules():
1056
+ if isinstance(m, FullyShardedDataParallel):
1057
+ old_flags.append((m, m._sync_gradients))
1058
+ m._sync_gradients = False
1059
+ try:
1060
+ yield
1061
+ finally:
1062
+ for m, old_flag in old_flags:
1063
+ assert not m._sync_gradients, (
1064
+ "`_sync_gradients` was incorrectly set to "
1065
+ "`True` while in the `no_sync()` context manager"
1066
+ )
1067
+ m._sync_gradients = old_flag
1068
+
1069
+ @torch.no_grad()
1070
+ def clip_grad_norm_(
1071
+ self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0
1072
+ ) -> torch.Tensor:
1073
+ """Clip the gradient norm of all parameters.
1074
+
1075
+ The norm is computed over all parameters' gradients as viewed as a single vector, and the
1076
+ gradients are modified in-place.
1077
+
1078
+ Args:
1079
+ max_norm (float or int): max norm of the gradients
1080
+ norm_type (float or int): type of the used p-norm. Can be ``'inf'``
1081
+ for infinity norm.
1082
+
1083
+ Returns:
1084
+ Total norm of the parameters (viewed as a single vector).
1085
+
1086
+ .. note:: If every FSDP instance uses ``NO_SHARD``, meaning that no
1087
+ gradients are sharded across ranks, then you may directly use
1088
+ :func:`torch.nn.utils.clip_grad_norm_`.
1089
+
1090
+ .. note:: If at least some FSDP instance uses a sharded strategy (i.e.
1091
+ one other than ``NO_SHARD``), then you should use this method
1092
+ instead of :func:`torch.nn.utils.clip_grad_norm_` since this method
1093
+ handles the fact that gradients are sharded across ranks.
1094
+
1095
+ .. note:: The total norm returned will have the "largest" dtype across
1096
+ all parameters/gradients as defined by PyTorch's type promotion
1097
+ semantics. For example, if *all* parameters/gradients use a low
1098
+ precision dtype, then the returned norm's dtype will be that low
1099
+ precision dtype, but if there exists at least one parameter/
1100
+ gradient using FP32, then the returned norm's dtype will be FP32.
1101
+
1102
+ .. warning:: This needs to be called on all ranks since it uses
1103
+ collective communications.
1104
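+
+ .. note:: A minimal training-loop sketch (added for illustration;
+ ``fsdp_model``, ``optim``, and ``loss`` are placeholders):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> loss.backward()
+ >>> fsdp_model.clip_grad_norm_(max_norm=1.0) # must run on all ranks
+ >>> optim.step()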
+ """
1105
+ _lazy_init(self, self)
1106
+ if not self._is_root:
1107
+ raise RuntimeError(
1108
+ "`clip_grad_norm_()` should only be called on the root FSDP instance"
1109
+ )
1110
+ self._assert_state(TrainingState.IDLE)
1111
+ # If every FSDP instance uses `NO_SHARD`, then we can directly use
1112
+ # the normal `nn.utils` one targeting local gradients
1113
+ all_no_shard = all(
1114
+ not handle.uses_sharded_strategy for handle in self._all_handles
1115
+ )
1116
+ if all_no_shard:
1117
+ return torch.nn.utils.clip_grad_norm_(
1118
+ self.parameters(), max_norm, norm_type
1119
+ )
1120
+ # Otherwise, there exists some FSDP instance using a sharded strategy,
1121
+ # where sharded and non-sharded parameters must be handled separately
1122
+ max_norm = float(max_norm)
1123
+ norm_type = float(norm_type)
1124
+ sharded_params = set()
1125
+ nonsharded_params = set() # `NO_SHARD` or not FSDP-managed
1126
+ grads: List[torch.Tensor] = []
1127
+ for handle in self._all_handles:
1128
+ target_set = (
1129
+ sharded_params if handle.uses_sharded_strategy else nonsharded_params
1130
+ )
1131
+ if handle._use_orig_params:
1132
+ for param in handle.flat_param._params:
1133
+ target_set.add(param)
1134
+ if param.grad is not None:
1135
+ grads.append(param.grad)
1136
+ else:
1137
+ target_set.add(handle.flat_param)
1138
+ if handle.flat_param.grad is not None:
1139
+ grads.append(handle.flat_param.grad)
1140
+ for param in self.parameters():
1141
+ not_fsdp_managed = (
1142
+ param not in sharded_params and param not in nonsharded_params
1143
+ )
1144
+ if not_fsdp_managed:
1145
+ nonsharded_params.add(param)
1146
+ if param.grad is not None:
1147
+ grads.append(param.grad)
1148
+ # Compute local norms (forced to be in FP32)
1149
+ local_sharded_norm = _get_grad_norm(sharded_params, norm_type).to(
1150
+ self.compute_device
1151
+ )
1152
+ local_nonsharded_norm = _get_grad_norm(nonsharded_params, norm_type).to(
1153
+ self.compute_device
1154
+ )
1155
+ # Reconstruct the total gradient norm depending on the norm type
1156
+ if norm_type == math.inf:
1157
+ total_norm = torch.maximum(local_sharded_norm, local_nonsharded_norm)
1158
+ dist.all_reduce(
1159
+ total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group
1160
+ )
1161
+ else:
1162
+ total_norm = local_sharded_norm**norm_type
1163
+ dist.all_reduce(total_norm, group=self.process_group)
1164
+ # All-reducing the local non-sharded norm would count it world-size
1166
+ # times instead of once
1166
+ total_norm += local_nonsharded_norm**norm_type
1167
+ total_norm = total_norm ** (1.0 / norm_type)
1168
+ if self.cpu_offload.offload_params:
1169
+ total_norm = total_norm.cpu()
1170
+
1171
+ clip_coef = max_norm / (total_norm + 1e-6)
1172
+ # Multiplying by the clamped coefficient is meaningless when it is
1173
+ # equal to 1, but it avoids the host-device sync that would result from
1174
+ # `if clip_coef < 1`
1175
+ clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
1176
+ for grad in grads:
1177
+ grad.mul_(clip_coef_clamped.to(grad.device, grad.dtype))
1178
+ # Use the "largest" dtype by type promotion semantics to use the same
1179
+ # dtype as if we did not force local norm computation to be in FP32
1180
+ if len(grads) == 0:
1181
+ # If this rank has no gradients, then we must default to FP32
1182
+ # unless we use additional communication, which we prefer to avoid
1183
+ # since `clip_grad_norm_()` is called in the training loop
1184
+ warnings.warn(
1185
+ f"Called FSDP.clip_grad_norm_() on rank {self.rank} with no "
1186
+ "gradients -- returning the total norm in the default dtype "
1187
+ f"{total_norm.dtype}"
1188
+ ) # warn since this is generally unexpected
1189
+ return total_norm
1190
+ total_norm_dtype = functools.reduce(
1191
+ torch.promote_types,
1192
+ [grad.dtype for grad in grads],
1193
+ )
1194
+ return total_norm.to(total_norm_dtype)
1195
+
1196
+ @staticmethod
1197
+ def _warn_optim_input(optim_input):
1198
+ if optim_input is not None:
1199
+ warnings.warn(
1200
+ "The `optim_input` argument is deprecated and will be removed after PyTorch 1.13. You may remove it "
1201
+ "from your code without changing its functionality."
1202
+ )
1203
+
1204
+ @staticmethod
1205
+ def _is_using_optim_input(optim_input, optim) -> bool:
1206
+ if optim_input is None and optim is None:
1207
+ # Use the default behavior of ``optim_input``
1208
+ return True
1209
+ if optim_input is not None:
1210
+ # Use the `optim_input` code path
1211
+ return True
1212
+ # Use the `optim` code path
1213
+ return False
1214
+
1215
+ @staticmethod
1216
+ def _warn_legacy_optim_state_dict(curr: str, new: str):
1217
+ warnings.warn(
1218
+ f"``FullyShardedDataParallel.{curr}``is being deprecated and is "
1219
+ f"replaced by ``FullyShardedDataParallel.{new}``. "
1220
+ f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2."
1221
+ )
1222
+
1223
+ @staticmethod
1224
+ def _optim_state_dict_impl(
1225
+ model: torch.nn.Module,
1226
+ optim: torch.optim.Optimizer,
1227
+ optim_state_dict: Dict[str, Any],
1228
+ optim_input: Optional[
1229
+ Union[
1230
+ List[Dict[str, Any]],
1231
+ Iterable[torch.nn.Parameter],
1232
+ ]
1233
+ ] = None,
1234
+ rank0_only: bool = True,
1235
+ full_state_dict: bool = True,
1236
+ group: Optional[dist.ProcessGroup] = None,
1237
+ cpu_offload: bool = True,
1238
+ ) -> Dict[str, Any]:
1239
+ """Transform the state-dict of an optimizer corresponding to a sharded model.
1240
+
1241
+ This is the internal API that is used by all the optim_state_dict implementations.
1242
+ Given model, optim, and the original optim_state_dict, this API removes the
1243
+ FSDP internal information and internal sharding from the optim_state_dict.
1244
+ """
1245
+ if full_state_dict:
1246
+ FullyShardedDataParallel._warn_optim_input(optim_input)
1247
+ using_optim_input = FullyShardedDataParallel._is_using_optim_input(
1248
+ optim_input,
1249
+ optim,
1250
+ )
1251
+ else:
1252
+ using_optim_input = False
1253
+ assert optim_input is None and not rank0_only
1254
+
1255
+ use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[
1256
+ 0
1257
+ ]._use_orig_params
1258
+ assert all(
1259
+ use_orig_params == m._use_orig_params
1260
+ for m in FullyShardedDataParallel.fsdp_modules(model)
1261
+ ), "Not all FSDP modules have the same _use_orig_params value"
1262
+
1263
+ return _optim_state_dict(
1264
+ model=model,
1265
+ optim=optim,
1266
+ optim_state_dict=optim_state_dict,
1267
+ optim_input=optim_input,
1268
+ rank0_only=rank0_only,
1269
+ shard_state=not full_state_dict,
1270
+ group=group,
1271
+ using_optim_input=using_optim_input,
1272
+ use_orig_params=use_orig_params,
1273
+ cpu_offload=cpu_offload,
1274
+ )
1275
+
1276
+ @staticmethod
1277
+ def _optim_state_dict_to_load_impl(
1278
+ optim_state_dict: Dict[str, Any],
1279
+ model: torch.nn.Module,
1280
+ optim_input: Optional[
1281
+ Union[
1282
+ List[Dict[str, Any]],
1283
+ Iterable[torch.nn.Parameter],
1284
+ ]
1285
+ ] = None,
1286
+ optim: Optional[torch.optim.Optimizer] = None,
1287
+ full_state_dict: bool = True,
1288
+ rank0_only: bool = False,
1289
+ is_named_optimizer: bool = False,
1290
+ group: Optional[dist.ProcessGroup] = None,
1291
+ ) -> Dict[str, Any]:
1292
+ """
1293
+ Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model.
1294
+
1295
+ This is the internal API that is used by all the load optim_state_dict implementations.
1296
+ Given model, optim, and the saved optim_state_dict, this API adds the FSDP
1297
+ internal information and internal sharding to the optim_state_dict.
1298
+ """
1299
+ if full_state_dict:
1300
+ FullyShardedDataParallel._warn_optim_input(optim_input)
1301
+ using_optim_input = FullyShardedDataParallel._is_using_optim_input(
1302
+ optim_input,
1303
+ optim,
1304
+ )
1305
+ else:
1306
+ using_optim_input = False
1307
+ assert optim_input is None and not rank0_only
1308
+
1309
+ use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[
1310
+ 0
1311
+ ]._use_orig_params
1312
+ assert all(
1313
+ use_orig_params == m._use_orig_params
1314
+ for m in FullyShardedDataParallel.fsdp_modules(model)
1315
+ ), "Not all FSDP modules have the same _use_orig_params value"
1316
+
1317
+ if rank0_only and dist.get_rank(group) > 0:
1318
+ optim_state_dict = {}
1319
+ sharded_osd = _flatten_optim_state_dict(
1320
+ optim_state_dict,
1321
+ model=model,
1322
+ use_orig_params=use_orig_params,
1323
+ optim=(optim if is_named_optimizer else None),
1324
+ rank0_only=rank0_only,
1325
+ group=group,
1326
+ )
1327
+ return _rekey_sharded_optim_state_dict(
1328
+ sharded_osd,
1329
+ model=model,
1330
+ optim=optim,
1331
+ optim_input=optim_input,
1332
+ using_optim_input=using_optim_input,
1333
+ is_named_optimizer=is_named_optimizer,
1334
+ )
1335
+
1336
+ @staticmethod
1337
+ def full_optim_state_dict(
1338
+ model: torch.nn.Module,
1339
+ optim: torch.optim.Optimizer,
1340
+ optim_input: Optional[
1341
+ Union[
1342
+ List[Dict[str, Any]],
1343
+ Iterable[torch.nn.Parameter],
1344
+ ]
1345
+ ] = None,
1346
+ rank0_only: bool = True,
1347
+ group: Optional[dist.ProcessGroup] = None,
1348
+ ) -> Dict[str, Any]:
1349
+ """Return the full optimizer state-dict.
1350
+
1351
+ Consolidates the full optimizer state on rank 0 and returns it
1352
+ as a :class:`dict` following the convention of
1353
+ :meth:`torch.optim.Optimizer.state_dict`, i.e. with keys ``"state"``
1354
+ and ``"param_groups"``. The flattened parameters in ``FSDP`` modules
1355
+ contained in ``model`` are mapped back to their unflattened parameters.
1356
+
1357
+ .. warning:: This needs to be called on all ranks since it uses
1358
+ collective communications. However, if ``rank0_only=True``, then
1359
+ the state dict is only populated on rank 0, and all other ranks
1360
+ return an empty :class:`dict`.
1361
+
1362
+ .. warning:: Unlike ``torch.optim.Optimizer.state_dict()``, this method
1363
+ uses full parameter names as keys instead of parameter IDs.
1364
+
1365
+ .. note:: Like in :meth:`torch.optim.Optimizer.state_dict`, the tensors
1366
+ contained in the optimizer state dict are not cloned, so there may
1367
+ be aliasing surprises. For best practices, consider saving the
1368
+ returned optimizer state dict immediately, e.g. using
1369
+ ``torch.save()``.
1370
+
1371
+ Args:
1372
+ model (torch.nn.Module): Root module (which may or may not be a
1373
+ :class:`FullyShardedDataParallel` instance) whose parameters
1374
+ were passed into the optimizer ``optim``.
1375
+ optim (torch.optim.Optimizer): Optimizer for ``model`` 's
1376
+ parameters.
1377
+ optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]):
1378
+ Input passed into the optimizer ``optim`` representing either a
1379
+ :class:`list` of parameter groups or an iterable of parameters;
1380
+ if ``None``, then this method assumes the input was
1381
+ ``model.parameters()``. This argument is deprecated, and there
1382
+ is no need to pass it in anymore. (Default: ``None``)
1383
+ rank0_only (bool): If ``True``, saves the populated :class:`dict`
1384
+ only on rank 0; if ``False``, saves it on all ranks. (Default:
1385
+ ``True``)
1386
+ group (dist.ProcessGroup): Model's process group or ``None`` if using
1387
+ the default process group. (Default: ``None``)
1388
+
1389
+ Returns:
1390
+ Dict[str, Any]: A :class:`dict` containing the optimizer state for
1391
+ ``model`` 's original unflattened parameters and including keys
1392
+ "state" and "param_groups" following the convention of
1393
+ :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=True``,
1394
+ then nonzero ranks return an empty :class:`dict`.
1395
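+
+ .. note:: A minimal save sketch (added for illustration; ``model``,
+ ``optim``, and ``PATH`` are placeholders):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> full_osd = FullyShardedDataParallel.full_optim_state_dict(model, optim)
+ >>> if dist.get_rank() == 0: # populated only on rank 0 by default
+ >>> torch.save(full_osd, PATH)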
+ """
1396
+ FullyShardedDataParallel._warn_legacy_optim_state_dict(
1397
+ "full_optim_state_dict", "optim_state_dict"
1398
+ )
1399
+ return FullyShardedDataParallel._optim_state_dict_impl(
1400
+ model=model,
1401
+ optim=optim,
1402
+ optim_state_dict=optim.state_dict(),
1403
+ optim_input=optim_input,
1404
+ rank0_only=rank0_only,
1405
+ group=group,
1406
+ full_state_dict=True,
1407
+ )
1408
+
1409
+ @staticmethod
1410
+ def sharded_optim_state_dict(
1411
+ model: torch.nn.Module,
1412
+ optim: torch.optim.Optimizer,
1413
+ group: Optional[dist.ProcessGroup] = None,
1414
+ ) -> Dict[str, Any]:
1415
+ """Return the optimizer state-dict in its sharded form.
1416
+
1417
+ The API is similar to :meth:`full_optim_state_dict` but this API chunks
1418
+ all non-zero-dimension states to :class:`ShardedTensor` to save memory.
1419
+ This API should only be used when the model ``state_dict`` is derived
1420
+ with the context manager ``with state_dict_type(SHARDED_STATE_DICT):``.
1421
+
1422
+ For the detailed usage, refer to :meth:`full_optim_state_dict`.
1423
+
1424
+ .. warning:: The returned state dict contains ``ShardedTensor`` and
1425
+ cannot be directly used by the regular ``optim.load_state_dict``.
1426
+ """
1427
+ FullyShardedDataParallel._warn_legacy_optim_state_dict(
1428
+ "sharded_optim_state_dict", "optim_state_dict"
1429
+ )
1430
+ return FullyShardedDataParallel._optim_state_dict_impl(
1431
+ model=model,
1432
+ optim=optim,
1433
+ optim_state_dict=optim.state_dict(),
1434
+ optim_input=None,
1435
+ rank0_only=False,
1436
+ full_state_dict=False,
1437
+ group=group,
1438
+ )
1439
+
1440
+ @staticmethod
1441
+ def shard_full_optim_state_dict(
1442
+ full_optim_state_dict: Dict[str, Any],
1443
+ model: torch.nn.Module,
1444
+ optim_input: Optional[
1445
+ Union[
1446
+ List[Dict[str, Any]],
1447
+ Iterable[torch.nn.Parameter],
1448
+ ]
1449
+ ] = None,
1450
+ optim: Optional[torch.optim.Optimizer] = None,
1451
+ ) -> Dict[str, Any]:
1452
+ """Shard a full optimizer state-dict.
1453
+
1454
+ Remaps the state in ``full_optim_state_dict`` to flattened parameters instead of unflattened
1455
+ parameters and restricts to only this rank's part of the optimizer state.
1456
+ The first argument should be the return value of :meth:`full_optim_state_dict`.
1457
+
1458
+ Example::
1459
+
1460
+ >>> # xdoctest: +SKIP("undefined variables")
1461
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
1462
+ >>> model, optim = ...
1463
+ >>> full_osd = FSDP.full_optim_state_dict(model, optim)
1464
+ >>> torch.save(full_osd, PATH)
1465
+ >>> # Define new model with possibly different world size
1466
+ >>> new_model, new_optim = ...
1467
+ >>> full_osd = torch.load(PATH)
1468
+ >>> sharded_osd = FSDP.shard_full_optim_state_dict(full_osd, new_model)
1469
+ >>> new_optim.load_state_dict(sharded_osd)
1470
+
1471
+ .. note:: Both :meth:`shard_full_optim_state_dict` and
1472
+ :meth:`scatter_full_optim_state_dict` may be used to get the
1473
+ sharded optimizer state dict to load. Assuming that the full
1474
+ optimizer state dict resides in CPU memory, the former requires
1475
+ each rank to have the full dict in CPU memory, where each rank
1476
+ individually shards the dict without any communication, while the
1477
+ latter requires only rank 0 to have the full dict in CPU memory,
1478
+ where rank 0 moves each shard to GPU memory (for NCCL) and
1479
+ communicates it to ranks appropriately. Hence, the former has
1480
+ higher aggregate CPU memory cost, while the latter has higher
1481
+ communication cost.
1482
+
1483
+ Args:
1484
+ full_optim_state_dict (Dict[str, Any]): Optimizer state dict
1485
+ corresponding to the unflattened parameters and holding the
1486
+ full non-sharded optimizer state.
1487
+ model (torch.nn.Module): Root module (which may or may not be a
1488
+ :class:`FullyShardedDataParallel` instance) whose parameters
1489
+ correspond to the optimizer state in ``full_optim_state_dict``.
1490
+ optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]):
1491
+ Input passed into the optimizer representing either a
1492
+ :class:`list` of parameter groups or an iterable of parameters;
1493
+ if ``None``, then this method assumes the input was
1494
+ ``model.parameters()``. This argument is deprecated, and there
1495
+ is no need to pass it in anymore. (Default: ``None``)
1496
+ optim (Optional[torch.optim.Optimizer]): Optimizer that will load
1497
+ the state dict returned by this method. This is the preferred
1498
+ argument to use over ``optim_input``. (Default: ``None``)
1499
+
1500
+ Returns:
1501
+ Dict[str, Any]: The full optimizer state dict now remapped to
1502
+ flattened parameters instead of unflattened parameters and
1503
+ restricted to only include this rank's part of the optimizer state.
1504
+ """
1505
+ FullyShardedDataParallel._warn_legacy_optim_state_dict(
1506
+ "shard_full_optim_state_dict", "optim_state_dict_to_load"
1507
+ )
1508
+ return FullyShardedDataParallel._optim_state_dict_to_load_impl(
1509
+ optim_state_dict=full_optim_state_dict,
1510
+ model=model,
1511
+ optim_input=optim_input,
1512
+ optim=optim,
1513
+ full_state_dict=True,
1514
+ is_named_optimizer=False,
1515
+ )
1516
+
1517
+ @staticmethod
1518
+ def flatten_sharded_optim_state_dict(
1519
+ sharded_optim_state_dict: Dict[str, Any],
1520
+ model: torch.nn.Module,
1521
+ optim: torch.optim.Optimizer,
1522
+ ) -> Dict[str, Any]:
1523
+ """Flatten a sharded optimizer state-dict.
1524
+
1525
+ The API is similar to :meth:`shard_full_optim_state_dict`. The only
1526
+ difference is that the input ``sharded_optim_state_dict`` should be
1527
+ returned from :meth:`sharded_optim_state_dict`. Therefore, there will
1528
+ be all-gather calls on each rank to gather ``ShardedTensor`` s.
1529
+
1530
+ Args:
1531
+ sharded_optim_state_dict (Dict[str, Any]): Optimizer state dict
1532
+ corresponding to the unflattened parameters and holding the
1533
+ sharded optimizer state.
1534
+ model (torch.nn.Module):
1535
+ Refer to :meth:`shard_full_optim_state_dict`.
1536
+ optim (torch.optim.Optimizer): Optimizer for ``model`` 's
1537
+ parameters.
1538
+
1539
+ Returns:
1540
+ Refer to :meth:`shard_full_optim_state_dict`.
1541
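+
+ .. note:: A minimal load sketch (added for illustration; ``model``,
+ ``optim``, and ``sharded_osd`` are placeholders, where
+ ``sharded_osd`` was produced by :meth:`sharded_optim_state_dict`):
+
+ Example::
+
+ >>> # xdoctest: +SKIP("illustrative sketch; undefined variables")
+ >>> flattened_osd = FullyShardedDataParallel.flatten_sharded_optim_state_dict(
+ >>> sharded_osd, model, optim
+ >>> )
+ >>> optim.load_state_dict(flattened_osd)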
+ """
1542
+ FullyShardedDataParallel._warn_legacy_optim_state_dict(
1543
+ "flatten_sharded_optim_state_dict", "optim_state_dict_to_load"
1544
+ )
1545
+ return FullyShardedDataParallel._optim_state_dict_to_load_impl(
1546
+ optim_state_dict=sharded_optim_state_dict,
1547
+ model=model,
1548
+ optim_input=None,
1549
+ optim=optim,
1550
+ full_state_dict=False,
1551
+ is_named_optimizer=False,
1552
+ )
1553
+
1554
+ @staticmethod
1555
+ def scatter_full_optim_state_dict(
1556
+ full_optim_state_dict: Optional[Dict[str, Any]],
1557
+ model: torch.nn.Module,
1558
+ optim_input: Optional[
1559
+ Union[
1560
+ List[Dict[str, Any]],
1561
+ Iterable[torch.nn.Parameter],
1562
+ ]
1563
+ ] = None,
1564
+ optim: Optional[torch.optim.Optimizer] = None,
1565
+ group: Optional[Any] = None,
1566
+ ) -> Dict[str, Any]:
1567
+ """Scatter the full optimizer state dict from rank 0 to all other ranks.
1568
+
1569
+ Returns the sharded optimizer state dict on each rank.
1570
+ The return value is the same as :meth:`shard_full_optim_state_dict`, and on rank
1571
+ 0, the first argument should be the return value of
1572
+ :meth:`full_optim_state_dict`.
1573
+
1574
+ Example::
1575
+
1576
+ >>> # xdoctest: +SKIP("undefined variables")
1577
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
1578
+ >>> model, optim = ...
1579
+ >>> full_osd = FSDP.full_optim_state_dict(model, optim) # only non-empty on rank 0
1580
+ >>> # Define new model with possibly different world size
1581
+ >>> new_model, new_optim, new_group = ...
1582
+ >>> sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, new_model, group=new_group)
1583
+ >>> new_optim.load_state_dict(sharded_osd)
1584
+
1585
+ .. note:: Both :meth:`shard_full_optim_state_dict` and
1586
+ :meth:`scatter_full_optim_state_dict` may be used to get the
1587
+ sharded optimizer state dict to load. Assuming that the full
1588
+ optimizer state dict resides in CPU memory, the former requires
1589
+ each rank to have the full dict in CPU memory, where each rank
1590
+ individually shards the dict without any communication, while the
1591
+ latter requires only rank 0 to have the full dict in CPU memory,
1592
+ where rank 0 moves each shard to GPU memory (for NCCL) and
1593
+ communicates it to ranks appropriately. Hence, the former has
1594
+ higher aggregate CPU memory cost, while the latter has higher
1595
+ communication cost.
1596
+
1597
+ Args:
1598
+ full_optim_state_dict (Optional[Dict[str, Any]]): Optimizer state
1599
+ dict corresponding to the unflattened parameters and holding
1600
+ the full non-sharded optimizer state if on rank 0; the argument
1601
+ is ignored on nonzero ranks.
1602
+ model (torch.nn.Module): Root module (which may or may not be a
1603
+ :class:`FullyShardedDataParallel` instance) whose parameters
1604
+ correspond to the optimizer state in ``full_optim_state_dict``.
1605
+ optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]):
1606
+ Input passed into the optimizer representing either a
1607
+ :class:`list` of parameter groups or an iterable of parameters;
1608
+ if ``None``, then this method assumes the input was
1609
+ ``model.parameters()``. This argument is deprecated, and there
1610
+ is no need to pass it in anymore. (Default: ``None``)
1611
+ optim (Optional[torch.optim.Optimizer]): Optimizer that will load
1612
+ the state dict returned by this method. This is the preferred
1613
+ argument to use over ``optim_input``. (Default: ``None``)
1614
+ group (dist.ProcessGroup): Model's process group or ``None`` if
1615
+ using the default process group. (Default: ``None``)
1616
+
1617
+ Returns:
1618
+ Dict[str, Any]: The full optimizer state dict now remapped to
1619
+ flattened parameters instead of unflattened parameters and
1620
+ restricted to only include this rank's part of the optimizer state.
1621
+ """
1622
+ FullyShardedDataParallel._warn_legacy_optim_state_dict(
1623
+ "scatter_full_optim_state_dict", "optim_state_dict_to_load"
1624
+ )
1625
+ return FullyShardedDataParallel._optim_state_dict_to_load_impl(
1626
+ optim_state_dict=full_optim_state_dict,
1627
+ model=model,
1628
+ optim_input=optim_input,
1629
+ optim=optim,
1630
+ full_state_dict=True,
1631
+ rank0_only=True,
1632
+ is_named_optimizer=False,
1633
+ group=group,
1634
+ )
1635
+
1636
+ @staticmethod
1637
+ def rekey_optim_state_dict(
1638
+ optim_state_dict: Dict[str, Any],
1639
+ optim_state_key_type: OptimStateKeyType,
1640
+ model: torch.nn.Module,
1641
+ optim_input: Optional[
1642
+ Union[
1643
+ List[Dict[str, Any]],
1644
+ Iterable[torch.nn.Parameter],
1645
+ ]
1646
+ ] = None,
1647
+ optim: Optional[torch.optim.Optimizer] = None,
1648
+ ) -> Dict[str, Any]:
1649
+ """Re-keys the optimizer state dict ``optim_state_dict`` to use the key type ``optim_state_key_type``.
1650
+
1651
+ This can be used to achieve compatibility between optimizer state dicts from models with FSDP
1652
+ instances and ones without.
1653
+
1654
+ To re-key an FSDP full optimizer state dict (i.e. from
1655
+ :meth:`full_optim_state_dict`) to use parameter IDs and be loadable to
1656
+ a non-wrapped model::
1657
+
1658
+ >>> # xdoctest: +SKIP("undefined variables")
1659
+ >>> wrapped_model, wrapped_optim = ...
1660
+ >>> full_osd = FSDP.full_optim_state_dict(wrapped_model, wrapped_optim)
1661
+ >>> nonwrapped_model, nonwrapped_optim = ...
1662
+ >>> rekeyed_osd = FSDP.rekey_optim_state_dict(full_osd, OptimStateKeyType.PARAM_ID, nonwrapped_model)
1663
+ >>> nonwrapped_optim.load_state_dict(rekeyed_osd)
1664
+
1665
+ To re-key a normal optimizer state dict from a non-wrapped model to be
1666
+ loadable to a wrapped model::
1667
+
1668
+ >>> # xdoctest: +SKIP("undefined variables")
1669
+ >>> nonwrapped_model, nonwrapped_optim = ...
1670
+ >>> osd = nonwrapped_optim.state_dict()
1671
+ >>> rekeyed_osd = FSDP.rekey_optim_state_dict(osd, OptimStateKeyType.PARAM_NAME, nonwrapped_model)
1672
+ >>> wrapped_model, wrapped_optim = ...
1673
+ >>> sharded_osd = FSDP.shard_full_optim_state_dict(rekeyed_osd, wrapped_model)
1674
+ >>> wrapped_optim.load_state_dict(sharded_osd)
1675
+
1676
+ Returns:
1677
+ Dict[str, Any]: The optimizer state dict re-keyed using the
1678
+ parameter keys specified by ``optim_state_key_type``.
1679
+ """
1680
+ FullyShardedDataParallel._warn_optim_input(optim_input)
1681
+ using_optim_input = FullyShardedDataParallel._is_using_optim_input(
1682
+ optim_input,
1683
+ optim,
1684
+ )
1685
+ assert optim_state_key_type in (
1686
+ OptimStateKeyType.PARAM_NAME,
1687
+ OptimStateKeyType.PARAM_ID,
1688
+ )
1689
+ osd = optim_state_dict # alias
1690
+ # Validate that the existing parameter keys are uniformly typed
1691
+ uses_param_name_mask = [type(param_key) is str for param_key in osd["state"]]
1692
+ uses_param_id_mask = [type(param_key) is int for param_key in osd["state"]]
1693
+ if (any(uses_param_name_mask) and not all(uses_param_name_mask)) or (
1694
+ any(uses_param_id_mask) and not all(uses_param_id_mask)
1695
+ ):
1696
+ error_msg = f"Invalid parameter keys: {osd['state'].keys()}"
1697
+ raise ValueError(error_msg)
1698
+ # Return directly if the existing key type matches the target key type
1699
+ if (
1700
+ optim_state_key_type == OptimStateKeyType.PARAM_NAME
1701
+ and all(uses_param_name_mask)
1702
+ ) or (
1703
+ optim_state_key_type == OptimStateKeyType.PARAM_ID
1704
+ and all(uses_param_id_mask)
1705
+ ):
1706
+ return osd
1707
+ # Otherwise, actually perform the re-keying
1708
+ new_osd = {}
1709
+ if optim_state_key_type == OptimStateKeyType.PARAM_NAME: # ID -> name
1710
+ param_id_to_param = (
1711
+ _get_param_id_to_param_from_optim_input(model, optim_input)
1712
+ if using_optim_input
1713
+ else _get_param_key_to_param(optim)
1714
+ )
1715
+ param_to_param_name = _get_param_to_fqn(model)
1716
+ param_id_to_param_name: List[str] = [
1717
+ param_to_param_name[param] for param in param_id_to_param.values()
1718
+ ]
1719
+ new_osd["state"] = {
1720
+ param_id_to_param_name[param_id]: param_state
1721
+ for param_id, param_state in osd["state"].items()
1722
+ }
1723
+ new_osd["param_groups"] = copy.deepcopy(osd["param_groups"])
1724
+ for param_group in new_osd["param_groups"]:
1725
+ param_group["params"] = sorted(
1726
+ [
1727
+ param_id_to_param_name[param_id]
1728
+ for param_id in param_group["params"]
1729
+ ]
1730
+ )
1731
+ return new_osd
1732
+ elif optim_state_key_type == OptimStateKeyType.PARAM_ID: # name -> ID
1733
+ param_name_to_param = _get_fqn_to_param(model)
1734
+ param_to_param_id = (
1735
+ _get_param_to_param_id_from_optim_input(model, optim_input)
1736
+ if using_optim_input
1737
+ else _get_param_to_param_key(optim)
1738
+ )
1739
+ # Because not all model parameters may be passed as the optimizer
1740
+ # input, we may need to drop some parameters from this mapping
1741
+ param_name_to_param_id = {
1742
+ param_name: param_to_param_id[param]
1743
+ for param_name, param in param_name_to_param.items()
1744
+ if param in param_to_param_id
1745
+ }
1746
+ new_osd["state"] = {
1747
+ param_name_to_param_id[param_name]: param_state
1748
+ for param_name, param_state in osd["state"].items()
1749
+ }
1750
+ new_osd["param_groups"] = copy.deepcopy(osd["param_groups"])
1751
+ for param_group in new_osd["param_groups"]:
1752
+ param_group["params"] = sorted(
1753
+ [
1754
+ param_name_to_param_id[param_name]
1755
+ for param_name in param_group["params"]
1756
+ ]
1757
+ )
1758
+ return new_osd
1759
+ return new_osd # should never reach here
1760
+
1761
+ @staticmethod
1762
+ def optim_state_dict(
1763
+ model: torch.nn.Module,
1764
+ optim: torch.optim.Optimizer,
1765
+ optim_state_dict: Optional[Dict[str, Any]] = None,
1766
+ group: Optional[dist.ProcessGroup] = None,
1767
+ ) -> Dict[str, Any]:
1768
+ """
1769
+ Transform the state-dict of an optimizer corresponding to a sharded model.
1770
+
1771
+ The given state-dict can be transformed to one of three types:
1772
+ 1) full optimizer state_dict, 2) sharded optimizer state_dict, 3) local optimizer state_dict.
1773
+
1774
+ For full optimizer state_dict, all states are unflattened and not sharded.
1775
+ Rank0 only and CPU only can be specified via :meth:`state_dict_type` to
1776
+ avoid OOM.
1777
+
1778
+ For sharded optimizer state_dict, all states are unflattened but sharded.
1779
+ CPU only can be specified via :meth:`state_dict_type` to further save
1780
+ memory.
1781
+
1782
+ For the local state_dict, no transformation will be performed, but each state
1783
+ tensor will be converted from a ``torch.Tensor`` to a ``ShardedTensor`` to represent its sharded
1784
+ nature (this is not supported yet).
1785
+
1786
+ Example::
1787
+
1788
+ >>> # xdoctest: +SKIP("undefined variables")
1789
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
1790
+ >>> from torch.distributed.fsdp import StateDictType
1791
+ >>> from torch.distributed.fsdp import FullStateDictConfig
1792
+ >>> from torch.distributed.fsdp import FullOptimStateDictConfig
1793
+ >>> # Save a checkpoint
1794
+ >>> model, optim = ...
1795
+ >>> FSDP.set_state_dict_type(
1796
+ >>> model,
1797
+ >>> StateDictType.FULL_STATE_DICT,
1798
+ >>> FullStateDictConfig(rank0_only=False),
1799
+ >>> FullOptimStateDictConfig(rank0_only=False),
1800
+ >>> )
1801
+ >>> state_dict = model.state_dict()
1802
+ >>> optim_state_dict = FSDP.optim_state_dict(model, optim)
1803
+ >>> save_a_checkpoint(state_dict, optim_state_dict)
1804
+ >>> # Load a checkpoint
1805
+ >>> model, optim = ...
1806
+ >>> state_dict, optim_state_dict = load_a_checkpoint()
1807
+ >>> FSDP.set_state_dict_type(
1808
+ >>> model,
1809
+ >>> StateDictType.FULL_STATE_DICT,
1810
+ >>> FullStateDictConfig(rank0_only=False),
1811
+ >>> FullOptimStateDictConfig(rank0_only=False),
1812
+ >>> )
1813
+ >>> model.load_state_dict(state_dict)
1814
+ >>> optim_state_dict = FSDP.optim_state_dict_to_load(
1815
+ >>> model, optim, optim_state_dict
1816
+ >>> )
1817
+ >>> optim.load_state_dict(optim_state_dict)
1818
+
1819
+ Args:
1820
+ model (torch.nn.Module): Root module (which may or may not be a
1821
+ :class:`FullyShardedDataParallel` instance) whose parameters
1822
+ were passed into the optimizer ``optim``.
1823
+ optim (torch.optim.Optimizer): Optimizer for ``model`` 's
1824
+ parameters.
1825
+ optim_state_dict (Dict[str, Any]): the target optimizer state_dict to
1826
+ transform. If the value is None, optim.state_dict() will be used. (
1827
+ Default: ``None``)
1828
+ group (dist.ProcessGroup): Model's process group across which parameters
1829
+ are sharded or ``None`` if using the default process group. (
1830
+ Default: ``None``)
1831
+
1832
+ Returns:
1833
+ Dict[str, Any]: A :class:`dict` containing the optimizer state for
1834
+ ``model``. The sharding of the optimizer state is based on
1835
+ ``state_dict_type``.
1836
+ """
1837
+ state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model)
1838
+ if optim_state_dict is None:
1839
+ optim_state_dict = optim.state_dict()
1840
+ return FullyShardedDataParallel._optim_state_dict_impl(
1841
+ model=model,
1842
+ optim=optim,
1843
+ optim_state_dict=optim_state_dict,
1844
+ optim_input=None,
1845
+ rank0_only=getattr(
1846
+ state_dict_settings.optim_state_dict_config, "rank0_only", False
1847
+ ),
1848
+ full_state_dict=state_dict_settings.state_dict_type
1849
+ == StateDictType.FULL_STATE_DICT,
1850
+ group=group,
1851
+ cpu_offload=getattr(
1852
+ state_dict_settings.optim_state_dict_config, "offload_to_cpu", True
1853
+ ),
1854
+ )
1855
+
1856
+ @staticmethod
1857
+ def optim_state_dict_to_load(
1858
+ model: torch.nn.Module,
1859
+ optim: torch.optim.Optimizer,
1860
+ optim_state_dict: Dict[str, Any],
1861
+ is_named_optimizer: bool = False,
1862
+ load_directly: bool = False,
1863
+ group: Optional[dist.ProcessGroup] = None,
1864
+ ) -> Dict[str, Any]:
1865
+ """
1866
+ Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model.
1867
+
1868
+ Given a ``optim_state_dict`` that is transformed through
1869
+ :meth:`optim_state_dict`, it gets converted to the flattened optimizer
1870
+ state_dict that can be loaded to ``optim`` which is the optimizer for
1871
+ ``model``. ``model`` must be sharded by FullyShardedDataParallel.
1872
+
1873
+ >>> # xdoctest: +SKIP("undefined variables")
1874
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
1875
+ >>> from torch.distributed.fsdp import StateDictType
1876
+ >>> from torch.distributed.fsdp import FullStateDictConfig
1877
+ >>> from torch.distributed.fsdp import FullOptimStateDictConfig
1878
+ >>> # Save a checkpoint
1879
+ >>> model, optim = ...
1880
+ >>> FSDP.set_state_dict_type(
1881
+ >>> model,
1882
+ >>> StateDictType.FULL_STATE_DICT,
1883
+ >>> FullStateDictConfig(rank0_only=False),
1884
+ >>> FullOptimStateDictConfig(rank0_only=False),
1885
+ >>> )
1886
+ >>> state_dict = model.state_dict()
1887
+ >>> original_osd = optim.state_dict()
1888
+ >>> optim_state_dict = FSDP.optim_state_dict(
1889
+ >>> model,
1890
+ >>> optim,
1891
+ >>> optim_state_dict=original_osd
1892
+ >>> )
1893
+ >>> save_a_checkpoint(state_dict, optim_state_dict)
1894
+ >>> # Load a checkpoint
1895
+ >>> model, optim = ...
1896
+ >>> state_dict, optim_state_dict = load_a_checkpoint()
1897
+ >>> FSDP.set_state_dict_type(
1898
+ >>> model,
1899
+ >>> StateDictType.FULL_STATE_DICT,
1900
+ >>> FullStateDictConfig(rank0_only=False),
1901
+ >>> FullOptimStateDictConfig(rank0_only=False),
1902
+ >>> )
1903
+ >>> model.load_state_dict(state_dict)
1904
+ >>> optim_state_dict = FSDP.optim_state_dict_to_load(
1905
+ >>> model, optim, optim_state_dict
1906
+ >>> )
1907
+ >>> optim.load_state_dict(optim_state_dict)
1908
+
1909
+ Args:
1910
+ model (torch.nn.Module): Root module (which may or may not be a
1911
+ :class:`FullyShardedDataParallel` instance) whose parameters
1912
+ were passed into the optimizer ``optim``.
1913
+ optim (torch.optim.Optimizer): Optimizer for ``model`` 's
1914
+ parameters.
1915
+ optim_state_dict (Dict[str, Any]): The optimizer states to be loaded.
1916
+ is_named_optimizer (bool): Whether the optimizer is a NamedOptimizer or
1917
+ KeyedOptimizer. Only set to ``True`` if ``optim`` is TorchRec's
1918
+ KeyedOptimizer or torch.distributed's NamedOptimizer.
1919
+ load_directly (bool): If this is set to True, this API will also
1920
+ call optim.load_state_dict(result) before returning the result.
1921
+ Otherwise, users are responsible for calling ``optim.load_state_dict()``
1922
+ (Default: ``False``)
1923
+ group (dist.ProcessGroup): Model's process group across which parameters
1924
+ are sharded or ``None`` if using the default process group. (
1925
+ Default: ``None``)
1926
+ """
1927
+ state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model)
1928
+ result = FullyShardedDataParallel._optim_state_dict_to_load_impl(
1929
+ optim_state_dict=optim_state_dict,
1930
+ model=model,
1931
+ optim_input=None,
1932
+ optim=optim,
1933
+ full_state_dict=(
1934
+ state_dict_settings.state_dict_type == StateDictType.FULL_STATE_DICT
1935
+ ),
1936
+ rank0_only=getattr(
1937
+ state_dict_settings.optim_state_dict_config, "rank0_only", False
1938
+ ),
1939
+ is_named_optimizer=is_named_optimizer,
1940
+ group=group,
1941
+ )
1942
+ if load_directly:
1943
+ optim.load_state_dict(result)
1944
+ return result
1945
+
1946
+ def register_comm_hook(self, state: object, hook: callable):
1947
+ """Register a communication hook.
1948
+
1949
+ This is an enhancement that provides a flexible hook with which users can specify how FSDP aggregates
1950
+ gradients across multiple workers.
1951
+ This hook can be used to implement several algorithms like
1952
+ `GossipGrad <https://arxiv.org/abs/1803.05880>`_ and gradient compression
1953
+ which involve different communication strategies for
1954
+ parameter syncs while training with :class:`FullyShardedDataParallel`.
1955
+
1956
+ .. warning ::
1957
+ FSDP communication hook should be registered before running an initial forward pass
1958
+ and only once.
1959
+
1960
+ Args:
1961
+ state (object): Passed to the hook to maintain any state information during the training process.
1962
+ Examples include error feedback in gradient compression,
1963
+ peers to communicate with next in `GossipGrad <https://arxiv.org/abs/1803.05880>`_, etc.
1964
+ It is locally stored by each worker
1965
+ and shared by all the gradient tensors on the worker.
1966
+ hook (Callable): Callable, which has one of the following signatures:
1967
+ 1) ``hook: Callable[torch.Tensor] -> None``:
1968
+ This function takes in a Python tensor, which represents
1969
+ the full, flattened, unsharded gradient with respect to all variables
1970
+ corresponding to the model this FSDP unit is wrapping
1971
+ (that are not wrapped by other FSDP sub-units).
1972
+ It then performs all necessary processing and returns ``None``;
1973
+ 2) ``hook: Callable[torch.Tensor, torch.Tensor] -> None``:
1974
+ This function takes in two Python tensors, the first one represents
1975
+ the full, flattened, unsharded gradient with respect to all variables
1976
+ corresponding to the model this FSDP unit is wrapping
1977
+ (that are not wrapped by other FSDP sub-units). The latter
1978
+ represents a pre-sized tensor to store a chunk of a sharded gradient after
1979
+ reduction.
1980
+ In both cases, the callable performs all necessary processing and returns ``None``.
1981
+ Callables with signature 1 are expected to handle gradient communication for a `NO_SHARD` case.
1982
+ Callables with signature 2 are expected to handle gradient communication for sharded cases.
1983
+
1984
+ """
1985
+ if not self.check_is_root():
1986
+ raise AssertionError(
1987
+ "register_comm_hook can only be called on a root instance."
1988
+ )
1989
+ for fsdp_state in traversal_utils._get_fsdp_states(self):
1990
+ if fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES:
1991
+ raise AssertionError(
1992
+ f"Communication hook is not supported for hybrid strategies: {fsdp_state.sharding_strategy}"
1993
+ )
1994
+ if fsdp_state._comm_hook is not None:
1995
+ raise AssertionError("A communication hook is already registered")
1996
+ if not callable(hook):
1997
+ raise ValueError(
1998
+ f"The communication hook must be callable but got {hook}"
1999
+ )
2000
+ fsdp_state._comm_hook = hook
2001
+ fsdp_state._comm_hook_state = state
2002
+
2003
+
2004
+ def _get_grad_norm(
2005
+ params: Iterable[nn.Parameter],
2006
+ norm_type: float,
2007
+ ) -> torch.Tensor:
2008
+ """
2009
+ Return the gradient norm of the parameters ``params``, where the gradients are viewed as a single vector.
2010
+
2011
+ The returned norm is in FP32 even if parameters/gradients are in a low precision. This is because the downstream
2012
+ use of this return value is a reduction across ranks.
2013
+ """
2014
+ params_with_grad = [param for param in params if param.grad is not None]
2015
+ if len(params_with_grad) == 0:
2016
+ return torch.tensor(0.0)
2017
+ grads = [param.grad for param in params_with_grad]
2018
+ grad_dtypes = {grad.dtype for grad in grads}
2019
+ if len(grad_dtypes) != 1:
2020
+ raise ValueError(
2021
+ f"Requires uniform dtype across all gradients but got {grad_dtypes}"
2022
+ )
2023
+ # Compute the gradient norm in FP32, where we treat the gradients as a
2024
+ # single vector
2025
+ grad_norm = torch.linalg.vector_norm(
2026
+ torch.stack(
2027
+ [
2028
+ torch.linalg.vector_norm(grad.detach(), norm_type, dtype=torch.float32)
2029
+ for grad in grads
2030
+ ],
2031
+ ),
2032
+ norm_type,
2033
+ dtype=torch.float32,
2034
+ )
2035
+ return grad_norm
2036
+
2037
+
2038
+ def _get_param_to_fqn(
2039
+ model: torch.nn.Module,
2040
+ ) -> Dict[torch.nn.Parameter, str]:
2041
+ """
2042
+ Construct a mapping from parameters to their parameter names.
2043
+
2044
+ The ``model`` should not contain any :class:`FullyShardedDataParallel` instances, which
2045
+ means that none of the parameters should be ``FlatParameter`` s. As a
2046
+ result, compared to :meth:`_get_param_to_fqns`, the mapped
2047
+ values may be flattened from singleton :class:`list` s to the contained
2048
+ names themselves.
2049
+
2050
+ Args:
2051
+ model (torch.nn.Module): Root module, which should not contain any
2052
+ :class:`FullyShardedDataParallel` instances.
2053
+ """
2054
+ param_to_param_names = _get_param_to_fqns(model)
2055
+ for param_names in param_to_param_names.values():
2056
+ assert (
2057
+ len(param_names) > 0
2058
+ ), "`_get_param_to_fqns()` should not construct empty lists"
2059
+ if len(param_names) > 1:
2060
+ raise RuntimeError(
2061
+ "Each parameter should only map to one parameter name but got "
2062
+ f"{len(param_names)}: {param_names}"
2063
+ )
2064
+ param_to_param_name = {
2065
+ param: param_names[0] for param, param_names in param_to_param_names.items()
2066
+ }
2067
+ return param_to_param_name
2068
+
2069
+
2070
+ def _get_fqn_to_param(
2071
+ model: torch.nn.Module,
2072
+ ) -> Dict[str, torch.nn.Parameter]:
2073
+ """Construct the inverse mapping of :meth:`_get_param_to_fqn`."""
2074
+ param_to_param_name = _get_param_to_fqn(model)
2075
+ return dict(zip(param_to_param_name.values(), param_to_param_name.keys()))
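
The rekeying helpers above translate optimizer state between torch.optim's integer parameter IDs and fully qualified parameter names (FQNs). The following is a minimal, self-contained sketch of that idea for a plain (non-FSDP) module; the helper names rekey_to_names and rekey_to_ids are illustrative only and are not part of the FSDP API.

import copy
import torch
import torch.nn as nn

# Sketch: rekey an optimizer state dict between integer param IDs (torch.optim's
# native keys) and parameter FQNs, mirroring the idea behind rekey_optim_state_dict.
model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
model(torch.randn(2, 4)).sum().backward()
optim.step()  # populate optimizer state

param_id_to_name = {i: name for i, (name, _) in enumerate(model.named_parameters())}
name_to_param_id = {name: i for i, name in param_id_to_name.items()}

def rekey_to_names(osd):
    # Replace integer keys with FQNs in both "state" and "param_groups".
    new_osd = copy.deepcopy(osd)
    new_osd["state"] = {param_id_to_name[k]: v for k, v in osd["state"].items()}
    for group in new_osd["param_groups"]:
        group["params"] = sorted(param_id_to_name[i] for i in group["params"])
    return new_osd

def rekey_to_ids(osd):
    # Inverse mapping: FQN keys back to the optimizer's integer param IDs.
    new_osd = copy.deepcopy(osd)
    new_osd["state"] = {name_to_param_id[k]: v for k, v in osd["state"].items()}
    for group in new_osd["param_groups"]:
        group["params"] = sorted(name_to_param_id[n] for n in group["params"])
    return new_osd

named_osd = rekey_to_names(optim.state_dict())  # keys like "0.weight"
optim.load_state_dict(rekey_to_ids(named_osd))  # round-trips back to integer IDs
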
venv/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py ADDED
@@ -0,0 +1,388 @@
1
+ import logging
2
+ from collections import abc, defaultdict
3
+ from typing import Any, Dict, Iterable, List, Optional, overload, Sequence, Tuple, Union
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ from torch.amp.grad_scaler import _MultiDeviceReplicator, GradScaler, OptState
8
+ from torch.distributed.distributed_c10d import ProcessGroup
9
+
10
+ log = logging.getLogger(__name__)
11
+
12
+
13
+ def _refresh_per_optimizer_state() -> Dict[str, Any]:
14
+ return {"stage": OptState.READY, "found_inf_per_device": {}}
15
+
16
+
17
+ def _is_supported_device(tensor: torch.Tensor) -> bool:
18
+ return tensor.is_cuda or tensor.device.type in ("xla", "cpu", "hpu")
19
+
20
+
21
+ class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator):
22
+ """
23
+ Lazily serves copies of a tensor to requested devices. This class extends
24
+ _MultiDeviceReplicator to allow support for "cpu" as a device.
25
+ """
26
+
27
+ def __init__(self, master_tensor: torch.Tensor) -> None:
28
+ assert _is_supported_device(master_tensor)
29
+ self.master = master_tensor
30
+ self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
31
+
32
+
33
+ class ShardedGradScaler(GradScaler):
34
+ """
35
+ ShardedGradScaler helps perform gradient scaling in a shard-aware manner. It extends
36
+ functionality from GradScaler:
37
+ * Supports PyTorch DDP and FSDP implementations
38
+ * Supports CPU-offloaded tensors (as used in fully sharded data parallel [FSDP])
39
+ * Supports the custom mixed-precision loss dtype (fp16, bf16) that FSDP returns
40
+ * Syncs inf/nan for scaled gradient tensors on any torch.device (where tensors are placed) across
41
+ nodes
42
+
43
+ Example::
44
+
45
+ # Creates a ShardedGradScaler once at the beginning of training.
46
+ scaler = ShardedGradScaler()
47
+
48
+ for epoch in epochs:
49
+ for input, target in data:
50
+ optimizer.zero_grad()
51
+ output = model(input)
52
+ loss = loss_fn(output, target)
53
+
54
+ # Scales loss. Calls backward() on scaled loss to create scaled gradients.
55
+ scaler.scale(loss).backward()
56
+
57
+ # scaler.step() first unscales gradients of the optimizer's params.
58
+ # If gradients don't contain infs/NaNs, optimizer.step() is then called,
59
+ # otherwise, optimizer.step() is skipped.
60
+ scaler.step(optimizer)
61
+
62
+ # Updates the scale for next iteration.
63
+ scaler.update()
64
+
65
+ See :class:`GradScaler` for explanation of scaling/unscaling and more use cases.
66
+
67
+ Args:
68
+ init_scale (float, optional, default=2.**16): Initial scale factor.
69
+ growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
70
+ :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
71
+ backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
72
+ :meth:`update` if inf/NaN gradients occur in an iteration.
73
+ growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
74
+ that must occur for the scale to be multiplied by ``growth_factor``.
75
+ enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
76
+ invokes the underlying ``optimizer.step()``, and other methods become no-ops.
77
+ Default: ``True``
78
+ process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD):
79
+ process group for sharding
80
+ """
81
+
82
+ def __init__(
83
+ self,
84
+ device: str = "cuda",
85
+ init_scale: float = 2.0**16,
86
+ backoff_factor: float = 0.5,
87
+ growth_factor: float = 2.0,
88
+ growth_interval: int = 2000,
89
+ enabled: bool = True,
90
+ process_group: Optional[ProcessGroup] = dist.group.WORLD,
91
+ ) -> None:
92
+ super().__init__(
93
+ device,
94
+ init_scale=init_scale,
95
+ backoff_factor=backoff_factor,
96
+ growth_factor=growth_factor,
97
+ growth_interval=growth_interval,
98
+ enabled=enabled,
99
+ )
100
+ if self._enabled:
101
+ self.process_group = process_group
102
+ self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
103
+
104
+ @overload
105
+ def scale(self, outputs: torch.Tensor) -> torch.Tensor:
106
+ ...
107
+
108
+ @overload
109
+ def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]:
110
+ ...
111
+
112
+ @overload
113
+ def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
114
+ ...
115
+
116
+ @overload
117
+ def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]:
118
+ ...
119
+
120
+ def scale(
121
+ self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]]
122
+ ) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
123
+ if not self._enabled:
124
+ return outputs
125
+
126
+ if isinstance(outputs, torch.Tensor):
127
+ assert _is_supported_device(outputs)
128
+ if self._scale is None:
129
+ self._lazy_init_scale_growth_tracker(outputs.device)
130
+ assert self._scale is not None
131
+ scaled_output = outputs * self._scale.to(
132
+ device=outputs.device, non_blocking=True
133
+ )
134
+ # Here we ensure the return dtype is the same as the outputs dtype.
135
+ # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
136
+ # format (fp16, bf16) and so the scaled loss should be of the same dtype.
137
+ return scaled_output.type(outputs.dtype)
138
+
139
+ stash: List[_GeneralMultiDeviceReplicator] = []
140
+
141
+ def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):
142
+ if isinstance(val, torch.Tensor):
143
+ assert _is_supported_device(val)
144
+ if len(stash) == 0:
145
+ if self._scale is None:
146
+ self._lazy_init_scale_growth_tracker(val.device)
147
+ assert self._scale is not None
148
+ stash.append(_GeneralMultiDeviceReplicator(self._scale))
149
+ scaled_val = val * stash[0].get(val.device)
150
+ # Here we ensure the return dtype is the same as the outputs dtype.
151
+ # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
152
+ # format (fp16, bf16) and so the scaled loss should be of the same dtype.
153
+ return scaled_val.type(val.dtype)
154
+ if isinstance(val, abc.Iterable):
155
+ iterator = map(apply_scale, val)
156
+ if isinstance(val, (list, tuple)):
157
+ return type(val)(iterator)
158
+ return iterator
159
+ raise ValueError("outputs must be a Tensor or an iterable of Tensors")
160
+
161
+ return apply_scale(outputs)
162
+
163
+ def _foreach_non_finite_check_and_unscale_cpu_(
164
+ self,
165
+ grads: Sequence[torch.Tensor],
166
+ found_inf: torch.Tensor,
167
+ inv_scale: torch.Tensor,
168
+ ) -> None:
169
+ if len(grads) == 0:
170
+ return
171
+ assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."
172
+ assert found_inf.numel() == 1, "found_inf must be a 1-element tensor."
173
+
174
+ for grad in grads:
175
+ if grad.device.type != "cpu":
176
+ log.error(
177
+ "tensor device is %s but was expected to be ``cpu``",
178
+ grad.device,
179
+ )
180
+ raise ValueError(
181
+ "Gradients were found on a non-CPU device when"
182
+ " expected to be on CPU."
183
+ )
184
+ if (
185
+ torch.isinf(grad).any().item() is True
186
+ or torch.isnan(grad).any().item() is True
187
+ ):
188
+ found_inf.data = torch.tensor([1.0])
189
+ break
190
+ else:
191
+ grad.data *= inv_scale.item()
192
+
193
+ def _unscale_grads_(
194
+ self,
195
+ optimizer: torch.optim.Optimizer,
196
+ inv_scale: torch.Tensor,
197
+ found_inf: torch.Tensor,
198
+ allow_fp16: bool = True,
199
+ ) -> Dict[torch.device, torch.Tensor]:
200
+ per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale)
201
+ per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf)
202
+
203
+ # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
204
+ # There could be thousands of grads, so we'd like to iterate through them just once.
205
+ # However, we don't know their devices or dtypes in advance.
206
+
207
+ # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
208
+ # Google says mypy struggles with defaultdicts type annotations.
209
+ per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
210
+ with torch.no_grad():
211
+ for group in optimizer.param_groups:
212
+ for param in group["params"]:
213
+ if param.grad is None:
214
+ continue
215
+ if (not allow_fp16) and param.grad.dtype == torch.float16:
216
+ raise ValueError("Attempting to unscale FP16 gradients.")
217
+ if param.grad.is_sparse:
218
+ # is_coalesced() == False means the sparse grad has values with duplicate indices.
219
+ # coalesce() deduplicates indices and adds all values that have the same index.
220
+ # For scaled fp16 values, there's a good chance coalescing will cause overflow,
221
+ # so we should check the coalesced _values().
222
+ if param.grad.dtype is torch.float16:
223
+ # coalesce is not supported in torch.float16
224
+ param_grad_fp32 = param.grad.type(torch.float32).coalesce()
225
+ param.grad = param_grad_fp32.type(torch.float16)
226
+ to_unscale = param.grad._values()
227
+ else:
228
+ to_unscale = param.grad
229
+
230
+ per_device_and_dtype_grads[to_unscale.device][
231
+ to_unscale.dtype
232
+ ].append(to_unscale)
233
+
234
+ for device, per_dtype_grads in per_device_and_dtype_grads.items():
235
+ for grads in per_dtype_grads.values():
236
+ if grads[0].device.type == "cpu":
237
+ self._foreach_non_finite_check_and_unscale_cpu_(
238
+ grads,
239
+ per_device_found_inf.get(device),
240
+ per_device_inv_scale.get(device),
241
+ )
242
+ else:
243
+ torch._amp_foreach_non_finite_check_and_unscale_(
244
+ grads,
245
+ per_device_found_inf.get(device),
246
+ per_device_inv_scale.get(device),
247
+ )
248
+ # There exist contexts (e.g. w/ `use_orig_params=True`) wherein some
249
+ # ranks may have no (non-zero sized) parameter shards, necessitating the
250
+ # initialization of `per_device_found_inf._per_device_tensors` here
251
+ if not per_device_found_inf._per_device_tensors:
252
+ assert self._scale is not None
253
+ per_device_found_inf.get(self._scale.device)
254
+ return per_device_found_inf._per_device_tensors
255
+
256
+ def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
257
+ if not self._enabled:
258
+ return
259
+
260
+ self._check_scale_growth_tracker("unscale_")
261
+
262
+ optimizer_state = self._per_optimizer_states[id(optimizer)]
263
+
264
+ if optimizer_state["stage"] is OptState.UNSCALED:
265
+ raise RuntimeError(
266
+ "unscale_() has already been called on this optimizer since the last update()."
267
+ )
268
+ elif optimizer_state["stage"] is OptState.STEPPED:
269
+ raise RuntimeError("unscale_() is being called after step().")
270
+
271
+ # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
272
+ assert self._scale is not None
273
+ inv_scale = self._scale.double().reciprocal().float()
274
+ found_inf = torch.full(
275
+ (1,), 0.0, dtype=torch.float32, device=self._scale.device
276
+ )
277
+
278
+ optimizer_state["found_inf_per_device"] = self._unscale_grads_(
279
+ optimizer, inv_scale, found_inf, True
280
+ )
281
+ optimizer_state["stage"] = OptState.UNSCALED
282
+
283
+ # Synchronize the detected inf across the ranks
284
+ optimizer_state = self._per_optimizer_states[id(optimizer)]
285
+ works = []
286
+ found_inf_on_cpus = []
287
+ found_inf_on_cudas = []
288
+
289
+ for found_inf in optimizer_state["found_inf_per_device"].values():
290
+ if self._device == "cuda" and found_inf.device.type == "cpu":
291
+ found_inf_on_cpus.append(found_inf)
292
+ found_inf_on_cuda = found_inf.cuda()
293
+ found_inf_on_cudas.append(found_inf_on_cuda)
294
+ works.append(
295
+ dist.all_reduce(
296
+ found_inf_on_cuda, async_op=True, group=self.process_group
297
+ )
298
+ )
299
+ else:
300
+ works.append(
301
+ dist.all_reduce(found_inf, async_op=True, group=self.process_group)
302
+ )
303
+ for work in works:
304
+ work.wait()
305
+ if found_inf_on_cpus:
306
+ torch._foreach_copy_(found_inf_on_cpus, found_inf_on_cudas)
307
+
308
+ def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None:
309
+ """
310
+ If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero.
311
+ Otherwise, scale is multiplied by the growth factor when the growth interval is reached.
312
+ """
313
+ assert self._scale is not None and self._growth_tracker is not None
314
+
315
+ if found_inf.item() >= 1.0:
316
+ self._scale *= self._backoff_factor
317
+ self._growth_tracker.fill_(0)
318
+ else:
319
+ successful = self._growth_tracker + 1
320
+ if successful == self._growth_interval:
321
+ self._scale *= self._growth_factor
322
+ self._growth_tracker.fill_(0)
323
+ else:
324
+ self._growth_tracker = successful
325
+
326
+ def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None:
327
+ """
328
+ Updates the scale factor.
329
+ If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
330
+ to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
331
+ the scale is multiplied by ``growth_factor`` to increase it.
332
+ Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
333
+ used directly, it's used to fill GradScaler's internal scale tensor. So if
334
+ ``new_scale`` was a tensor, later in-place changes to that tensor will not further
335
+ affect the scale GradScaler uses internally.)
336
+ Args:
337
+ new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor.
338
+ .. warning::
339
+ :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
340
+ been invoked for all optimizers used this iteration.
341
+ """
342
+
343
+ if not self._enabled:
344
+ return
345
+
346
+ _scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated]
347
+
348
+ if new_scale is not None:
349
+ # Accept a new user-defined scale.
350
+ if isinstance(new_scale, float):
351
+ self._scale.fill_(new_scale) # type: ignore[union-attr]
352
+ else:
353
+ reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \
354
+ torch.FloatTensor with requires_grad=False."
355
+ assert new_scale.device.type == self._device, reason
356
+ assert new_scale.numel() == 1, reason
357
+ assert new_scale.requires_grad is False, reason
358
+ self._scale.copy_(new_scale) # type: ignore[union-attr]
359
+ else:
360
+ # Consume shared inf/nan data collected from optimizers to update the scale.
361
+ # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
362
+ found_infs = [
363
+ found_inf.to(device=_scale.device, non_blocking=True)
364
+ for state in self._per_optimizer_states.values()
365
+ for found_inf in state["found_inf_per_device"].values()
366
+ ]
367
+
368
+ assert len(found_infs) > 0, "No inf checks were recorded prior to update."
369
+
370
+ found_inf_combined = found_infs[0]
371
+ if len(found_infs) > 1:
372
+ for i in range(1, len(found_infs)):
373
+ found_inf_combined += found_infs[i]
374
+
375
+ if _scale.device.type == "cpu":
376
+ self._amp_update_scale_cpu_(found_inf_combined)
377
+ else:
378
+ torch._amp_update_scale_(
379
+ self._scale, # type: ignore[arg-type]
380
+ self._growth_tracker, # type: ignore[arg-type]
381
+ found_inf_combined,
382
+ self._growth_factor, # type: ignore[arg-type]
383
+ self._backoff_factor, # type: ignore[arg-type]
384
+ self._growth_interval, # type: ignore[arg-type]
385
+ )
386
+
387
+ # To prepare for next iteration, clear the data collected from optimizers this iteration.
388
+ self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
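
To make the CPU fallback above concrete, here is a standalone sketch of the unscale-and-check step: gradients are multiplied in place by the inverse scale unless any of them contain inf/NaN, in which case a found_inf flag is raised. This mirrors the logic of _foreach_non_finite_check_and_unscale_cpu_ but is a simplified illustration, not the FSDP implementation.

import torch

def unscale_and_check(grads, inv_scale):
    # Returns a 1-element tensor set to 1.0 if any gradient is non-finite;
    # otherwise multiplies each gradient in place by inv_scale.
    found_inf = torch.zeros(1)
    for grad in grads:
        if not torch.isfinite(grad).all():
            found_inf.fill_(1.0)
            break
        grad.mul_(inv_scale)
    return found_inf

scale = torch.tensor(2.0 ** 16)
grads = [torch.randn(3) * scale, torch.randn(2, 2) * scale]
found = unscale_and_check(grads, scale.double().reciprocal().float())
print(found)  # tensor([0.]) unless a gradient overflowed

In the sharded setting, each rank would then all-reduce its found_inf tensor across the process group so that every rank agrees on whether to skip the optimizer step.
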
venv/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py ADDED
@@ -0,0 +1,606 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the BSD license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ import contextlib
7
+ import copy
8
+ from abc import ABC, abstractmethod
9
+ from typing import (
10
+ Any,
11
+ Callable,
12
+ cast,
13
+ Dict,
14
+ Generator,
15
+ Iterable,
16
+ Optional,
17
+ Sequence,
18
+ Set,
19
+ Tuple,
20
+ Type,
21
+ Union,
22
+ )
23
+
24
+ import torch.nn as nn
25
+
26
+ __all__ = [
27
+ "always_wrap_policy",
28
+ "lambda_auto_wrap_policy",
29
+ "transformer_auto_wrap_policy",
30
+ "size_based_auto_wrap_policy",
31
+ "enable_wrap",
32
+ "wrap",
33
+ "CustomPolicy",
34
+ "ModuleWrapPolicy",
35
+ ]
36
+
37
+
38
+ # NOTE: We intentionally keep this function simple and isolate the complexity
39
+ # to `fn` to enable using this function generically. We may move this to a
40
+ # non-FSDP-specific folder and/or make it public in the future.
41
+ def _post_order_apply(
42
+ root_module: nn.Module,
43
+ fn: Callable[[nn.Module], Optional[nn.Module]],
44
+ ):
45
+ """
46
+ This applies ``fn`` to every module in the module tree of ``root_module``
47
+ following a post-order traversal. If ``fn`` returns an :class:`nn.Module`,
48
+ then this replaces the original module with the newly returned one in the
49
+ tree. Otherwise, ``fn`` should return ``None``, in which case the module is
50
+ not changed.
51
+ """
52
+ # Track visited modules to avoid visiting shared modules multiple times
53
+ visited_modules: Set[nn.Module] = {root_module}
54
+
55
+ def _post_order_apply_inner(
56
+ module: nn.Module,
57
+ module_name: str,
58
+ parent_module: Optional[nn.Module],
59
+ ):
60
+ for child_module_name, child_module in module.named_children():
61
+ if child_module not in visited_modules:
62
+ visited_modules.add(child_module)
63
+ _post_order_apply_inner(child_module, child_module_name, module)
64
+ optional_module = fn(module)
65
+ if optional_module is not None:
66
+ assert isinstance(parent_module, nn.Module), (
67
+ "Non-root modules should have their parent module set but got "
68
+ f"{parent_module} for {module}"
69
+ )
70
+ assert module_name, (
71
+ "Non-root modules should have their module name set but got "
72
+ f"an empty module name for {module}"
73
+ )
74
+ assert isinstance(
75
+ optional_module, nn.Module
76
+ ), f"fn should return None or an nn.Module but got {optional_module}"
77
+ setattr(parent_module, module_name, optional_module)
78
+
79
+ _post_order_apply_inner(root_module, "", None)
80
+
81
+
82
+ def _construct_wrap_fn(
83
+ root_module: nn.Module,
84
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]],
85
+ fsdp_fn: Callable,
86
+ ) -> Callable[[nn.Module], Optional[nn.Module]]:
87
+ """
88
+ This constructs the "wrap" function to pass to :func:`_post_order_apply`
89
+ based on ``target_module_to_kwargs``, which should be constructed from the
90
+ wrapping policy.
91
+ """
92
+
93
+ def fn(module: nn.Module) -> Optional[nn.Module]:
94
+ # Explicitly avoid wrapping the root module since for FSDP, it is
95
+ # handled by the caller
96
+ if module in target_module_to_kwargs and module is not root_module:
97
+ kwargs = target_module_to_kwargs[module]
98
+ return fsdp_fn(module, **kwargs)
99
+ return None
100
+
101
+ return fn
102
+
103
+
104
+ def _run_mixed_precision_override_policy(
105
+ root_module: nn.Module,
106
+ module_classes: Iterable[Type[nn.Module]],
107
+ ignored_modules: Set[nn.Module],
108
+ root_kwargs: Dict[str, Any],
109
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]],
110
+ ):
111
+ module_classes_tuple = tuple(set(module_classes))
112
+ for module in root_module.modules():
113
+ if module in ignored_modules:
114
+ continue
115
+ elif isinstance(module, module_classes_tuple):
116
+ # This policy overrides any existing policy
117
+ if module not in target_module_to_kwargs:
118
+ # Only inherit from the root kwargs if not already specified
119
+ target_module_to_kwargs[module] = root_kwargs
120
+ target_module_to_kwargs[module]["mixed_precision"] = None
121
+ return target_module_to_kwargs
122
+
123
+
124
+ def always_wrap_policy(*args, **kwargs) -> bool:
125
+ """
126
+ A simple recursive wrap policy that always returns ``True``. This means
127
+ that every submodule is wrapped by the wrapper class in
128
+ :func:`_recursive_wrap`.
129
+ """
130
+ return True
131
+
132
+
133
+ class _Policy(ABC):
134
+ """
135
+ This defines an abstract base class that represents a policy for applying
136
+ a module-level API.
137
+ """
138
+
139
+ @abstractmethod
140
+ def _run_policy(
141
+ self,
142
+ root_module: nn.Module,
143
+ ignored_modules: Set[nn.Module],
144
+ root_kwargs: Dict[str, Any],
145
+ ) -> Dict[nn.Module, Dict[str, Any]]:
146
+ """
147
+ This should return a dict ``target_module_to_kwargs`` that maps from
148
+ each target module to wrap to its kwargs.
149
+ """
150
+ ...
151
+
152
+
153
+ def _module_wrap_policy(
154
+ module: nn.Module,
155
+ recurse: bool,
156
+ nonwrapped_numel: int,
157
+ module_classes: Set[Type[nn.Module]],
158
+ ) -> bool:
159
+ """
160
+ This auto wrap policy wraps every module that is an instance of any type in
161
+ ``module_classes`` as its own FSDP instance. The root module given by
162
+ ``module`` is always wrapped as an FSDP instance regardless. Since the
163
+ wrapping proceeds bottom up, each FSDP instance manages the parameters in
164
+ its subtree excluding any already managed by a child FSDP instance.
165
+
166
+ Args:
167
+ module (nn.Module): Current module being considered.
168
+ recurse (bool): If ``False``, then this function must decide whether
169
+ ``module`` should be wrapped as an FSDP instance or not. If
170
+ ``True``, then the function is still recursing down the module
171
+ tree as a part of the DFS.
172
+ nonwrapped_numel (int): Parameter numel not yet wrapped.
173
+ module_classes (Set[Type[nn.Module]]): Set of module classes that are
174
+ wrapped as FSDP instances.
175
+
176
+ Returns:
177
+ ``True`` if ``recurse=True``, and whether ``module`` should be wrapped
178
+ if ``recurse=False``.
179
+ """
180
+ if recurse:
181
+ return True # always recurse
182
+ return isinstance(module, tuple(module_classes))
183
+
184
+
185
+ class ModuleWrapPolicy(_Policy):
186
+ """
187
+ This policy applies to every module of the specified module classes,
188
+ passing in the kwargs given to the root.
189
+ """
190
+
191
+ def __init__(self, module_classes: Iterable[Type[nn.Module]]):
192
+ module_classes_set = set(module_classes)
193
+ self._module_classes = module_classes_set
194
+ self._module_classes_str = str(module_classes_set)
195
+
196
+ def _run_policy(
197
+ self,
198
+ root_module: nn.Module,
199
+ ignored_modules: Set[nn.Module],
200
+ root_kwargs: Dict[str, Any],
201
+ ) -> Dict[nn.Module, Dict[str, Any]]:
202
+ module_classes = tuple(self._module_classes)
203
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]] = {}
204
+ for module in root_module.modules():
205
+ if module in ignored_modules:
206
+ continue
207
+ elif isinstance(module, module_classes):
208
+ # Shallow copy to avoid coupling changes across modules
209
+ target_module_to_kwargs[module] = copy.copy(root_kwargs)
210
+ return target_module_to_kwargs
211
+
212
+ def __call__(self, module, recurse, *args, **kwargs):
213
+ # nonwrapped_numel is not used.
214
+ return _module_wrap_policy(
215
+ module, recurse, nonwrapped_numel=-1, module_classes=self._module_classes
216
+ )
217
+
218
+ def __repr__(self) -> str:
219
+ return super().__repr__() + f"({self._module_classes_str})"
220
+
221
+
222
+ class CustomPolicy(_Policy):
223
+ """
224
+ This policy takes in a lambda function that maps a given ``nn.Module`` to
225
+ either ``False``, ``True``, or a kwarg dictionary.
226
+ - If the function returns ``False`` or an empty dictionary, then the module
227
+ does not have the API applied.
228
+ - If the function returns ``True``, then the module has the API applied
229
+ with the root's kwargs.
230
+ - If the function returns a non-empty dictionary, then the module has the
231
+ API applied, and the dictionary overrides the root's kwargs.
232
+
233
+ Example::
234
+
235
+ >>> # xdoctest: +SKIP("undefined variables")
236
+ >>> model = init_transformer_model(...)
237
+ >>> def lambda_fn(module: nn.Module):
238
+ >>> if module is model.lm_head:
239
+ >>> return {"sharding_strategy": ShardingStrategy.SHARD_GRAD_OP}
240
+ >>> elif isinstance(module, TransformerBlock):
241
+ >>> return True
242
+ >>> return False
243
+ >>> policy = CustomPolicy(lambda_fn)
244
+ >>> fsdp_model = FSDP(model, auto_wrap_policy=policy)
245
+ """
246
+
247
+ def __init__(self, lambda_fn: Callable[[nn.Module], Union[bool, Dict[str, Any]]]):
248
+ self._lambda_fn = lambda_fn
249
+
250
+ def _run_policy(
251
+ self,
252
+ root_module: nn.Module,
253
+ ignored_modules: Set[nn.Module],
254
+ root_kwargs: Dict[str, Any],
255
+ ) -> Dict[nn.Module, Dict[str, Any]]:
256
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]] = {}
257
+ for module in root_module.modules():
258
+ if module in ignored_modules:
259
+ continue
260
+ res = self._lambda_fn(module)
261
+ if not isinstance(res, (dict, bool)):
262
+ raise ValueError(
263
+ "The lambda_fn passed to CustomPolicy should return "
264
+ f"False/True or a kwarg dict, but it returned {res}"
265
+ )
266
+ if not res:
267
+ continue
268
+ kwargs = copy.copy(root_kwargs)
269
+ if isinstance(res, dict):
270
+ # Override the root kwargs with the ones specified by the
271
+ # lambda function
272
+ kwargs.update(res)
273
+ target_module_to_kwargs[module] = kwargs
274
+ return target_module_to_kwargs
275
+
276
+
277
+ def lambda_auto_wrap_policy(
278
+ module: nn.Module, recurse: bool, nonwrapped_numel: int, lambda_fn: Callable
279
+ ) -> bool:
280
+ """
281
+ A convenient auto wrap policy to wrap submodules based on an arbitrary user
282
+ function. If `lambda_fn(submodule) == True``, the submodule will be wrapped as
283
+ a `wrapper_cls` unit.
284
+
285
+ Return if a module should be wrapped during auto wrapping.
286
+
287
+ The first three parameters are required by :func:`_recursive_wrap`.
288
+
289
+ Args:
290
+ module (nn.Module): Current module being considered.
291
+ recurse (bool): If ``False``, then this function must decide whether
292
+ ``module`` should be wrapped as an FSDP instance or not. If
293
+ ``True``, then the function is still recursing down the module
294
+ tree as a part of the DFS.
295
+ nonwrapped_numel (int): Parameter numel not yet wrapped.
296
+
297
+ lambda_fn (Callable[[nn.Module], bool]): If this returns ``True``, then
298
+ this module will be wrapped.
299
+ """
300
+ if recurse:
301
+ return True # always recurse
302
+ return lambda_fn(module)
303
+
304
+
305
+ def transformer_auto_wrap_policy(
306
+ module: nn.Module,
307
+ recurse: bool,
308
+ nonwrapped_numel: int,
309
+ transformer_layer_cls: Set[Type[nn.Module]],
310
+ ) -> bool:
311
+ """
312
+ See :func:`_module_wrap_policy`, where ``transformer_layer_cls`` is the
313
+ same as ``module_classes``. Note that shared parameters must be wrapped in
314
+ the same FSDP instance, so this auto wrap policy can help wrap shared
315
+ embeddings into the same FSDP instance for transformer models.
316
+ """
317
+ return _module_wrap_policy(module, recurse, nonwrapped_numel, transformer_layer_cls)
318
+
319
+
320
+ def _wrap_module_cls_individually(
321
+ module: nn.Module, module_classes: Sequence[type], recurse: bool, *args, **kwargs
322
+ ):
323
+ if recurse:
324
+ # always recurse
325
+ return True
326
+ else:
327
+ # if not recursing, decide whether we should wrap based on whether the type of module
328
+ # is in `module_classes`.
329
+ return isinstance(module, tuple(module_classes))
330
+
331
+
332
+ def _or_policy(
333
+ module: nn.Module,
334
+ recurse: bool,
335
+ nonwrapped_numel: int,
336
+ policies,
337
+ ) -> bool:
338
+ """
339
+ A policy that wraps ``module`` if any policy in the passed in iterable of
340
+ ``policies`` returns ``True``.
341
+ """
342
+ return any(
343
+ policy(module=module, recurse=recurse, nonwrapped_numel=nonwrapped_numel)
344
+ for policy in policies
345
+ )
346
+
347
+
348
+ def size_based_auto_wrap_policy(
349
+ module: nn.Module,
350
+ recurse: bool,
351
+ nonwrapped_numel: int,
352
+ # Additional custom arguments
353
+ min_num_params: int = int(1e8),
354
+ force_leaf_modules: Optional[Set[Type[nn.Module]]] = None,
355
+ exclude_wrap_modules: Optional[Set[Type[nn.Module]]] = None,
356
+ ) -> bool:
357
+ """
358
+ A size-based auto wrap policy.
359
+
360
+ Args:
361
+ module (nn.Module): Current module being considered.
362
+ recurse (bool): If ``False``, then this function must decide whether
363
+ ``module`` should be wrapped as an FSDP instance or not. If
364
+ ``True``, then the function is still recursing down the module
365
+ tree as a part of the DFS.
366
+ nonwrapped_numel (int): Parameter numel not yet wrapped.
367
+
368
+ min_num_params (int): Customizable policy input that controls the size
369
+ threshold over which a module is ready to be wrapped. This is in
370
+ units of numel.
371
+ force_leaf_modules (Set[Type[nn.Module]]): Set of module types to keep
372
+ as leaves, i.e. their children will never be wrapped.
373
+ exclude_wrap_modules (Set[Type[nn.Module]]): Set of module types to be
374
+ excluded in wrapping.
375
+
376
+ Returns:
377
+ Whether ``module`` should be wrapped.
378
+ """
379
+ force_leaf_modules = (
380
+ size_based_auto_wrap_policy.FORCE_LEAF_MODULES # type: ignore[attr-defined]
381
+ if force_leaf_modules is None
382
+ else force_leaf_modules
383
+ )
384
+ exclude_wrap_modules = (
385
+ size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES # type: ignore[attr-defined]
386
+ if exclude_wrap_modules is None
387
+ else exclude_wrap_modules
388
+ )
389
+
390
+ # Keep the argument `min_num_params` for BC for now, but it represents the
391
+ # minimum non-wrapped *numel* before triggering a wrapping
392
+ min_nonwrapped_numel = min_num_params
393
+ is_large = nonwrapped_numel >= min_nonwrapped_numel
394
+ if recurse:
395
+ # We should recurse if the module is big enough but not in force_leaf_modules list.
396
+ return is_large and not isinstance(module, tuple(force_leaf_modules))
397
+ else:
398
+ # If we are not recursing, determine if we should wrap.
399
+ return is_large and not isinstance(module, tuple(exclude_wrap_modules))
400
+
401
+
402
+ # Set those defaults to the size_based_auto_wrap_policy function. Make them easy to be imported.
403
+ size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES = {nn.ModuleList, nn.ModuleDict} # type: ignore[attr-defined]
404
+ size_based_auto_wrap_policy.FORCE_LEAF_MODULES = {nn.MultiheadAttention} # type: ignore[attr-defined]
405
+
406
+
407
+ @contextlib.contextmanager
408
+ def enable_wrap(
409
+ *, wrapper_cls: Any, **wrapper_kwargs: Any
410
+ ) -> Generator[None, None, None]:
411
+ """
412
+ Context manager to wrap modules using a wrapper.
413
+
414
+ Useful for when you'd like to apply the same configuration arguments to all
415
+ child modules that you wrap. A particularly important use case is wrapping
416
+ large layers so that they get sharded (in-place) during initialization, to
417
+ avoid running out of system memory. Large layers can indicate that they
418
+ should be sharded via the ``wrap`` annotation and this context manager can
419
+ provide the exact configuration for these nested instances.
420
+
421
+ Usage::
422
+
423
+ with enable_wrap(wrapper_cls, **params):
424
+ # Wraps layer in FSDP by default if within context
425
+ self.l1 = wrap(torch.nn.Linear(5, 5))
426
+
427
+ Args:
428
+ wrapper_cls:
429
+ Class that `wrap` annotation will `wrap` modules with, such as
430
+ `FullyShardedDataParallel`.
431
+ **wrapper_kwargs:
432
+ Configuration settings that will be passed to all ``wrap``
433
+ instances inside the context
434
+ """
435
+ kwargs = {
436
+ "wrapper_cls": wrapper_cls,
437
+ **wrapper_kwargs,
438
+ }
439
+ with _ConfigAutoWrap(**kwargs):
440
+ yield
441
+
442
+
443
+ def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module:
444
+ """
445
+ Annotate that a module should be wrapped. Annotated modules will only be
446
+ wrapped if inside of an :func:`enable_wrap` context manager. This allows
447
+ a module to be initialized both with and without a wrapper without code
448
+ change.
449
+
450
+ The class that this function wraps the passed-in ``nn.Module`` with is the
451
+ ``wrapper_cls`` argument passed into ``enable_wrap``. Both
452
+ ``enable_wrap`` and ``wrap`` can take in kwargs specifying how to construct
453
+ the ``wrapper_cls`` instance. In the case of duplicate kwargs in
454
+ ``enable_wrap`` and ``wrap``, the argument passed into ``wrap`` will be
455
+ respected.
456
+
457
+ Usage::
458
+
459
+ with enable_wrap(wrapper_cls=FSDP, **fsdp_config):
460
+ # Wraps layer in FSDP by default if within context
461
+ self.l1 = wrap(torch.nn.Linear(5, 5))
462
+
463
+ Args:
464
+ module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
465
+ **wrap_overrides: configuration overrides that will take priority over
466
+ the values provided by the :func:`enable_wrap` context
467
+ """
468
+ if _ConfigAutoWrap.in_autowrap_context:
469
+ assert _ConfigAutoWrap.wrapper_cls is not None
470
+
471
+ wrap_overrides = {**_ConfigAutoWrap.kwargs, **wrap_overrides}
472
+ return _wrap(
473
+ module,
474
+ _ConfigAutoWrap.wrapper_cls,
475
+ **wrap_overrides,
476
+ )
477
+ return module
478
+
479
+
480
+ def _wrap(module: nn.Module, wrapper_cls: Callable, **kwargs) -> nn.Module:
481
+ assert wrapper_cls is not None
482
+ if hasattr(module, "_wrap_overrides"):
483
+ # If module has a _wrap_overrides attribute, we force overriding the
484
+ # FSDP config with these attributes for this module. Currently this
485
+ # is only used to disable mixed precision for BatchNorm when
486
+ # auto_wrapping.
487
+ overrides = {**kwargs, **module._wrap_overrides} # type: ignore[arg-type]
488
+ return wrapper_cls(module, **overrides)
489
+
490
+ return wrapper_cls(module, **kwargs)
491
+
492
+
493
+ def _recursive_wrap(
494
+ module: nn.Module,
495
+ auto_wrap_policy: Callable,
496
+ wrapper_cls: Callable,
497
+ ignored_modules: Set[nn.Module],
498
+ ignored_params: Set[nn.Parameter],
499
+ only_wrap_children: bool = False,
500
+ **kwargs: Any,
501
+ ) -> Tuple[nn.Module, int]:
502
+ """
503
+ Wraps submodules of ``module`` for which ``auto_wrap_policy`` returns
504
+ ``True`` with ``wrapper_cls``.
505
+
506
+ Args:
507
+ module (nn.Module): Module to recursively wrap.
508
+ auto_wrap_policy (Callable): A callable representing a policy that
509
+ determines which modules to recursively wrap with ``wrapper_cls``.
510
+ ignored_modules (Set[torch.nn.Module]): Modules to ignore when
511
+ wrapping.
512
+ ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when
513
+ wrapping; these should be the parameters contained in the modules
514
+ in ``ignored_modules``.
515
+ Returns:
516
+ (nn.Module, int):
517
+ ``module`` after wrapping and the numel recursively wrapped.
518
+ """
519
+ assert auto_wrap_policy is not None, "Must specify auto_wrap_policy."
520
+ assert wrapper_cls is not None, "Must specify wrapper_cls"
521
+ # Make sure no child is already wrapped.
522
+ for _, child in module.named_modules():
523
+ if child in ignored_modules:
524
+ continue
525
+ try:
526
+ assert not isinstance(child, cast(type, wrapper_cls))
527
+ except TypeError:
528
+ # wrapper_cls is a function as opposed to a class type, just bypass above check.
529
+ pass
530
+
531
+ # We count all params, assuming none of them are already wrapped.
532
+ nonwrapped_numel = sum(
533
+ p.numel() for p in module.parameters() if p not in ignored_params
534
+ )
535
+
536
+ assert auto_wrap_policy is not None
537
+ if auto_wrap_policy(module=module, recurse=True, nonwrapped_numel=nonwrapped_numel):
538
+ total_wrapped_numel = 0
539
+ # Iterate through the children, recursively wrap if necessary
540
+ for name, child in module.named_children():
541
+ if child in ignored_modules:
542
+ continue
543
+ wrapped_child, num_wrapped_params = _recursive_wrap(
544
+ module=child,
545
+ auto_wrap_policy=auto_wrap_policy,
546
+ wrapper_cls=wrapper_cls,
547
+ ignored_modules=ignored_modules,
548
+ ignored_params=ignored_params,
549
+ **kwargs,
550
+ )
551
+ setattr(module, name, wrapped_child)
552
+ # Keep track of how many parameters have been wrapped
553
+ total_wrapped_numel += num_wrapped_params
554
+ # decide if we need to wrap the current module,
555
+ # since the left over parameters exceed the number of params to wrap
556
+ remainder = nonwrapped_numel - total_wrapped_numel
557
+ if not only_wrap_children and auto_wrap_policy(
558
+ module=module, recurse=False, nonwrapped_numel=remainder
559
+ ):
560
+ # Leaf node or final wrapping of the remainder both happen here.
561
+ return _wrap(module, wrapper_cls, **kwargs), nonwrapped_numel
562
+ else:
563
+ return module, total_wrapped_numel
564
+ return module, 0
565
+
566
+
567
+ class _ConfigAutoWrap:
568
+ """
569
+ Helper class to wrap modules based on default config args via a context manager.
570
+ See :func:`enable_wrap` for more information.
571
+ """
572
+
573
+ in_autowrap_context: bool = False # Context flag
574
+ wrapper_cls: Optional[Callable] = None # The wrapper class
575
+ kwargs: Dict[str, Any] = {} # Wrapper's args
576
+
577
+ def __init__(self, **kwargs: Dict[str, Any]):
578
+ self.kwargs = kwargs
579
+
580
+ @staticmethod
581
+ def enable_autowrap_context(kwargs: Any) -> None:
582
+ if _ConfigAutoWrap.in_autowrap_context:
583
+ raise NotImplementedError(
584
+ "You are already within an autowrap context and we currently do not supported nested autowrap."
585
+ )
586
+ _ConfigAutoWrap.in_autowrap_context = True
587
+ # Get and save the wrapper cls for the context.
588
+ assert (
589
+ "wrapper_cls" in kwargs.keys()
590
+ ), "Expected to pass in wrapper_cls arg into _ConfigAutoWrap."
591
+ _ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs["wrapper_cls"])
592
+ del kwargs["wrapper_cls"]
593
+ # Save the rest.
594
+ _ConfigAutoWrap.kwargs = kwargs
595
+
596
+ @staticmethod
597
+ def disable_autowrap_context() -> None:
598
+ _ConfigAutoWrap.in_autowrap_context = False
599
+ _ConfigAutoWrap.wrapper_cls = None
600
+ _ConfigAutoWrap.kwargs = {}
601
+
602
+ def __enter__(self) -> None:
603
+ self.enable_autowrap_context(self.kwargs)
604
+
605
+ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
606
+ self.disable_autowrap_context()
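
For context on how these policies are consumed, the sketch below shows the usual pattern of passing either a ModuleWrapPolicy or a partially applied size_based_auto_wrap_policy as auto_wrap_policy when constructing FSDP. The toy Block module is an illustrative assumption, and the final line assumes a distributed process group has already been initialized (e.g., via torchrun).

import functools
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy, size_based_auto_wrap_policy

class Block(nn.Module):  # toy transformer-like block (illustrative only)
    def __init__(self):
        super().__init__()
        self.attn = nn.MultiheadAttention(64, 4)
        self.ff = nn.Linear(64, 64)

model = nn.Sequential(*[Block() for _ in range(4)])

# Option 1: wrap every Block instance as its own FSDP unit.
policy = ModuleWrapPolicy({Block})

# Option 2: wrap based on a parameter-count threshold instead.
policy = functools.partial(size_based_auto_wrap_policy, min_num_params=10_000)

# Requires torch.distributed to be initialized before this call.
fsdp_model = FSDP(model, auto_wrap_policy=policy)
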
venv/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ import warnings
2
+ warnings.warn(
3
+ "torch.distributed.pipeline is deprecated. For up-to-date pipeline parallel "
4
+ "implementation, please refer to the PiPPy library under the PyTorch "
5
+ "organization (Pipeline Parallelism for PyTorch): "
6
+ "https://github.com/pytorch/PiPPy"
7
+ )
venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.65 kB).
 
venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.21 kB).