diff --git a/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..70d2f4e6d0dd04df3ffff2c1583eb38ca4d2dec4 --- /dev/null +++ b/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:168239d5c9986edf1477aea746d7a585af7fcb44ef2ba21b9a4af0fb3deb77c3 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..e32affe2053bb6406c2c328c6fab51ea80188e26 --- /dev/null +++ b/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d136286fc913d4d5c781d50ac52854c98d16f8eb91aa58e5c6fbf22ea3c989ac +size 33555533 diff --git a/ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..5bc98dbba5b8679c0f3a9eadf6762cfc5d21791e --- /dev/null +++ b/ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f038fd7638035e27e4e966f0411a18d6a76603f60ad41884bb77024e3d4d123e +size 415237276 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e94ab1bb9d636d2ebe4d57ba8230eaf597c344ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py @@ -0,0 +1,52 @@ + +import sys +import torch + + +def is_available(): + return hasattr(torch._C, "_dist_autograd_init") + + +if is_available() and not torch._C._dist_autograd_init(): + raise RuntimeError("Failed to initialize torch.distributed.autograd") + +if is_available(): + from torch._C._distributed_autograd import ( + get_gradients, + backward, + _init, + _new_context, + _release_context, + _get_max_id, + _is_valid_context, + _retrieve_context, + _current_context, + _get_debug_info, + DistAutogradContext, + ) + + +class context: + ''' + Context object to wrap forward and backward passes when using + distributed autograd. The ``context_id`` generated in the ``with`` + statement is required to uniquely identify a distributed backward pass + on all workers. Each worker stores metadata associated with this + ``context_id``, which is required to correctly execute a distributed + autograd pass. 
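+    After the distributed backward pass completes, ``get_gradients(context_id)`` (imported
+    above from ``torch._C._distributed_autograd``) can be called inside the same context to
+    retrieve the gradients accumulated for that pass.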
+ + Example:: + >>> # xdoctest: +SKIP + >>> import torch.distributed.autograd as dist_autograd + >>> with dist_autograd.context() as context_id: + >>> t1 = torch.rand((3, 3), requires_grad=True) + >>> t2 = torch.rand((3, 3), requires_grad=True) + >>> loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum() + >>> dist_autograd.backward(context_id, [loss]) + ''' + def __enter__(self): + self.autograd_context = _new_context() + return self.autograd_context._context_id() + + def __exit__(self, type, value, traceback): + _release_context(self.autograd_context._context_id()) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf462033774d2f84494644a04531a60803f8f18c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d887730f442f6d29e2ada45cfc068315257f9105 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__init__.py @@ -0,0 +1,38 @@ +from ._flat_param import FlatParameter as FlatParameter +from .fully_sharded_data_parallel import ( + BackwardPrefetch, + CPUOffload, + FullOptimStateDictConfig, + FullStateDictConfig, + FullyShardedDataParallel, + LocalOptimStateDictConfig, + LocalStateDictConfig, + MixedPrecision, + OptimStateDictConfig, + OptimStateKeyType, + ShardedOptimStateDictConfig, + ShardedStateDictConfig, + ShardingStrategy, + StateDictConfig, + StateDictSettings, + StateDictType, +) + +__all__ = [ + "BackwardPrefetch", + "CPUOffload", + "FullOptimStateDictConfig", + "FullStateDictConfig", + "FullyShardedDataParallel", + "LocalOptimStateDictConfig", + "LocalStateDictConfig", + "MixedPrecision", + "OptimStateDictConfig", + "OptimStateKeyType", + "ShardedOptimStateDictConfig", + "ShardedStateDictConfig", + "ShardingStrategy", + "StateDictConfig", + "StateDictSettings", + "StateDictType", +] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b31c2ba33cc2d7d02b18b3bb1ad4db557e99a342 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20430a398c4420fd7b3ca5893f70bdf89d52b790 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69b0bdb8c97003a352ebf87653018572a586a0b4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_dynamo_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_dynamo_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3e9ee55aaa37f4109384055aedb204d0b2d2140 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_dynamo_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad63cc0c98fd0a90a07a2b53e36533295cd448ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba68de0f051b228763ff22422e30a7d690182d97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a36b0acf7c6c42f4a2890a36ad424230617027be Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de506004554ed3aa1587b8d0ff2b3f492e913945 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8978da3af4af5be8b45861d378b2fb870a42bd3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e6765246ec97f5fdebf3cc97184779e50ff3b56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..4fccb2d99d24a84ec42deb42dc36a75c8a008ffe Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..759d3d3cd8505923ea3cc7339ec05c496a47c208 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_shard_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..304949c0edf2b7ebb851c1669ec1e1f422816b5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d21609e81242ab2af94c805a54ef36bbd4e0136d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..406a53ec6b5afb41c7d1b50edc1ea857b28cbb52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..049ce0022e6f4ec5e7189998695e1f1a9e15484e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_wrap_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_wrap_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a364c615c028a638963cd5beb584501b2c063cfb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_wrap_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b71613cebc499fce33994ee4ca10fd2b7d5d0b6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/api.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/fully_sharded_data_parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/fully_sharded_data_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abc1ccc7c5f08a66afd49eb1027e92c85164f94d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/fully_sharded_data_parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6e9bad35c18e4ff0fae5d7ecc008fdf6bbb567a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/sharded_grad_scaler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/wrap.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/wrap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25e0f652099e7266ad6c85ba1ff1b65b08d0fdb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/wrap.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..469a06d6dee4125a81f86227646ff2f29f86459c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py @@ -0,0 +1,563 @@ +""" +This file includes private common utilities for FSDP. +""" +import logging +import traceback +import warnings +import weakref +from enum import auto, Enum +from functools import partial +from typing import ( + Any, + Callable, + cast, + Dict, + Generator, + Iterable, + List, + no_type_check, + Optional, + Set, + Tuple, + Type, + TYPE_CHECKING, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._flat_param as flat_param_file +import torch.nn as nn +from torch.distributed._composable_state import _get_module_state, _State +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + _CHECKPOINT_PREFIX, +) +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions +from torch.distributed.utils import _apply_to_tensors +from torch.utils._mode_utils import no_dispatch + +from .api import ( + FullOptimStateDictConfig, + FullStateDictConfig, + OptimStateDictConfig, + ShardingStrategy, + StateDictConfig, + StateDictType, +) + +if TYPE_CHECKING: + from ._flat_param import FlatParamHandle + +FSDP_WRAPPED_MODULE = "_fsdp_wrapped_module" +FSDP_PREFIX = FSDP_WRAPPED_MODULE + "." +FSDP_FLATTENED = "_fsdp_flattened" + +# Save a global mapping from module to its input tensor dtype to be populated +# during the forward pre-hook and consumed in the forward post-hook when +# overriding a module's mixed precision +# NOTE: We currently take the last input tensor's dtype in the case of multiple +# floating-point input tensors, which may be incorrect. However, since there is +# not a 1:1 correspondence between input and output tensors, we must use *some* +# heuristic like this to predict the desired output dtype. 
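+# Illustrative sketch of the heuristic above (hypothetical tensors, for exposition only):
+# if a module's forward receives both fp16 and fp32 floating-point inputs, the dtype
+# recorded for that module is the *last* floating-point input's dtype, and the forward
+# post-hook casts the module's floating-point outputs back to it, e.g.:
+#   inps = (torch.randn(2, dtype=torch.float16), torch.randn(2, dtype=torch.float32))
+#   recorded = [t.dtype for t in inps if torch.is_floating_point(t)][-1]  # torch.float32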
+_MODULE_TO_INP_DTYPE: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() + + +class _FSDPDeviceHandle: + """ + This is a simple abstraction for FSDP computing devices, + which enables custom backends that implement CUDA-like + semantics to be integrated with FSDP. + """ + + def __init__(self, device: torch.device, backend: Any = None): + if backend is None: + try: + self.__backend = getattr(torch, device.type) + self.__device = device + except AttributeError as exc: + raise AttributeError( + f"Device '{device}' does not have a corresponding backend registered as 'torch.{device.type}'." + ) from exc + else: + self.__backend = backend + + @classmethod + def from_device(cls, device: torch.device) -> "_FSDPDeviceHandle": + """ + Return an device handle corresponding to the device, and through this handle, + operations with the same semantics as CUDA can be performed on the device. + Just return torch.cuda if the device is cuda to make attribute-access faster. + Custom backend must first register a module with the same name with {device.type} on torch. + """ + if device.type == "cuda": + return cast(_FSDPDeviceHandle, torch.cuda) + return cls(device) + + def __getattr__(self, __name: str) -> Any: + try: + return getattr(self.__backend, __name) + except AttributeError as exc: + raise AttributeError( + f"Custom backend '{self.__device.type}' not implement 'torch.{self.__device.type}.{__name}'" + ) from exc + + +class _UninitializedDeviceHandle(_FSDPDeviceHandle): + def __init__(self): + pass + + def __getattribute__(self, __name: str) -> Any: + raise RuntimeError("Trying to use an uninitialized device handle.") + + +class _FSDPState(_State): + def __init__(self) -> None: + # TODO: Move all the attributes to this class to enable typing for + # FSDP/fully_shard. + self._ignored_modules: Set[nn.Module] = set() + self._ignored_params: Set[nn.Parameter] = set() + # Buffer names are cleaned (without wrapper prefixes) + self._ignored_buffer_names: Set[str] = set() + self.process_group: Optional[dist.ProcessGroup] = None + self.rank: int = -1 + self.world_size: int = -1 + self._device_mesh: Optional[DeviceMesh] = None + self.sharding_strategy = ShardingStrategy.FULL_SHARD + self._use_orig_params: bool = False + self.training_state = TrainingState.IDLE + self._unshard_params_ctx: Dict[nn.Module, Generator] = {} + self._state_dict_type: StateDictType = StateDictType.FULL_STATE_DICT + self._state_dict_config: StateDictConfig = FullStateDictConfig() + self._optim_state_dict_config: OptimStateDictConfig = FullOptimStateDictConfig() + self._is_root: Optional[bool] = None + self._handle: Optional[flat_param_file.FlatParamHandle] = None + self._fully_sharded_module_to_handle: Dict[ + nn.Module, Optional[flat_param_file.FlatParamHandle] + ] = {} + self.compute_device: Optional[torch.device] = None + self._gradient_predivide_factor: int = 0 + self._gradient_postdivide_factor: int = 0 + self._comm_hook: Optional[Callable] = None + self._comm_hook_state: Optional[Any] = None + # Abstract device handle for fsdp compute device. 
For now, + # the compute device must implement cuda semantics used by fsdp + self._device_handle: _FSDPDeviceHandle = _UninitializedDeviceHandle() + # All following attributes should only be used for root states: + # Save these static lists to avoid the repeated tree traversals + self._all_fsdp_states: List[_FSDPState] = [] + self._all_handles: List[flat_param_file.FlatParamHandle] = [] + self._fsdp_extension: Optional[FSDPExtensions] = None + + +def _get_module_fsdp_state(module: nn.Module) -> Optional[_FSDPState]: + state = _get_module_state(module) + if state is None or not isinstance(state, _FSDPState): + return None + return state + + +def _get_module_fsdp_state_if_fully_sharded_module( + module: nn.Module, +) -> Optional[_FSDPState]: + state = _get_module_fsdp_state(module) + if state is None: + return None + if state == module: # FullyShardedDataParallel module case. + return state + if module in state._fully_sharded_module_to_handle: # fully_shard case. + return state + return None + + +class TrainingState(Enum): + """ + An enum that indicates the state of a ``FullyShardedDataParallel` instance. + """ + + IDLE = auto() + FORWARD_BACKWARD = auto() + SUMMON_FULL_PARAMS = auto() + + +class HandleTrainingState(Enum): + """ + An enum that indicates the state of a ``FlatParamHandle`. + """ + + IDLE = auto() + FORWARD = auto() + BACKWARD_PRE = auto() + BACKWARD_POST = auto() + SUMMON_FULL_PARAMS = auto() + + +def _is_composable(state: _FSDPState): + # TODO: This is a temporary hack for differentiate between code paths. + return not isinstance(state, nn.Module) + + +@no_type_check +def _module_handle(state: _FSDPState, module: nn.Module) -> Optional["FlatParamHandle"]: + """ + Returns the ``FlatParamHandle`` s corresponding to ``module``. This is + the handle that contains some parameter in ``module``. + """ + if _is_composable(state): + # A valid FSDP state may have no managed parameters and hence no + # handles, meaning no entry in `_fully_sharded_module_to_handles` + if state._handle is None: + return None + assert ( + module in state._fully_sharded_module_to_handle + ), f"Expects a fully sharded module but got {module} on rank {state.rank}" + return state._fully_sharded_module_to_handle[module] + else: + # NOTE: This assumes `module` is a `FullyShardedDataParallel` instance. + return module._handle + + +@no_type_check +def _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool: + """Returns if ``module`` has parameters managed by FSDP.""" + return _module_handle(state, module) is not None + + +def _get_sharding_strategy(handle): + """ + Returns the sharding strategy of the handle. + """ + return handle._sharding_strategy if handle else None + + +def clean_tensor_name(tensor_name: str) -> str: + """ + Cleans the parameter or buffer name by removing any module wrapper + prefixes. + """ + tensor_name = tensor_name.replace(FSDP_PREFIX, "") + # TODO: Explicitly replacing the checkpoint wrapper prefix is not ideal as + # it couples `CheckpointWrapper` and FSDP and also does not scale for more + # module wrappers. + tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX, "") + return tensor_name + + +def _set_fsdp_flattened(tensor: torch.Tensor) -> None: + """ + Sets an attribute on ``tensor`` to mark it as flattened by FSDP. This is to + avoid re-flattening it during nested construction. 
+ """ + setattr(tensor, FSDP_FLATTENED, True) + + +def _is_fsdp_flattened(tensor: torch.Tensor) -> bool: + """Returns if ``tensor`` has been marked as flattened by FSDP.""" + return getattr(tensor, FSDP_FLATTENED, False) + + +def _named_parameters_with_duplicates( + module: nn.Module, **kwargs: Any +) -> List[Tuple[str, nn.Parameter]]: + """ + This API is required as some modules overwrite `named_parameters()` but do not support + `remove_duplicate`. + """ + assert ( + "remove_duplicate" not in kwargs + ), "_named_parameters_with_duplicates cannot be used with `remove_duplicate` argument." + kwargs["remove_duplicate"] = False + try: + ret = list(module.named_parameters(**kwargs)) + except AssertionError as e: + kwargs.pop("remove_duplicate") + ret = list(module.named_parameters(**kwargs)) + return ret + + +def _get_param_to_fqns( + model: torch.nn.Module, + dedup_shared_params: bool = True, +) -> Dict[nn.Parameter, List[str]]: + """ + Constructs a mapping from parameter to a list of its \"canonical\" FQNs. Here, + we use canonical to mean the fully-qualified name assigned to the parameter + based on its position in the original nn.Module hierarchy before any wrapper + or parallelism has been applied to it. This is in contrast to FQNs that may be + generated after parallelisms or wrappers have been applied to the model. + + Each normal parameter maps to a singleton list containing its FQN, while each + ``FlatParameter`` maps to a list of its original parameter FQNs, which may + have length greater than one. All FQNs are prefixed starting from ``model``. + + In the case where FSDP was applied with ``use_orig_params=True``, there should be no + ``FlatParameter`` s registered to the model's modules and this mapping will only + contain mappings from ``nn.Parameter`` s to singleton FQN lists. + + It is only in the case where FSDP was applied with ``use_orig_params=False`` where + a ``FlatParameter`` will be registered in place of the original parameters and there + will be mappings from each ``FlatParameter`` to lists of FQNs corresponding to the + original parameters. + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance). + dedup_shared_params (bool): For shared parameters, if ``True``, only + includes the FQNs corresponding to the first encounter of the + shared parameter in the module traversal; if ``False``, then + includes the FQNs across all encounters. (Default: ``True``) + """ + + def module_fn(module, prefix, tree_level, param_to_fqns): + for param_name, param in _named_parameters_with_duplicates( + module, recurse=False + ): + local_fqns = ( + param._fqns + if isinstance(param, flat_param_file.FlatParameter) + else [param_name] + ) # prefixed from `module` + global_fqns = [ + clean_tensor_name(prefix + name) for name in local_fqns + ] # prefixed from the top level `model` (i.e. including `prefix`) + is_shared_param = param in param_to_fqns + if not is_shared_param: + param_to_fqns[param] = global_fqns + else: + if isinstance(param, flat_param_file.FlatParameter): + # DMP overwrites `named_parameters` and skip (advance to + # the next child module) the wrapped_module (e.g., + # _dmp_wrapped_module and _fsdp_wrapped_module). When a user + # calls `named_child` to traverse the module recursively and + # calls `named_parameters` with `recurse=False`, parameters + # will be traversed more than once. + # This hack is specified designed for DMP + FSDP. 
We + # overwrite the flat_parameters traversal result to only obtain + # the last one, which happens to be the correct one. + # + # TODO: Remove this hack once DMP + FSDP is not supported. + warnings.warn( + "FlatParameter is being traversed more than once. " + "This case should only happen when using " + "DistributedModelParallel with FullyShardedDataParallel." + ) + param_to_fqns[param] = global_fqns + elif not dedup_shared_params: + param_to_fqns[param].extend(global_fqns) + + def return_fn(param_to_fqns): + return param_to_fqns + + param_to_unflat_param_names: Dict[torch.nn.Parameter, List[str]] = {} + return _apply_to_modules( + model, + module_fn, + return_fn, + [key for key, _ in _named_parameters_with_duplicates(model)], + param_to_unflat_param_names, + ) + + +@no_type_check +def _log_post_backward_hook( + state: _FSDPState, handle: "FlatParamHandle", log: logging.Logger +) -> None: + # Under TORCH_DISTRIBUTED_DEBUG=INFO, log the module names this hook fires for. + # Below logging of module names this post-bwd hook fires for can help debug certain + # cases where hooks don't fire, such as under certain activation checkpoint configs. + if state._use_orig_params and handle._debug_level == dist.DebugLevel.INFO: + param_fqns = _get_handle_fqns_from_root(state, handle) + log.warning("FSDP firing post-backward hooks for parameters %s", param_fqns) + + +@no_type_check +def _get_handle_fqns_from_root( + state: _FSDPState, handle: "FlatParamHandle" +) -> Optional[List[str]]: + if handle is None: + return None + param_to_fqn = state._exec_order_data.param_to_fqn + handle_params = handle.flat_param._params # only populated for use_orig_params + param_fqns = [ + fqn for fqn_list in [param_to_fqn[p] for p in handle_params] for fqn in fqn_list + ] + return param_fqns + + +def _apply_to_modules( + root_module: torch.nn.Module, + module_fn: Callable, + return_fn: Callable, + filter_fqns: Optional[List[str]] = None, + *args, + **kwargs, +): + """ + Performs a pre-order traversal of the modules in the hierarchy rooted at + ``root_module``, applying ``module_fn`` at each module and finally + returning a value using ``return_fn``. The traversal constructs the full + module prefix name (e.g. "module.submodule." just like in model state dict) + and makes that available to ``module_fn``. + + ``filter_fqns`` is used because some module may have its own prefix similar + to ``FullyShardedDataParallel`` and the ``named_parameters()`` is overwritten + to remove the prefix. + """ + + def f(module: torch.nn.Module, prefix: str, tree_level: int, *args, **kwargs): + # Call the module function before recursing over children (pre-order) + module_fn(module, prefix, tree_level, *args, **kwargs) + for submodule_name, submodule in module.named_children(): + if submodule is None: + continue + new_prefix = prefix + submodule_name + "." + new_tree_level = tree_level + 1 + if filter_fqns is not None: + for fqn in filter_fqns: + if fqn.startswith(new_prefix): + break + else: + # DMP's named_parameter() will mess up the traversal with + # ``named_children`` + `named_parameter(recurse=False)``. + # This hack is a must to make the traversal work. + # TODO: Remove this hack once DMP + FSDP is not supported. + if ( + submodule_name == "_fsdp_wrapped_module" + or submodule_name == "_dmp_wrapped_module" + ): + if ( + not torch.distributed._functional_collectives.is_torchdynamo_compiling() + ): + # TODO(voz): Don't graph break on this + warnings.warn( + "An unexpected prefix is detected. 
This case " + " should only happen when using DMP with FSDP. " + f"prefix = {prefix}, " + f"submodule_name = {submodule_name}" + ) + new_prefix = prefix + elif submodule_name == "module": + warnings.warn( + "An unexpected prefix is detected. This case " + " should only happen when DDP wraps the outer " + " modules while FSDP wraps the inner ones." + f"prefix = {prefix}, " + f"submodule_name = {submodule_name}" + ) + new_prefix = prefix + f(submodule, new_prefix, new_tree_level, *args, **kwargs) + + f(root_module, "", 0, *args, **kwargs) + return return_fn(*args, **kwargs) + + +@no_type_check +def _assert_in_training_states( + state: _FSDPState, + training_states: List[TrainingState], +) -> None: + """Asserts that FSDP is in the states ``_training_states``.""" + # Raise a `ValueError` instead of using `assert` to ensure that these + # logical assertions run even if `assert`s are disabled + if state.training_state not in training_states: + msg = ( + f"expected to be in states {training_states} but current state is " + f"{state.training_state}" + ) + # Print the error on rank 0 in case this is called in the backward pass + if state.rank == 0: + if isinstance(state, nn.Module): + print(f"Asserting FSDP instance is: {state}") + print(f"ERROR: {msg}") + traceback.print_stack() + raise ValueError(msg) + + +def _get_root_modules(modules: Set[nn.Module]) -> Set[nn.Module]: + """ + Returns: + Set[nn.Module]: The subset of ``modules`` that are root modules (i.e. + parent-less) with respect to the modules in the set itself. In other + words, these are the modules in ``modules`` that are not the child of + any other module in ``modules``. + """ + root_modules: Set[nn.Module] = set() + module_to_submodules = {module: set(module.modules()) for module in modules} + for candidate_module in modules: + is_root_module = True + for module, submodules in module_to_submodules.items(): + is_child_module = ( + candidate_module is not module and candidate_module in submodules + ) + if is_child_module: + is_root_module = False + break + if is_root_module: + root_modules.add(candidate_module) + return root_modules + + +def _override_module_mixed_precision( + root: torch.nn.Module, + module_classes_to_override: Iterable[Type[nn.Module]], + wrap_override_dict: Dict[str, Any] = {"mixed_precision": None}, # noqa: B006 +) -> Set[Type[nn.Module]]: + module_classes_to_override = tuple(set(module_classes_to_override)) + # Return a set of the actually overridden module classes + overridden_module_classes: Set[Type[nn.Module]] = set() + for mod in root.modules(): + if isinstance(mod, module_classes_to_override): + overridden_module_classes.add(type(mod)) + mod._wrap_overrides = wrap_override_dict # type: ignore[assignment] + # TODO: We need to run this mixed precision ignored module in fp32, + # but ensure subsequent modules, that may possibly be running with + # mixed precision, still receive the appropriate precision inputs + # without user having to adjust mixed precision config too much. + # As a result, we attach pre and post forward hooks to up / down + # cast. We should revisit this design. 
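+        # Usage sketch (assumed call pattern; the model and class names below are
+        # illustrative): keep ``nn.LayerNorm`` submodules in fp32 while the surrounding
+        # model runs in reduced precision:
+        #   overridden = _override_module_mixed_precision(model, [torch.nn.LayerNorm])
+        #   # ``overridden`` is the set of listed module classes actually found in
+        #   # ``model``; each matching instance gets ``_wrap_overrides`` plus the
+        #   # up/down-cast forward hooks attached below.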
+ + def cast_fn( + dtype: torch.dtype, module: nn.Module, x: torch.Tensor + ) -> torch.Tensor: + if not torch.is_floating_point(x) or x.dtype == dtype: + return x + _MODULE_TO_INP_DTYPE[module] = x.dtype + return x.to(dtype) + + def forward_pre_hook(module, args): + return _apply_to_tensors(partial(cast_fn, torch.float32, module), args) + + def forward_post_hook(module, args, output): + # NOTE: If the forward did not have any floating-point tensors, + # then the dtype will not be set for this module, and we do not + # upcast the dtype. + if module in _MODULE_TO_INP_DTYPE: + old_dtype = _MODULE_TO_INP_DTYPE[module] + return _apply_to_tensors( + partial(cast_fn, old_dtype, module), output + ) + + # We intentionally append both of these hooks so that they run after + # all other hooks. + mod.register_forward_pre_hook(forward_pre_hook, prepend=False) + mod.register_forward_hook(forward_post_hook, prepend=False) + return overridden_module_classes + + +def _no_dispatch_record_stream(tensor: torch.Tensor, stream: torch.Stream) -> None: + # FIXME record_stream doesn't work with non-cuda tensors + if tensor.device.type not in ["cuda", torch._C._get_privateuse1_backend_name()]: + return + + if torch.distributed._functional_collectives.is_torchdynamo_compiling(): + return + # from @ezyang: + # The no_dispatch was added in https://github.com/pytorch/pytorch/pull/88014 cc @fegin + # Looking over the PR, it looks like this is because we don't actually support Stream arguments + # in torch dispatch, so it just chokes. + # If Dynamo is able to answer "are there any torch dispatch modes" active (it should answer False), + # a better version of this would just be to check if there are any modes before disabling dispatch. + # TODO(voz): Extend a dynamo util to answer the above, unify the codepaths here. + tensor.record_stream(stream) + else: + with no_dispatch(): + tensor.record_stream(stream) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_debug_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_debug_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4ed76476e56b6232ce8723cda6b6498219fc03b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_debug_utils.py @@ -0,0 +1,155 @@ +import logging +import time +from collections import defaultdict +from contextlib import contextmanager +from enum import Enum +from typing import Dict, Iterator, List, Set, Tuple + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._flat_param as flat_param_file +from torch.distributed.fsdp._common_utils import ( + _apply_to_modules, + _get_module_fsdp_state, + clean_tensor_name, +) + +logger = logging.getLogger(__name__) + + +class SimpleProfiler: + class Type(str, Enum): + ALL = "all" + ALLGATHER = "all_gather" + ALLGATHER_OBJ = "all_gather_object" + RESHARDING = "resharding" + H2D = "H2D" + D2H = "D2H" + + results: Dict[str, float] = defaultdict(float) + profiling: Set[str] = set() + + @classmethod + def reset(cls) -> None: + cls.results.clear() + cls.profiling.clear() + + @classmethod + @contextmanager + def profile(cls, profile_type: str) -> Iterator[None]: + assert profile_type not in cls.profiling, ( + f"{profile_type} is already being profiled. " + "SimpleProfiler does not support profiling multiple instances at " + "the same time. 
" + ) + + cls.profiling.add(profile_type) + begin = time.monotonic() + try: + yield + finally: + end = time.monotonic() + cls.results[profile_type] += end - begin + cls.profiling.remove(profile_type) + + @classmethod + def dump_and_reset(cls, msg: str) -> None: + # This cannot be combined with DETAIL distributed log + # as the profiling will be very incorrect. + if dist.get_rank() == 0 and dist.get_debug_level() == dist.DebugLevel.INFO: + logger.warning("%s %s", msg, cls.results) + cls.reset() + + +def _get_sharded_module_tree_with_module_name_to_fqns( + model: torch.nn.Module, +) -> Tuple[str, Dict[str, List[str]]]: + """ + It is used for composable fully_shard() code path, it returns + 1. sharded module tree info: each line reprents a submodule name that contats the + submodule's FQN and its submodule class name, if the submodule is sharded by `fully_shard`, + the submodule name will add a postfix with ' FULLY SHARDED'. Each increased tree + level adds 4 spaces before the printed name. A printed sharded module tree info for a toy model + is like this: + [CompositeModel] FULLY SHARDED + l1[Linear] + u1[UnitModule] FULLY SHARDED + u1.l1[Linear] + u1.seq[Sequential] + u1.seq.0[ReLU] + u1.seq.1[Linear] + u1.seq.2[ReLU] + u1.l2[Linear] + u2[UnitModule] FULLY SHARDED + u2.l1[Linear] + u2.seq[Sequential] + u2.seq.0[ReLU] + u2.seq.1[Linear] + u2.seq.2[ReLU] + u2.l2[Linear] + l2[Linear] + 2. a dict mapping from the concated module FQN and class name to a list of its managed + original parameters' FQNs. An example of the dict for the above toy sharded model is like this: + {'[CompositeModel]': ['l1.weight', 'l1.bias', 'l2.weight', 'l2.bias'], + 'u1[UnitModule]': ['u1.l1.weight', 'u1.l1.bias', 'u1.seq.1.weight', 'u1.seq.1.bias', 'u1.l2.weight', 'u1.l2.bias'], + 'u2[UnitModule]': ['u2.l1.weight', 'u2.l1.bias', 'u2.seq.1.weight', 'u2.seq.1.bias', 'u2.l2.weight', 'u2.l2.bias'] + } + All FQNs are prefixed starting from ``model``. + + Args: + model (torch.nn.Module): Root module (which may or may not be passed to + composable `fully_shard()`). + """ + + def module_fn( + module, prefix, tree_level, sharded_tree_info, sharded_module_name_to_fqns + ): + num_spaces = tree_level * 4 + trimed_prefix = ( + prefix[:-1] if (len(prefix) > 0 and prefix[-1] == ".") else prefix + ) + prefixed_module_name = trimed_prefix + "[" + module.__class__.__name__ + "]" + printed_prefixed_module_name = " " * num_spaces + prefixed_module_name + + state = _get_module_fsdp_state(module) + if state is None: + sharded_tree_info[0] += printed_prefixed_module_name + "\n" + return + + handle = state._fully_sharded_module_to_handle.get(module, None) + + if handle: + sharded_tree_info[0] += ( + printed_prefixed_module_name + " FULLY SHARDED" + "\n" + ) + else: + sharded_tree_info[0] += printed_prefixed_module_name + "\n" + + if handle: + param = handle.flat_param + assert isinstance(param, flat_param_file.FlatParameter) + global_fqns = [ + clean_tensor_name(prefix + name) for name in param._fqns + ] # prefixed from the top level `model` (i.e. 
including `prefix`) + + if prefixed_module_name in sharded_module_name_to_fqns: + sharded_module_name_to_fqns[prefixed_module_name].extend(global_fqns) + else: + sharded_module_name_to_fqns[prefixed_module_name] = global_fqns + + def return_fn(sharded_tree_info, sharded_module_name_to_fqns): + return sharded_tree_info[0], sharded_module_name_to_fqns + + # Use List to mutate its value in place while running the recursive functions + sharded_tree_info: List[str] = [ + "", + ] + sharded_module_name_to_fqns: Dict[str, List[str]] = {} + return _apply_to_modules( + model, + module_fn, + return_fn, + [key for key, _ in model.named_parameters()], + sharded_tree_info, + sharded_module_name_to_fqns, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3a6c63dc5af8bbcb202787eb57a95625bd597e96 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py @@ -0,0 +1,45 @@ +from typing import Set + +import torch.nn as nn + + +def _annotate_modules_for_dynamo( + module: nn.Module, + ignored_modules: Set[nn.Module], + use_orig_params: bool, +): + """ + Annotates the submodules in ``module`` 's tree, except those in + ``ignored_modules``, indicating that the submodules are FSDP-managed and + saving the ``use_orig_params`` setting passed to the FSDP constructor. + """ + for submodule in module.modules(): + if submodule not in ignored_modules: + """[note: Dynamo treats FSDP wrapped modules as UnspecializedNNModule] + + Dynamo doesn't get to see this instance (FullyShardedDataParallel) during tracing, since + it skips tracing all the torch.distributed.fsdp code. + - Why? Running the FSDP code eagerly avoids lots of issues trying to trace complex hooks, and also + gets us graph-breaks on FSDP module boundaries which we want anyway for comm ops. + - However, we _also_ want dynamo to treat the wrapped module inside FSDP 'unspecially' (*), + and we need a way to indicate to dynamo which modules are wrapped by FSDP. + + (*) UnspecializedNNModules in dynamo are traced-through without any assumptions, and with thorough + guards. NNModules otherwise are 'specialized', meaning there is less overhead due to assuming + their code is well-behaved. + + One particular issue with specialized NNModules for FSDP is that the + views created for orig_params are captured into the compiled graph on the first iteration, and while + they are always going to point to the correct flatparameter and give correct results, their order + of creation influences the order of backward execution, preventing overlap of comm and computation + during backward. We need to _use_ the new parameter views created on each forward iteration, in + order for backward to interleave hooks with compute per layer. UnspecializedNNModule lets us achieve + this by capturing the module code more 'functionally' and passing parameters in as inputs each time. + """ + submodule._is_fsdp_managed_module = True # type: ignore[assignment] + + # Dynamo only supports FSDP with use_orig_params=True. 
+ # This is hacky, but I could not think of another way to add an assertion to dynamo + # for this, since Dynamo skips all the FSDP code frames and thus can't inspect the + # FSDP module directly + submodule._fsdp_use_orig_params = use_orig_params # type: ignore[assignment] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_exec_order_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_exec_order_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba2a43c059659a47c1fb25928dda8a142311b7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_exec_order_utils.py @@ -0,0 +1,364 @@ +import itertools +import warnings +from enum import auto, Enum +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed.fsdp._common_utils import _FSDPState, _get_param_to_fqns +from torch.distributed.fsdp._flat_param import FlatParamHandle + + +class _ExecOrderWarnStatus(Enum): + """Used internally for execution order validation.""" + + NONE = auto() # no deviation yet + WARNING = auto() # deviated this iteration; currently issuing warnings + WARNED = auto() # deviated in a previous iteration + + +class _ExecOrderData: + """ + This contains the data structures to track the execution order. We track + the pre-forward order on the *first* iteration for forward prefetching + (which thus assumes static graph) and the post-forward order on *every* + iteration for backward prefetching (which thus does not assume static + graph but may be provide an incorrect order). + """ + + def __init__( + self, + debug_level: dist.DebugLevel, + backward_prefetch_limit: int, + forward_prefetch_limit: int, + ) -> None: + # Tracks the (static) pre-forward order for execution order validation + # and forward prefetching + self.handles_pre_forward_order: List[FlatParamHandle] = [] + # Tracks the post-forward order for pre-backward prefetching + self.handles_post_forward_order: List[Optional[FlatParamHandle]] = [] + self._iter = 0 + + # Gives the max number of backward/forward prefetched all-gathers by a + # single module + self._backward_prefetch_limit = backward_prefetch_limit + self._forward_prefetch_limit = forward_prefetch_limit + + # Data structures for execution order validation + self._checking_order: bool = debug_level == dist.DebugLevel.DETAIL + self.process_group: Optional[dist.ProcessGroup] = None + self.world_size: Optional[int] = None + self.all_handles: List[FlatParamHandle] = [] + # Names are prefixed from the root module + self.param_to_fqn: Dict[nn.Parameter, List[str]] = {} + # Current index in the pre-forward execution order + self.current_order_index = 0 + self.warn_status = _ExecOrderWarnStatus.NONE + + def init( + self, + state: _FSDPState, + root_module: nn.Module, + process_group: dist.ProcessGroup, + ) -> None: + """ + Initializes the data structures needed for checking the forward order. + This should be called after a root FSDP instance has been set during + lazy initialization. 
+ """ + self.process_group = process_group + self.rank = process_group.rank() + self.world_size = process_group.size() + # Fix an order over the handles, which should be the same across ranks + for handle in traversal_utils._get_fsdp_handles(root_module): + index = len(self.all_handles) + self.all_handles.append(handle) + handle._handle_index = index + self.param_to_fqn = _get_param_to_fqns(root_module) + # TODO (awgu): We can broadcast the metadata of rank 0's `all_handles` + # to check that all ranks have the same handles in the same order. + # https://github.com/pytorch/pytorch/issues/79620 + + @property + def is_first_iter(self) -> bool: + return self._iter == 0 + + def get_handle_to_backward_prefetch( + self, + current_handle: FlatParamHandle, + ) -> Optional[FlatParamHandle]: + """ + Returns a :class:`list` of the handles keys of the handles to backward + prefetch given the current handles key. If there are no valid handles + keys to prefetch, then this returns an empty :class:`list`. + """ + current_index = current_handle._post_forward_index + if current_index is None: + return None + target_index = current_index - 1 + target_handle: Optional[FlatParamHandle] = None + for _ in range(self._backward_prefetch_limit): + if target_index < 0: + break + target_handle = self.handles_post_forward_order[target_index] + target_index -= 1 + return target_handle + + def get_handle_to_forward_prefetch( + self, + current_handle: FlatParamHandle, + ) -> Optional[FlatParamHandle]: + """ + Returns a :class:`list` of the handles keys of the handles to forward + prefetch given the current handles key. If there are no valid handles + keys to prefetch, then this returns an empty :class:`list`. + """ + current_index = current_handle._pre_forward_order_index + if current_index is None: + return None + target_index = current_index + 1 + target_handle: Optional[FlatParamHandle] = None + for _ in range(self._forward_prefetch_limit): + if target_index >= len(self.handles_pre_forward_order): + break + target_handle = self.handles_pre_forward_order[target_index] + target_index += 1 + return target_handle + + def record_post_forward(self, handle: Optional[FlatParamHandle]) -> None: + """ + Records ``handles`` in the post-forward order, where ``handles`` should + be a group of handles used in the same module's forward. If ``handles`` + is empty, then it is omitted. + + Unlike :meth:`record_pre_forward`, this records the order *every* + iteration with the expectation that the recorded order is reset in + :meth:`next_iter`. + """ + if not handle: + return + # Only record the first usage of a handles key + if handle._post_forward_index: + self.handles_post_forward_order.append(handle) + return + index = len(self.handles_post_forward_order) + handle._post_forward_index = index + self.handles_post_forward_order.append(handle) + + def record_pre_forward( + self, handle: Optional[FlatParamHandle], is_training: bool + ) -> None: + """ + Records ``handles`` in the pre-forward order, where ``handles`` should + be a group of handles used in the same module's forward. If ``handles`` + is empty, then it is omitted. + + On the first iteration, this checks the execution order across ranks. + See :meth:`_check_order` for details. 
+ """ + if not handle: + return + self._check_order(handle, is_training) + # Fix the order after the first iteration and only record the first + # usage of a handles key + if not self.is_first_iter or handle._pre_forward_order_index is not None: + return + index = len(self.handles_pre_forward_order) + handle._pre_forward_order_index = index + self.handles_pre_forward_order.append(handle) + + def _check_order(self, handle: FlatParamHandle, is_training: bool) -> None: + """ + Checks the forward execution order as long as ``is_training`` is + ``True`` since checking in eval mode is not supported. This only checks + if the distributed debug level is DETAIL. + + - On the first iteration, this uses all-gathers to check that all ranks + are all-gathering the same handles and hence ``FlatParameter`` s, + raising an error if not. + - On subsequent iterations, this checks that each rank is locally + consistent with its own forward order from the first iteration, issuing + a warning if not. This issues a warning on the first deviating + iteration and stops warning thereafter. + """ + # Do not check order in eval mode since the post-backward callback does + # not run so it cannot be used to mark the end of an iteration + if not is_training or not self._checking_order: + return + if self.is_first_iter: + msg_prefix = "Forward order differs across ranks:" + optional_local_indices: Tuple[ + Optional[int], ... + ] = self._get_handle_indices(handle) + device = handle.device # guaranteed to be non-CPU + num_valid_indices = sum( + (index is not None) for index in optional_local_indices + ) + tensor_kwargs: Dict[str, Union[torch.dtype, torch.device]] = { + "dtype": torch.int32, + "device": device, + } + world_num_valid_indices = torch.zeros(self.world_size, **tensor_kwargs) # type: ignore[arg-type, call-overload] + local_num_valid_indices = torch.tensor([num_valid_indices], **tensor_kwargs) # type: ignore[arg-type, call-overload] + dist.all_gather_into_tensor( + world_num_valid_indices, + local_num_valid_indices, + group=self.process_group, + ) + # Copy entire tensor from D2H once to avoid per element D2H copies + world_num_valid_indices = world_num_valid_indices.cpu() + # Check that all ranks plan to all-gather the same number of + # parameters + # TODO (awgu): Since every module has at most one handle in the + # current implementation, this should never raise the error. + assert self.world_size is not None # mypy + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + # TODO(voz): Don't graph break on this - dynamo hates the n1 != n2 + # tensor comparison control flow. 
+ # https://github.com/pytorch/pytorch/issues/107055 + for (r1, n1), (r2, n2) in itertools.combinations( + ( + (rank, world_num_valid_indices[rank]) + for rank in range(self.world_size) + ), + 2, + ): + if n1 != n2: + raise RuntimeError( + f"{msg_prefix} rank {r1} is all-gathering {n1} parameters " + f"while rank {r2} is all-gathering {n2} parameters" + ) + world_indices = torch.zeros( # type: ignore[call-overload] + self.world_size * num_valid_indices, **tensor_kwargs + ) + local_indices = torch.tensor(optional_local_indices, **tensor_kwargs) # type: ignore[arg-type] + dist.all_gather_into_tensor( + world_indices, local_indices, group=self.process_group + ) + # Copy entire tensor from D2H once to avoid per element D2H copies + world_indices = world_indices.cpu() + # Check that all ranks plan to all-gather the same index parameters + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + # TODO(voz): Don't graph break on this - dynamo hates the i1 != i2 + # tensor comparison control flow. + # https://github.com/pytorch/pytorch/issues/107055 + for (r1, i1), (r2, i2) in itertools.combinations( + ( + ( + rank, + world_indices[ + rank + * num_valid_indices : (rank + 1) + * num_valid_indices + ], + ) + for rank in range(self.world_size) + ), + 2, + ): + if i1 != i2: + r1_param_names = self._get_names_from_handle_indices(i1) + r2_param_names = self._get_names_from_handle_indices(i2) + raise RuntimeError( + f"{msg_prefix} rank {r1} is all-gathering parameters " + f"for {r1_param_names} while rank {r2} is all-gathering " + f"parameters for {r2_param_names}" + ) + else: + # Only issue warnings on the first deviating iteration and stop + # checking thereafter to avoid flooding the console + if self.warn_status == _ExecOrderWarnStatus.WARNED: + return + msg_prefix = None # non-`None` means we should warn + if self.current_order_index >= len(self.handles_pre_forward_order): + # This iteration sees extra all-gather(s) compared to the first + msg_prefix = ( + "Expected to not all-gather any more parameters in the " + "forward but trying to all-gather parameters for " + ) + else: + expected_handle = self.handles_pre_forward_order[ + self.current_order_index + ] + if expected_handle != handle: + expected_param_names = self._get_names_from_handles(expected_handle) + msg_prefix = ( + f"Expected to all-gather for {expected_param_names} " + "but trying to all-gather parameters for " + ) + if msg_prefix is not None: + param_names = self._get_names_from_handles(handle) + msg_suffix = ( + f"{param_names}" + if param_names + else "a newly-added parameter since construction time" + ) + warnings.warn( + "Forward order differs from that of the first iteration " + f"on rank {self.rank}. Collectives are unchecked and may " + f"give incorrect results or hang.\n{msg_prefix}{msg_suffix}" + ) + self.warn_status = _ExecOrderWarnStatus.WARNING + self.current_order_index += 1 + + def _get_handle_indices( + self, + handle: FlatParamHandle, + ) -> Tuple[Optional[int], ...]: + """ + Returns the handle indices (i.e. indices into ``self.all_handles``) + corresponding to the handles in ``handle``. An entry in the + returned tuple is ``None`` if the handle is invalid. + """ + indices: List[Optional[int]] = [] + if handle: + indices.append(handle._handle_index) + return tuple(indices) + + def _get_names_from_handle_indices( + self, + handle_indices: Tuple[int, ...], + ) -> List[List[str]]: + """ + Returns a list of FQNs for each handle in ``handle_indices``. 
If a + handle index is invalid, then its FQNs are omitted from the returned + list. + """ + fqns: List[List[str]] = [] + for index in handle_indices: + if index is None or index < 0 or index >= len(self.all_handles): + continue + handle = self.all_handles[index] + flat_param = handle.flat_param + fqns.append(self.param_to_fqn[flat_param]) + return fqns + + def _get_names_from_handles( + self, + handle: FlatParamHandle, + ) -> List[List[str]]: + """ + Returns a list of FQNs for each handle in ``handles_key``. If a handle + is invalid, then its FQNs are omitted from the returned list. + """ + fqns: List[List[str]] = [] + if handle: + flat_param = handle.flat_param + if flat_param in self.param_to_fqn: + fqns.append(self.param_to_fqn[flat_param]) + return fqns + + def next_iter(self): + """ + Advances the internal data structures per iteration. This should be + called in the post-backward callback since that marks the true end of + an iteration. + """ + self._iter += 1 + self.handles_post_forward_order.clear() + if self._checking_order: + self.current_order_index = 0 + if self.warn_status == _ExecOrderWarnStatus.WARNING: + self.warn_status = _ExecOrderWarnStatus.WARNED diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py new file mode 100644 index 0000000000000000000000000000000000000000..c6806e47a5ca78f561df7b85012ba2faf65ca36d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py @@ -0,0 +1,2723 @@ +import contextlib +import functools +import logging +import os +import warnings +from enum import auto, Enum +from itertools import accumulate, chain +from typing import ( + Any, + Callable, + cast, + Dict, + Generator, + Iterator, + List, + NamedTuple, + no_type_check, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.distributed.fsdp._common_utils import ( + _FSDPDeviceHandle, + _named_parameters_with_duplicates, + _no_dispatch_record_stream, + _set_fsdp_flattened, + HandleTrainingState, +) +from torch.distributed.utils import ( + _alloc_storage, + _data_ptr_allocated, + _free_storage, + _p_assert, +) +from torch.nn.parameter import _ParameterMeta # type: ignore[attr-defined] +from torch.testing._internal.distributed.fake_pg import FakeProcessGroup + +from ._fsdp_extensions import ( + _ext_post_unflatten_transform, + _ext_pre_flatten_transform, + FSDPExtensions, +) + +__all__ = [ + "FlatParameter", + "FlatParamHandle", + "FlatParamShardMetadata", + "ParamInfo", + "SharedParamInfo", + "HandleShardingStrategy", +] + +log = logging.getLogger(__name__) + + +""" +[Note: Fully Sharded Module] +We define the "fully sharded module" to be the original ``nn.Module`` that owns +a ``FlatParamHandle``. It is the *single* module logically responsible for the +*single* unshard/reshard pair for the handle's ``FlatParameter`` for a given +forward or backward pass. The fully sharded module should be passed to the +``FlatParamHandle`` constructor. + +For the wrapper code path: +- The ``FullyShardedDataParallel`` module wrapping the fully sharded module +runs the unshard/reshard on behalf of the fully sharded module by overriding +``nn.Module.forward``. +- The fully sharded module is exactly the module passed to the +``FullyShardedDataParallel`` constructor's ``module`` argument. 
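+For example (wrapper path), ``fsdp_module = FullyShardedDataParallel(submodule)`` makes
+``submodule`` the fully sharded module; ``fsdp_module.forward`` then runs the single
+unshard/reshard pair for the handle's ``FlatParameter`` around ``submodule.forward``.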
+ +For the non-wrapper code path: +- Hooks registered on the fully sharded module run the unshard/reshard. +- The fully sharded module may either be the direct argument to ``fully_shard`` +or a submodule chosen by the provided wrapping policy. +""" + +# Environment variable toggling whether to use unsafe `setattr()` for view +# setting in `_use_sharded_views()` and `_use_unsharded_views()` +# We should use 'safe' by default since it respects method overrides, but for +# special cases such as for high CPU overhead or for intentionally bypassing +# checks in the overrides, we may use 'unsafe'. +_FSDP_USE_UNSAFE_SETATTR = "FSDP_USE_UNSAFE_SETATTR" + +# Environment variable toggling whether to check for parameter/gradient +# writeback in case their storages change after FSDP initialization +# We should check by default since it prevents silent correctness errors, but +# since such changes are atypical, we may want to skip the check to save CPU +# overhead, especially since the check happens in the pre-forward and +# pre-backward each iteration. +_FSDP_SKIP_WRITEBACK_CHECK = "FSDP_SKIP_WRITEBACK_CHECK" + +# Env var toggling whether when model is in .eval() mode, should we run in fp32 +# or the reduced precision. +_FSDP_USE_FULL_PREC_IN_EVAL = "FSDP_USE_FULL_PREC_IN_EVAL" + +# Some value to set padding in tensors to for debuggability +_FLAT_PARAM_PADDING_VALUE = 42 + +# Environment variables for disabling the all-gather and reduce-scatter +# communication ops for ablation studies. Note that without these communication +# ops the training won't converge, and you probably need to disable correctness +# checks in your model. +_FSDP_USE_FAKE_ALL_GATHER = "FSDP_USE_FAKE_ALL_GATHER" +_FSDP_USE_FAKE_REDUCE = "FSDP_USE_FAKE_REDUCE" + + +# TODO: Define this for now to avoid circular imports. See if we can remove. +class HandleShardingStrategy(Enum): + FULL_SHARD = auto() + SHARD_GRAD_OP = auto() + NO_SHARD = auto() + HYBRID_SHARD = auto() + _HYBRID_SHARD_ZERO2 = auto() + + +RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES = ( + HandleShardingStrategy.FULL_SHARD, + HandleShardingStrategy.HYBRID_SHARD, +) +NO_RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES = ( + HandleShardingStrategy.SHARD_GRAD_OP, + HandleShardingStrategy._HYBRID_SHARD_ZERO2, +) + + +class ParamInfo(NamedTuple): + """Information for an original parameter.""" + + param_name: str # unprefixed + module: nn.Module + module_name: str + + +class SharedParamInfo(NamedTuple): + """ + Additional information for a shared parameter. + + For each shared parameter, we designate one module and its parameter + variable to be the primary owner, determined as the first one encountered + in the parameter walk. These are prefixed with "prim". The primary module + and parameter do not have their own :class:`SharedParamInfo` instance. + """ + + param_name: str # unprefixed + module: nn.Module + module_name: str + prim_param_name: str # unprefixed + prim_module: nn.Module + prim_module_name: str + + +class _ShardParamInfo(NamedTuple): + """Shard-related information for an original parameter.""" + + in_shard: bool + # Use to index into the sharded flat parameter, e.g. + # `flat_param[offset_in_shard : offset_in_shard + numel_in_shard]` + offset_in_shard: Optional[int] + numel_in_shard: Optional[int] + # Use to get part of the parameter in the local shard from a flattened + # version of the unsharded parameter, e.g. 
+ # `param.flatten()[intra_param_start_idx : intra_param_end_idx + 1]` + intra_param_start_idx: Optional[int] + intra_param_end_idx: Optional[int] # inclusive + + +class FlatParamShardMetadata(NamedTuple): + """ + This holds metadata specific to this rank's shard of the flat parameter. + + Attributes: + param_names (Tuple[str, ...]): Prefixed parameter names of this rank's + shard of the parameters; see :class:`FlatParameter`. + param_shapes (Tuple[torch.Size, ...]): Parameter shapes of this rank's + shard of the parameters; see :class:`FlatParameter`. + param_numels (Tuple[int, ...]): Parameter numels of this rank's shard + of the parameters; see :class:`FlatParameter`. + param_offsets (Tuple[Tuple[int, int], ...]): [start, end] offsets (in + units of numels) giving this rank's part of each flattened + original parameter. + """ + + param_names: Tuple[str, ...] + param_shapes: Tuple[torch.Size, ...] + param_numels: Tuple[int, ...] + param_offsets: Tuple[Tuple[int, int], ...] + + +class _FlatParameterMeta(_ParameterMeta): + # Make `isinstance(t, FlatParameter)` return True for custom tensor + # instances that have the _is_flat_param flag for BC + def __instancecheck__(self, instance): + # NB: do NOT test the super implementation + return isinstance(instance, torch.Tensor) and getattr( + instance, "_is_flat_param", False + ) + + +class FlatParameter(nn.Parameter, metaclass=_FlatParameterMeta): + """ + This is the flat parameter used by :class:`FullyShardedDataParallel`. + + It is comprised of one or more original parameters, which are flattened and + concatenated to construct the flat parameter. + + Under the current design, this parameter logically represents both the + unsharded and sharded flat parameter, and its data changes storages + dynamically. + - In the :class:`FullyShardedDataParallel` constructor, the parameter + is initialized as unsharded and then sharded in-place. + - At runtime, the parameter is lazily (re)-initialized. The sharded + parameter data is saved in ``self._local_shard``, and a new ``Tensor`` + ``self._full_param_padded`` is created, which is the all-gather + destination and owns the unsharded parameter storage thereafter. (See + :meth:`FlatParamHandle.init_flat_param_attributes`.) + - Throughout runtime, the parameter data changes storages as needed, + e.g. to the sharded flat parameter, low precision sharded flat + parameter, or the unsharded flat parameter. + + NOTE: Since ``use_orig_params=True`` supports intra-``FlatParameter`` + padding, we have two versions of the per-parameter numels, one that + includes the padding (``_numels_with_padding``) and one that does not + (``_numels``). The former may have length longer than the other data + structures, while the latter has the same length as the number of actual + original parameters like the other per-parameter data structures. + + NOTE: This is not a real class; instead, you will always get a Parameter + back out if you try to create one of these. This is similar to the trick + we implemented for Parameter to get it to work with subclasses; this + is primarily so that FlatParameter supports combination with FakeTensor. + + Attributes: + _unpadded_unsharded_size (torch.Size): Unsharded flat parameter's size + without right-hand-side padding for divisibility by the world size. + For ``use_orig_params=True``, this includes alignment padding. + _padded_unsharded_size (torch.Size): Unsharded flat parameter's size + with right-hand-side padding for divisibility by the world size. 
+ For ``use_orig_params=True``, this includes alignment padding. This + is only set for sharded strategies since they require padding for + the all-gather. + _sharded_size (torch.Size): Sharded flat parameter's size with padding. + This is also set for ``NO_SHARD``, in which case it is the same as + the unsharded sizes. (We omit "padded" because there is no + analogous unpadded one.) + + _num_params (int): Number of original parameters flattened into this + flat parameter. This is the length of the per-parameter data + structures. + _param_infos (Tuple[ParamInfo, ...]): Each parameter's parameter info + entry; see :class:`ParamInfo` for details. + _shapes (Tuple[torch.Size, ...]): Each parameter's original shape. + _fqns (Tuple[str, ...]): Each parameter's fully-qualified name (FQN) + prefixed from the ``_fully_sharded_module``. The names are + guaranteed to be unique in the subtree rooted at that module. + _param_extensions (Tuple[Optional[Any], ...]): Each parameter's + extension (i.e. some per-parameter state) used to customize + pre-flatten and post-unflatten behavior or ``None``. This is + experimental, and users should not depend on its existence in the + future. + _numels_with_padding (Tuple[int, ...]): Each parameter's numel + including entries for the padding. This is used to construct views + into the flat parameter via ``torch.split()``. This may have length + longer than ``_num_params``. + _numels (Tuple[int, ...]): Each parameter's numel excluding entries for + padding. This has length equal to ``_num_params``. + _shard_param_infos (Tuple[_ShardParamInfo, ...]): Each parameter's + shard parameter info; see :class:`_ShardParamInfo` for details. + _shared_param_infos (Tuple[SharedParamInfo, ...]): Shared parameter + info entries; see :class:`SharedParamInfo` for details. + _modules (Set[nn.Module]): Modules that contain some original parameter + that is flattened into the flat parameter. + + _shard_numel_padded (int): Numel padded for this rank's sharded flat + parameter. + _local_shard (Tensor): Sharded flat parameter with padding if using a + sharded strategy. If using ``NO_SHARD``, then this is the unpadded + unsharded flat parameter, and there is no notion of a sharded flat + parameter or padded unsharded flat parameter. + _full_param_padded (Tensor): Unsharded flat parameter with padding. + This is not defined for ``NO_SHARD``. When using mixed precision + for parameters, this has the low precision. + _full_prec_full_param_padded (Tensor): Full precision unsharded flat + parameter with padding. This is used for unsharding outside of + computation when using mixed precision for parameters. This is + never defined for ``NO_SHARD``. + _post_backward_hook_handle (RemovableHandle): + Flat parameter's post-backward hook handle. (Compile only) + _post_backward_hook_state (Tuple[AccumulateGrad, RemovableHandle]): + Flat parameter's :class:`AccumulateGrad` object and post-backward + hook handle. (Eager only) + _mp_shard (Tensor): Low precision sharded flat parameter with padding. + This is only defined when parameter mixed precision is enabled. For + ``NO_SHARD``, this is used for computation. + _cpu_grad (Tensor): Sharded gradient with padding stored on CPU. + This is only defined when offloading parameters is enabled. + _saved_grad_shard (Tensor): Sharded gradient with padding from previous + iterations for gradient accumulation without :meth:`no_sync`. 
+ + _params (Optional[List[nn.Parameter]]): If ``use_orig_params=True``, + then each original parameter variable; otherwise, ``None``. This + does not include any padding tensors. + _shared_params (Optional[List[nn.Parameter]]): The original shared + parameter variables if ``use_orig_params=True`` and ``None`` + otherwise. + _tensors (Optional[List[Optional[Tensor]]]): This saves the ``Tensor`` + views created in the forward and tracked by autograd when + ``use_orig_params=True`` and is ``None`` otherwise. This is to + preserve those ``Tensor`` variables for the backward to ensure that + the ``FlatParameter`` 's ``AccumulateGrad`` object does not change + in which case the post-backward hook does not run. This is relevant + for cases like reentrant activation checkpointing. + _is_grad_none_mask (Optional[List[bool]]): If ``use_orig_params=True``, + a mask over the original parameters' gradients indicating if it is + logically ``None`` or not; otherwise, ``None``. This does not + include entries for padding. This mask is needed because only some + of the parameters may have ``None`` gradient, in which case the + flat gradient must be non-``None`` and must use zeros to + approximate those original ``None`` gradients. This mask informs + FSDP to set the original parameter gradients to ``None`` (instead + of zeros) as needed. + """ + + _unpadded_unsharded_size: torch.Size + _padded_unsharded_size: torch.Size + _sharded_size: torch.Size + _num_params: int + _param_infos: Tuple[ParamInfo, ...] + _shapes: Tuple[torch.Size, ...] + _fqns: Tuple[str, ...] + _param_extensions: Tuple[Optional[Any], ...] + _numels_with_padding: Tuple[int, ...] + _numels: Tuple[int, ...] + _shard_param_infos: Tuple[_ShardParamInfo, ...] + _shared_param_infos: Tuple[SharedParamInfo, ...] + _modules: Set[nn.Module] + _shard_numel_padded: int + _local_shard: Tensor + _full_param_padded: Tensor + _full_prec_full_param_padded: Tensor + # Eager only + _post_backward_hook_state: Tuple[Any, Any] + # Compile only + _post_backward_hook_handle: Any + _mp_shard: Tensor + _cpu_grad: Tensor + _saved_grad_shard: Tensor + _params: Optional[List[nn.Parameter]] + _shared_params: Optional[List[nn.Parameter]] + _tensors: Optional[List[Optional[Tensor]]] + _is_grad_none_mask: Optional[List[bool]] + + _is_padding_mask: List[bool] + + def __new__(cls, data=None, requires_grad=True): + assert cls is FlatParameter, "subclasses FlatParameter not supported" + r = nn.Parameter.__new__(nn.Parameter, data, requires_grad) # type: ignore[call-arg] + r._is_flat_param = True # type: ignore[attr-defined] + return r + + # NB: This is not a regular method, because FlatParameters are not actually + # instances of this class (see __new__ above). So you must indirectly + # call this directly through the classmethod. + @classmethod + def _init_metadata( + cls, + self, + param_infos: List[ParamInfo], + numels: List[int], + shapes: List[torch.Size], + fqns: List[str], + shared_param_infos: List[SharedParamInfo], + param_extensions: List[Optional[Any]], + params: Optional[List[nn.Parameter]], + shared_params: Optional[List[nn.Parameter]], + is_padding_mask: List[bool], + ) -> None: + """ + Initialize attributes holding metadata about the original parameters comprising the flat parameter. + + We expose this method separate from the constructor to keep the + constructor only responsible for the flat parameter's tensor data. This + method should only be called once per model, while the constructor may + be called multiple times, e.g. 
when reloading from a checkpoint, in + which case only the tensor data needs to be passed to the constructor. + Since :meth:`load_state_dict` is implemented via :meth:`copy_`, the + metadata is correctly assumed to be unchanged. + + Args: + See the Attributes in the class docstring. + """ + assert len(param_infos) == len(shapes) + assert len(param_infos) == len(fqns) + assert len(param_infos) == len(param_extensions) + self._num_params = len(param_infos) + self._param_infos = param_infos + self._shapes = shapes + self._fqns = fqns + self._param_extensions = param_extensions + self._is_padding_mask = is_padding_mask + + numels_without_padding: List[int] = [] + for numel, is_padding in zip(numels, is_padding_mask): + if not is_padding: + numels_without_padding.append(numel) + self._numels = tuple(numels_without_padding) + self._numels_with_padding = tuple(numels) + assert len(self._numels) == self._num_params + + self._shared_param_infos = tuple(shared_param_infos) + self._modules = {pi.module for pi in self._param_infos}.union( + {spi.module for spi in self._shared_param_infos} + ) + assert (params is None) == (shared_params is None) + if params is not None: + assert shared_params is not None and len(shared_params) == len( + shared_param_infos + ) + self._params = [] + for param, is_padding in zip(params, is_padding_mask): + if not is_padding: + self._params.append(param) + self._shared_params = shared_params + # Mark the original parameters to avoid flattening them into + # another `FlatParameter` during recursive construction + for param in chain(self._params, self._shared_params): + _set_fsdp_flattened(param) + self._is_grad_none_mask = [False for _ in range(self._num_params)] + self._tensors = [None for _ in range(self._num_params)] + else: + self._params = None + self._shared_params = None + self._is_grad_none_mask = None + self._tensors = None + self._unpadded_unsharded_size = self.size() + _set_fsdp_flattened(self) + # Tracks whether the `FlatParameter`'s post-backward hook has been + # called to modify the behavior of the post-backward callback + self._post_backward_called = False + + +class FlatParamHandle: + """ + A handle that manages a flat parameter (:class:`FlatParameter`). + + This includes sharding and view management. + + Args: + params (Sequence[nn.Parameter]): The parameters to flatten into the + flat parameter. + fully_sharded_module (nn.Module): See [Note: Fully Sharded Module]. + device (torch.device): The compute and communication device, which + should be a non-CPU device. We refer to it as the compute device. + sharding_strategy (ShardingStrategy): Sharding strategy to apply to + this handle's ``FlatParameter``. + offload_params (bool): Whether to offload the handle's + ``FlatParameter`` to CPU. + mp_param_dtype (Optional[torch.dtype]): Parameter mixed precision + setting passed to the FSDP constructor. + mp_reduce_dtype (Optional[torch.dtype]): Gradient reduction mixed + precision setting passed to the FSDP constructor. + keep_low_precision_grads (bool): Whether to keep gradients in low + precision. + use_orig_params (bool): If ``True``, then FSDP preserves the original + parameter variables and returns them from ``named_parameters()`` + (e.g. to support different optimizer hyperparameters within one + :class:`FlatParameter`). If ``False``, then FSDP reconstructs the + parameters every iteration and returns the :class:`FlatParameter` s + from ``named_parameters()``. 
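+
+    Example (an illustrative sketch of internal usage, not a public API;
+    ``module``, the device, and the process group below are assumptions)::
+
+        >>> # xdoctest: +SKIP
+        >>> handle = FlatParamHandle(
+        >>>     params=list(module.parameters()),
+        >>>     fully_sharded_module=module,
+        >>>     device=torch.device("cuda", torch.cuda.current_device()),
+        >>>     sharding_strategy=HandleShardingStrategy.FULL_SHARD,
+        >>>     offload_params=False,
+        >>>     mp_param_dtype=None,
+        >>>     mp_reduce_dtype=None,
+        >>>     keep_low_precision_grads=False,
+        >>>     process_group=dist.group.WORLD,
+        >>>     use_orig_params=True,
+        >>> )
+        >>> handle.shard()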
+ """ + + ################## + # INITIALIZATION # + ################## + def __init__( + self, + params: Sequence[Union[nn.Parameter, Tensor]], + fully_sharded_module: nn.Module, + device: torch.device, + sharding_strategy: HandleShardingStrategy, + offload_params: bool, + mp_param_dtype: Optional[torch.dtype], + mp_reduce_dtype: Optional[torch.dtype], + keep_low_precision_grads: bool, + process_group: dist.ProcessGroup, + use_orig_params: bool, + *, + fsdp_extension: Optional[FSDPExtensions] = None, + ): + super().__init__() + params = list(params) + if len(params) == 0: + raise ValueError( + f"Cannot construct a {self.__class__.__name__} with an empty parameter list" + ) + self._init_setattr_fns() + self._skip_writeback_check = ( + os.environ.get(_FSDP_SKIP_WRITEBACK_CHECK, "") == "1" + ) + self._use_full_prec_in_eval = ( + os.environ.get(_FSDP_USE_FULL_PREC_IN_EVAL, "") == "1" + ) + self._use_fake_all_gather = os.environ.get(_FSDP_USE_FAKE_ALL_GATHER, "") == "1" + self._use_fake_reduce = os.environ.get(_FSDP_USE_FAKE_REDUCE, "") == "1" + if self._skip_writeback_check: + _warn_skip_writeback_check( + log, + f"Since {_FSDP_SKIP_WRITEBACK_CHECK}=1, FSDP will not check " + "for parameter or gradient writeback. Changing parameter or " + "gradient storages may lead to silent correctness errors.", + ) + if self._use_fake_all_gather: + _warn_use_fake_all_gather( + log, + f"Since {_FSDP_USE_FAKE_ALL_GATHER}=1, FSDP will not execute " + "all-gather ops. Your training will be incorrect, but " + "can reveal how much time spent on all-gather ops.", + ) + if self._use_fake_reduce: + _warn_use_fake_reduce( + log, + f"Since {_FSDP_USE_FAKE_REDUCE}=1, FSDP will not execute " + "reduce-scatter ops. Your training will be incorrect, but " + "can reveal how much time spent on reduce-scatter ops.", + ) + # Only align addresses for `use_orig_params=True` (for now) + align_addresses = use_orig_params + self._init_get_unflat_views_fn(align_addresses) + self.device = device + self._device_handle = _FSDPDeviceHandle.from_device(self.device) + self.process_group = process_group + if self._use_fake_all_gather or self._use_fake_reduce: + self._fake_process_group = FakeProcessGroup( + rank=process_group.rank(), world_size=process_group.size() + ) + self.rank = process_group.rank() + self.world_size = process_group.size() + self._sharding_strategy = sharding_strategy + self._offload_params = offload_params + self._use_orig_params = use_orig_params + self._keep_low_precision_grads = keep_low_precision_grads + self._training_state = HandleTrainingState.IDLE + self._debug_level = dist.get_debug_level() + self._fully_sharded_module = fully_sharded_module + # For strategies that do not free after forward, we skip using sharded + # views after forward since the unsharded data exists. We still switch + # `self.flat_param` to point to the sharded flat parameter since what + # it points to parameterizes behavior. We use the following attribute + # to track which tensor data the parameters are unsharded views into. 
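+        # (This stays ``None`` whenever sharded views are not being skipped.)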
+ self._unsharded_flat_param_for_skipped_views: Optional[Tensor] = None + # The index in the state's `all_handles`, which must be the + # same across ranks for the execution order validation to work + self._handle_index: Optional[int] = None + # Index in handles_to_pre_forward_order + self._pre_forward_order_index: Optional[int] = None + # Index in `handles_post_forward_order` + self._post_forward_index: Optional[int] = None + # Used for guarding against mistargeted forward prefetches + self._needs_pre_forward_unshard = False + # Used for guarding against mistargeted backward prefetches + self._needs_pre_backward_unshard = False + # Was the handle prefetched? Set on successful _prefetch_handle and unshard + self._prefetched = False + # Optimistically assume a valid input `params` and set dtype attributes + # before `_init_flat_param()`, which performs the actual validation + self._orig_param_dtype = params[0].dtype + self._init_param_reduce_dtypes(mp_param_dtype, mp_reduce_dtype) + assert self._fwd_bwd_param_dtype is not None # mypy + self._aligned_numel = ( + _get_aligned_numel(unsharded_dtype=self._fwd_bwd_param_dtype) + if align_addresses + else 0 + ) + self._fsdp_extension = fsdp_extension + self._init_flat_param_and_metadata( + params, fully_sharded_module, self._aligned_numel, use_orig_params # type: ignore[arg-type] + ) + self._use_unsharded_views(as_params=False) + + def _init_setattr_fns(self): + use_unsafe_setattr = os.environ.get(_FSDP_USE_UNSAFE_SETATTR, "") == "1" + self._setattr_tensor: Callable[[nn.Module, str, Tensor], None] + self._setattr_param: Callable[[nn.Module, str, nn.Parameter], None] + if use_unsafe_setattr: + self._setattr_tensor = _unsafe_setattr_tensor + self._setattr_param = _unsafe_setattr_param + else: + self._setattr_tensor = _safe_setattr_tensor_or_param + self._setattr_param = _safe_setattr_tensor_or_param + + def _init_get_unflat_views_fn(self, align_addresses: bool): + self._get_unflat_views = ( + self._get_unflat_views_aligned + if align_addresses + else self._get_unflat_views_unaligned + ) + + def _init_flat_param_and_metadata( + self, + params: List[Union[Tensor, nn.Parameter]], + module: nn.Module, + aligned_numel: int, + use_orig_params: bool, + ) -> None: + """ + Initialize the ``FlatParameter`` and its metadata. + + NOTE: This should only be called once at construction time, after which + the ``FlatParameter`` metadata is assumed to be static. + + NOTE: The elements of ``params`` should only be ``Tensor`` s when + composing with ``DTensor`` -based tensor parallelism, in which case the + elements may be ``DTensor`` local shards. + """ + if len(params) == 0: + raise ValueError("Expects non-empty `params`") + if aligned_numel < 0: + raise ValueError( + f"Expects non-negative `aligned_numel` but got {aligned_numel}" + ) + ( + dtype, + flat_param_requires_grad, + device, + ) = self._validate_tensors_to_flatten(params) + params_set = set(params) + # For alignment padding, only `numels` gets strictly non-`None` + # elements, and all other lists get `None` elements for padding. 
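+        # Worked example (assumed values, for illustration only): with
+        # `aligned_numel=4`, `world_size=2`, and two parameters with numels
+        # [3, 5], the flattened layout is [p0 (3), padding (1), p1 (5),
+        # padding (1)], i.e. 1 numel of alignment padding before p1 plus
+        # 1 numel of trailing padding for world-size divisibility, giving
+        # `is_padding_mask = [False, True, False, True]`.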
+ param_infos: List[ParamInfo] = [] + numels: List[int] = [] + shapes: List[torch.Size] = [] + fqns: List[str] = [] + shared_param_infos: List[SharedParamInfo] = [] + shared_param_memo: Dict[ + Union[Tensor, nn.Parameter], Tuple[nn.Module, str, str] + ] = {} + params_to_flatten: List[Union[Tensor, nn.Parameter]] = [] + shared_params: List[Union[Tensor, nn.Parameter]] = [] + param_extensions: List[Any] = [] + is_padding_mask: List[bool] = [] + total_numel = total_numel_without_padding = 0 + for submodule_name, submodule in module.named_modules(remove_duplicate=False): + for param_name, param in _named_parameters_with_duplicates( + submodule, recurse=False + ): + if param not in params_set: + continue + if param in shared_param_memo: # shared reference + prim_module, prim_module_name, prim_param_name = shared_param_memo[ + param + ] + shared_params.append(param) + shared_param_infos.append( + SharedParamInfo( + param_name, + submodule, + submodule_name, + prim_param_name, + prim_module, + prim_module_name, + ) + ) + else: + if aligned_numel > 0: + numel_to_pad = aligned_numel - (total_numel % aligned_numel) + if numel_to_pad > 0 and numel_to_pad < aligned_numel: + padding_tensor = _construct_padding_tensor( + numel_to_pad, dtype, False, device + ) + params_to_flatten.append(padding_tensor) + is_padding_mask.append(True) + numels.append(numel_to_pad) + total_numel += numel_to_pad + transform_t, extension = _ext_pre_flatten_transform( + param, + self._fsdp_extension, + ) + param = cast(nn.Parameter, transform_t) + param_extensions.append(extension) + shared_param_memo[param] = (submodule, submodule_name, param_name) + params_to_flatten.append(param) + is_padding_mask.append(False) + param_infos.append(ParamInfo(param_name, submodule, submodule_name)) + numels.append(param.numel()) + shapes.append(param.shape) + fqn = ( + submodule_name + "." + param_name + if submodule_name + else param_name + ) + fqns.append(fqn) + total_numel += param.numel() + total_numel_without_padding += param.numel() + if len(params_to_flatten) == 0: + raise ValueError( + f"`params` were not found in `module`'s tree" + f"params: {params}\nmodule: {module}" + ) + if ( + self.rank == 0 + and aligned_numel > 0 + and total_numel != total_numel_without_padding + ): + log.info( + "FSDP FlatParameter address alignment created " + "%s numel of padding (%s vs. 
%s)", + total_numel - total_numel_without_padding, + total_numel, + total_numel_without_padding, + ) + if aligned_numel > 0: + # Pad to be divisible by world size to avoid a copy for the + # post-backward reduce-scatter + numel_to_pad = self.world_size - (total_numel % self.world_size) + if numel_to_pad > 0 and numel_to_pad < self.world_size: + if self.rank == 0: + log.info( + "FSDP FlatParameter world size divisibility created " + "%s numel of padding", + numel_to_pad, + ) + padding_tensor = _construct_padding_tensor( + numel_to_pad, dtype, False, device + ) + params_to_flatten.append(padding_tensor) + is_padding_mask.append(True) + numels.append(numel_to_pad) + total_numel += numel_to_pad + # Pass `aligned_numel=0` since we already included padding tensors + self.flat_param: FlatParameter = self.flatten_tensors_into_flat_param( + params_to_flatten, + aligned_numel=0, + requires_grad=flat_param_requires_grad, + ) + FlatParameter._init_metadata( + self.flat_param, + param_infos, + numels, + shapes, + fqns, + shared_param_infos, + param_extensions, + _convert_to_params(params_to_flatten) if use_orig_params else None, + _convert_to_params(shared_params) if use_orig_params else None, + is_padding_mask, + ) + + def _validate_tensors_to_flatten( + self, tensors: List[Union[Tensor, nn.Parameter]] + ) -> Tuple: + """Validate the tensors to flatten and returns any necessary metadata.""" + dtype: Optional[torch.dtype] = None + # Return as the logical OR over each tensor's value + flat_param_requires_grad: Optional[bool] = None + device: Optional[torch.device] = None + # For `use_orig_params=True`, permit non-uniform `requires_grad` + for tensor in tensors: + if isinstance(tensor, FlatParameter): + raise ValueError("Cannot flatten a `FlatParameter`") + if dtype is None and not tensor.is_floating_point(): + raise ValueError("Cannot flatten integer dtype tensors") + if dtype is not None and tensor.dtype != dtype: + raise ValueError( + f"Must flatten tensors with uniform dtype but got {dtype} " + f"and {tensor.dtype}" + ) + if ( + not self._use_orig_params + and flat_param_requires_grad is not None + and tensor.requires_grad != flat_param_requires_grad + ): + raise ValueError( + "Must flatten tensors with uniform `requires_grad` when " + "`use_orig_params=False`" + ) + if device is not None and tensor.device != device: + raise ValueError( + "Must flatten tensors on the same device but got both " + f"{device} and {tensor.device}" + ) + dtype = tensor.dtype + flat_param_requires_grad = flat_param_requires_grad or tensor.requires_grad + device = tensor.device + assert flat_param_requires_grad is not None, "Requires non-empty `tensors` list" + return dtype, flat_param_requires_grad, device + + def flatten_tensors( + self, + tensors: List[Tensor], + aligned_numel: int, + ) -> Tensor: + """ + Flatten ``tensors`` into a single flat tensor. + + The flattening optionally includes + padding if ``aligned_numel`` is greater than 0, where ``aligned_numel`` + gives the numel required to have address alignment. + + NOTE: The padding alignment algorithm must be kept in sync with + :meth:`_init_flat_param_metadata`. We separate the two methods because + the initialization happens once, whereas this method may be called + multiple times throughout training (e.g. for checkpointing). 
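+
+        Example (an illustrative sketch; assumes an already-constructed
+        handle with ``world_size=2``)::
+
+            >>> # xdoctest: +SKIP
+            >>> flat = handle.flatten_tensors(
+            >>>     [torch.ones(3), torch.ones(5)], aligned_numel=4
+            >>> )
+            >>> flat.numel()  # 3 + 1 alignment pad + 5 + 1 divisibility pad
+            10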
+ """ + if len(tensors) == 0: + raise ValueError("Expects non-empty `tensors`") + if aligned_numel < 0: + raise ValueError( + f"Expects non-negative `aligned_numel` but got {aligned_numel}" + ) + dtype, _, device = self._validate_tensors_to_flatten(tensors) + flat_tensors: List[Tensor] = [] + if aligned_numel > 0: + total_numel = 0 + for tensor in tensors: + numel_to_pad = aligned_numel - (total_numel % aligned_numel) + if numel_to_pad > 0 and numel_to_pad < aligned_numel: + padding_tensor = _construct_padding_tensor( + numel_to_pad, dtype, False, device + ) + flat_tensors.append(padding_tensor) + total_numel += numel_to_pad + flat_tensors.append(torch.flatten(_detach_if_needed(tensor))) + total_numel += tensor.numel() + numel_to_pad = self.world_size - (total_numel % self.world_size) + if numel_to_pad > 0 and numel_to_pad < self.world_size: + padding_tensor = _construct_padding_tensor( + numel_to_pad, dtype, False, device + ) + flat_tensors.append(padding_tensor) + total_numel += numel_to_pad + else: + flat_tensors = [ + torch.flatten(_detach_if_needed(tensor)) for tensor in tensors + ] + return torch.cat(flat_tensors, dim=0) + + def flatten_tensors_into_flat_param( + self, + tensors: List[Tensor], + aligned_numel: int, + requires_grad: bool, + ) -> FlatParameter: + flat_param_data = self.flatten_tensors(tensors, aligned_numel) + return FlatParameter(flat_param_data, requires_grad=requires_grad) + + def _init_param_reduce_dtypes( + self, + mp_param_dtype: Optional[torch.dtype], + mp_reduce_dtype: Optional[torch.dtype], + ) -> None: + """ + Initialize param and reduce dtypes. + + Precondition: ``self.flat_param`` is set. This ensures that this + handle's parameters have a single dtype. + + Postcondition: This sets ``self._fwd_bwd_param_dtype`` and + ``self._reduce_dtype``. If ``mp_param_dtype`` or ``mp_reduce_dtype`` + is ``None``, then we assume the original parameter dtype. One special + case is if ``mp_param_dtype`` is not ``None`` and ``mp_reduce_dtype`` + is ``None``, in which case we assume the gradient reduction dtype + matches the forward/backward parameter dtype. + """ + # Save whether these dtypes were specified so that we permit the + # parameter dtype to change up until the lazy initialization + self._low_prec_param_dtype_specified = mp_param_dtype is not None + self._low_prec_reduce_dtype_specified = mp_reduce_dtype is not None + if ( + self._low_prec_param_dtype_specified + and not self._low_prec_reduce_dtype_specified + ): + # Special case: infer gradient reduction mixed precision + self._fwd_bwd_param_dtype = mp_param_dtype + self._reduce_dtype = self._fwd_bwd_param_dtype + else: + self._fwd_bwd_param_dtype = mp_param_dtype or self._orig_param_dtype + self._reduce_dtype = mp_reduce_dtype or self._orig_param_dtype + assert self._fwd_bwd_param_dtype is not None + assert self._reduce_dtype is not None + + ################################### + # SHARD INITIALIZATION & METADATA # + ################################### + @torch.no_grad() + def shard(self): + """ + Shard the handle's ``FlatParameter``. + + This allocates new memory for + the sharded flat parameter and frees the unsharded flat parameter's + storage. + + Postcondition: ``self.flat_param`` is the sharded flat parameter. Shard + metadata attributes are set for all sharding strategies. 
+ """ + flat_param = self.flat_param + if not self.uses_sharded_strategy: + self._init_shard_metadata(0, 0, flat_param.numel() - 1) + else: + _p_assert( + flat_param.storage_offset() == 0, + "The `FlatParameter` is not the sole occupant of its storage", + ) + sharded_flat_param, numel_padded = FlatParamHandle._get_shard( + flat_param, self.rank, self.world_size + ) + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + allocated = flat_param._typed_storage()._size() > 0 + if allocated: + flat_param._typed_storage()._resize_(0) + flat_param.set_(sharded_flat_param) # type: ignore[call-overload] + start_idx = sharded_flat_param.numel() * self.rank + end_idx = sharded_flat_param.numel() * (self.rank + 1) - 1 # inclusive + self._init_shard_metadata(numel_padded, start_idx, end_idx) + if self._use_orig_params: + self._use_sharded_views() + + def _init_shard_metadata( + self, + numel_padded: int, + unsharded_start_idx: int, + unsharded_end_idx: int, + ) -> None: + """ + Initialize shard-related metadata for this rank's shard of the flat parameter. + + This includes ``_sharded_size``, ``_shard_param_infos``, and ``_shard_numel_padded``. + + Args: + numel_padded (int): Numel padded for this rank's sharded flat + parameter. + unsharded_start_idx (int): Start index in the unsharded flat + parameter assigned to this rank. + unsharded_end_idx (int): End index (inclusive) in the unsharded + flat parameter assigned to this rank. + + Precondition: ``self.flat_param`` 's data is the sharded flat + parameter. + """ + flat_param = self.flat_param + flat_param._sharded_size = flat_param.size() # type: ignore[attr-defined] + sharded_flat_param_numel = flat_param.numel() # includes `numel_padded` + _p_assert( + unsharded_start_idx >= 0 and unsharded_start_idx <= unsharded_end_idx, + f"unsharded_start_idx: {unsharded_start_idx} unsharded_end_idx: {unsharded_end_idx}", + ) + _p_assert( + numel_padded <= sharded_flat_param_numel, + f"numel_padded: {numel_padded} " + f"sharded_flat_param_numel: {sharded_flat_param_numel}", + ) + shard_param_infos = self._get_shard_metadata( + unsharded_start_idx, unsharded_end_idx + ) + assert ( + len(shard_param_infos) == flat_param._num_params + ), f"Expects length {flat_param._num_params} but got {len(shard_param_infos)}" + flat_param._shard_param_infos = shard_param_infos # type: ignore[attr-defined] + flat_param._shard_numel_padded = numel_padded # type: ignore[attr-defined] + + def _get_shard_metadata( + self, + unsharded_start_idx: int, + unsharded_end_idx: int, + ) -> Tuple[_ShardParamInfo, ...]: + """ + Compute the shard metadata based on ``unsharded_start_idx`` and ``unsharded_end_idx`` (inclusive). + + ``unsharded_start_idx`` and ``unsharded_end_idx`` give the interval of the + unsharded flat parameter specifying the shard. 
+ """ + flat_param_offsets = self._get_flat_param_offsets() + assert len(flat_param_offsets) == len( + self.flat_param._numels_with_padding + ), f"Expected {len(self.flat_param._numels_with_padding)} but got {len(flat_param_offsets)}" + shard_param_infos: List[_ShardParamInfo] = [] + sharded_flat_param_numel = unsharded_end_idx - unsharded_start_idx + 1 + # `unsharded_param_start_idx` and `unsharded_param_end_idx` are indices + # into the unsharded flat parameter (inclusive) of the given parameter + for i, ( + (unsharded_param_start_idx, unsharded_param_end_idx), + is_padding, + ) in enumerate(zip(flat_param_offsets, self.flat_param._is_padding_mask)): + if is_padding: + continue + in_sharded_flat_param = ( + unsharded_start_idx <= unsharded_param_end_idx + and unsharded_end_idx >= unsharded_param_start_idx + ) + if not in_sharded_flat_param: + shard_param_info = _ShardParamInfo(False, None, None, None, None) + else: + if unsharded_start_idx <= unsharded_param_start_idx: + # This branch can only happen once since the rank's + # unsharded start index can only intersect one parameter + intra_param_start_idx = 0 + offset_in_shard = unsharded_param_start_idx - unsharded_start_idx + else: + intra_param_start_idx = ( + unsharded_start_idx - unsharded_param_start_idx + ) + offset_in_shard = 0 + assert ( + offset_in_shard >= 0 and offset_in_shard < sharded_flat_param_numel + ), ( + f"Invalid `offset_in_shard` of {offset_in_shard} for " + f"sharded flat parameter with {sharded_flat_param_numel} numel" + ) + intra_param_end_idx = ( + min(unsharded_param_end_idx, unsharded_end_idx) + - unsharded_param_start_idx + ) + numel_in_shard = intra_param_end_idx - intra_param_start_idx + 1 + shard_param_info = _ShardParamInfo( + True, + offset_in_shard, + numel_in_shard, + intra_param_start_idx, + intra_param_end_idx, + ) + shard_param_infos.append(shard_param_info) + return tuple(shard_param_infos) + + @staticmethod + def _get_unpadded_shard( + tensor: Tensor, + rank: int, + world_size: int, + ) -> Tuple[Tensor, int]: + """ + Return the unpadded shard of ``tensor`` for the given ``rank`` and ``world_size``. + + The returned value is a tuple of the shard of ``tensor`` without any + padding and the numel to pad for that shard. + + If ``tensor`` is already flattened or may be viewed in the flattened + shape (which is true in the expected usage), then this method does not + allocate any new tensor memory. + """ + chunks = torch.flatten(tensor).chunk(world_size) + if len(chunks) < (rank + 1): + # This rank gets an empty chunk fully padded with zeros since there + # are not enough chunks across ranks + chunk = chunks[0].new_empty(0) + else: + chunk = chunks[rank] + numel_to_pad = chunks[0].numel() - chunk.numel() + assert ( + numel_to_pad >= 0 + ), "Chunk's size should be at most the first chunk's size" + return chunk, numel_to_pad + + @staticmethod + def _get_shard( + tensor: Tensor, + rank: int, + world_size: int, + ) -> Tuple[Tensor, int]: + """ + Return the shard of ``tensor`` with padding for the given ``rank`` and ``world_size`` and the numel padded for that shard. + + This method allocates new memory (via :meth:`clone`) since the + unsharded ``tensor`` may be deallocated after this method returns. 
+ """ + chunk, numel_to_pad = FlatParamHandle._get_unpadded_shard( + tensor, rank, world_size + ) + shard = chunk.clone() + if numel_to_pad > 0: + shard = F.pad(shard, [0, numel_to_pad]) + return shard, numel_to_pad + + @staticmethod + def _get_sharded_size(tensor: Tensor, rank: int, world_size: int) -> torch.Size: + """ + Return the shape of ``tensor`` after sharding including padding. + + This requires ``tensor`` to have 1D shape and ensures that the returned + shape is 1D. + """ + assert len(tensor.shape) == 1, f"{tensor.shape}" + unpadded_sharded_tensor, numel_to_pad = FlatParamHandle._get_unpadded_shard( + tensor, rank, world_size + ) + unpadded_sharded_size = unpadded_sharded_tensor.size() + assert len(unpadded_sharded_size) == 1, f"{unpadded_sharded_size}" + return torch.Size([unpadded_sharded_size[0] + numel_to_pad]) + + def _get_flat_param_offsets(self) -> List[Tuple[int, int]]: + """ + Return [start, end] offsets of each original parameter's flattened data in the unsharded flat parameter (without padding). + + NOTE: The returned list includes elements for alignment padding. + """ + cumulative_sum = list(accumulate(self.flat_param._numels_with_padding)) + starts = [0] + cumulative_sum[:-1] + ends = [end - 1 for end in cumulative_sum] # inclusive + param_offsets = list(zip(starts, ends)) + return param_offsets + + @no_type_check + def shard_metadata( + self, + ) -> FlatParamShardMetadata: + """ + Return the shard-related metadata specific to this rank's shard of the flat parameter. + + NOTE: The returned tuple does not include elements for alignment + padding but does account for the padding. + """ + fqns_list = [] + shapes_list = [] + numels_list = [] + shard_param_offsets = [] + for fqn, shape, numel, shard_param_info in zip( + self.flat_param._fqns, + self.flat_param._shapes, + self.flat_param._numels, + self.flat_param._shard_param_infos, + ): + if not shard_param_info.in_shard: + continue + fqns_list.append(fqn) + shapes_list.append(shape) + numels_list.append(numel) + shard_param_offsets.append( + ( + shard_param_info.intra_param_start_idx, + shard_param_info.intra_param_end_idx, + ) + ) + return FlatParamShardMetadata( + tuple(fqns_list), + tuple(shapes_list), + tuple(numels_list), + shard_param_offsets, + ) + + @no_type_check + @torch.no_grad() + def init_flat_param_attributes(self) -> None: + """ + This initializes some attributes on the handle's ``FlatParameter``. + This should be called during lazy initialization since it requires the + parameter to be on the compute device if not offloading to CPU and we + want to give users the chance to move the parameter appropriately after + the FSDP constructor. + + For each tensor attribute on the ``FlatParameter``, see the unshard and + reshard methods in this class for the allocation and free pattern. + """ + flat_param = self.flat_param + if flat_param.dtype != self._orig_param_dtype: + # Entering this branch means that the user changed the parameter + # dtype after FSDP initialization, in which case we may need to + # refresh some saved dtype attributes (dtypes specified as a part + # of mixed precision take precedence). 
+ if not self._low_prec_param_dtype_specified: + self._fwd_bwd_param_dtype = flat_param.dtype + # For `reduce_dtype`, require `param_dtype` was not specified since + # then we infer the `reduce_dtype` from the specified `param_dtype` + if ( + not self._low_prec_reduce_dtype_specified + and not self._low_prec_param_dtype_specified + ): + self._reduce_dtype = flat_param.dtype + self._orig_param_dtype = flat_param.dtype + cpu_device = torch.device("cpu") + if self._offload_params: + _p_assert( + flat_param.device == cpu_device, + f"Expects the `FlatParameter` to be on CPU when parameter CPU " + f"offloading is enabled, not {flat_param.device}", + ) + else: + self._check_on_compute_device(self.flat_param) + flat_param._local_shard = flat_param.data + if self._offload_params: + # Pin the memory for faster H2D transfer + flat_param._local_shard = flat_param._local_shard.pin_memory() + # Pre-allocate the sharded gradient on CPU to enable non-blocking + # D2H transfer during the backward pass + flat_param._cpu_grad = torch.zeros_like( + flat_param._local_shard, device=cpu_device + ).pin_memory() + if self._uses_param_mixed_precision: + # For parameter mixed precision, we maintain a low precision + # sharded tensor on the compute device to be all-gathered (for + # sharded strategies) or directly used (for `NO_SHARD`) for + # computation. + flat_param._mp_shard = torch.empty_like( + flat_param._local_shard, + device=self.device, + dtype=self._fwd_bwd_param_dtype, + ) + _free_storage(flat_param._mp_shard) + if self.uses_sharded_strategy: + # We maintain a padded unsharded tensor that serves as the + # all-gather destination and owns the original parameter storages. + unsharded_param_dtype = ( + self._fwd_bwd_param_dtype + if self._uses_param_mixed_precision + else flat_param.dtype + ) # use low precision if parameter mixed precision is enabled + padded_unsharded_numel = flat_param.numel() * self.world_size + flat_param._full_param_padded = torch.empty( + padded_unsharded_numel, + device=self.device, + dtype=unsharded_param_dtype, + ) + flat_param._padded_unsharded_size = flat_param._full_param_padded.size() + _free_storage(flat_param._full_param_padded) + + if self._uses_param_mixed_precision: + # For parameter mixed precision, we maintain a full precision + # padded unsharded tensor for when we force full precision. + flat_param._full_prec_full_param_padded = torch.empty( + padded_unsharded_numel, + device=self.device, + dtype=flat_param.dtype, # full precision + ) + _free_storage(flat_param._full_prec_full_param_padded) + + ################### + # UNSHARD/RESHARD # + ################### + def pre_unshard(self) -> bool: + """ + Return ``False`` if this is a no-op and ``True`` otherwise. + + Postcondition: ``self.flat_param`` 's data is on the device for + communication and is what should be all-gathered. This means that it + matches the dtype of the expected unsharded parameter. + """ + if ( + self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS + and self._skipped_use_sharded_views + ): + # Since this path imposes special semantics for the unsharded flat + # parameter (e.g. 
forcing full precision), use sharded views to + # reuse the existing logic for that special handling + self._use_sharded_views() + ret = False + if self._use_orig_params and not self._skip_writeback_check: + ret = self._writeback_orig_params() + if ( + self.uses_sharded_strategy + and not self._offload_params + and not self.needs_unshard() + ): + pass # no-op + elif self._uses_param_mixed_precision and not self._force_full_precision: + self._use_low_precision_shard() + ret = True + elif self._offload_params and self.flat_param.device != self.device: + # NOTE: This creates a new tensor distinct from any attributes. + self.flat_param_to(self.device, non_blocking=True) + ret = True + self._check_on_compute_device(self.flat_param) + return ret + + def _use_low_precision_shard(self): + """Allocate on the compute device and switch to using the low precision sharded flat parameter.""" + self._check_low_precision_shard() + flat_param = self.flat_param + _alloc_storage( + flat_param._mp_shard, flat_param._local_shard.size() # type: ignore[attr-defined] + ) + # `copy_()` implicitly casts to the low precision + flat_param._mp_shard.copy_( # type: ignore[attr-defined] + flat_param._local_shard.to( # type: ignore[attr-defined] + self.device, non_blocking=True + ) + ) + # Invariant: `_mp_shard` is always on the compute device. + flat_param.data = flat_param._mp_shard # type: ignore[attr-defined] + + def unshard(self): + """ + Run the unshard logic. + + This includes all-gathering the flat parameter + and switching to using the unsharded flat parameter. If the handle does + not need unsharding, then this only switches to using the unsharded + flat parameter. For ``NO_SHARD``, this is a no-op. + + If FSDP is in :meth:`summon_full_params` and the handle uses parameter + mixed precision, then the parameter is forced to full precision. + """ + if not self.needs_unshard(): + # Even when not needing an unshard, we should switch to using + # the unsharded flat parameter + unsharded_flat_param = ( + self._get_padded_unsharded_flat_param() + if self.uses_sharded_strategy + else self.flat_param + ) + self._use_unsharded_flat_param(unsharded_flat_param) + return + unsharded_flat_param = self._alloc_padded_unsharded_flat_param() + padded_unsharded_flat_param = self._all_gather_flat_param(unsharded_flat_param) + self._use_unsharded_flat_param(padded_unsharded_flat_param) + + def needs_unshard(self) -> bool: + """Return if the handle's flat parameter needs to be unsharded.""" + if not self.uses_sharded_strategy: + return False + unsharded_flat_param = self._get_padded_unsharded_flat_param() + already_unsharded = _same_storage_size( + unsharded_flat_param, unsharded_flat_param.numel() + ) + return not already_unsharded + + def _alloc_padded_unsharded_flat_param(self): + """ + Allocate the *padded* unsharded flat parameter. + + The unpadded unsharded + flat parameter is always a view into the padded one. This padded + parameter is saved to a different attribute on the ``FlatParameter`` + depending on if we force full precision. + """ + self._check_sharded_strategy() + flat_param = self.flat_param + unsharded_flat_param = self._get_padded_unsharded_flat_param() + self._check_storage_freed(unsharded_flat_param) + _alloc_storage(unsharded_flat_param, flat_param._padded_unsharded_size) # type: ignore[attr-defined] + return unsharded_flat_param + + def _get_padded_unsharded_flat_param(self) -> torch.Tensor: + """ + Return a reference to the padded unsharded flat parameter depending on the calling context. 
+ + This should only be called if using a sharded strategy. + """ + self._check_sharded_strategy() + flat_param = self.flat_param + if self._force_full_precision and self._uses_param_mixed_precision: + # When parameter mixed precision is enabled, we use a different + # tensor as the all-gather destination to preserve the invariant + # that `_full_param_padded` is in the low precision + unsharded_flat_param = flat_param._full_prec_full_param_padded # type: ignore[attr-defined] + _p_assert( + unsharded_flat_param.dtype != self._fwd_bwd_param_dtype, + f"Expects full precision but got {self._fwd_bwd_param_dtype}", + ) + # For no-reshard-after-forward strategies, `_full_param_padded` may + # still be allocated from a previous forward. As we are forcing + # full precision here, the full-precision unsharded copy may be + # modified, invalidating the existing low-precision unsharded copy, + # so we should free it here to ensure a new all-gather for the next + # forward/backward computation to persist the modifications. + if flat_param._full_param_padded.untyped_storage().size() > 0: + _free_storage(flat_param._full_param_padded) + else: + unsharded_flat_param = flat_param._full_param_padded # type: ignore[attr-defined] + return unsharded_flat_param + + def _all_gather_flat_param( + self, + padded_unsharded_flat_param: Tensor, + ) -> Tensor: + """ + All-gather the handle's flat parameter to the destination ``padded_unsharded_flat_param``. + + Then switch to use the all-gathered tensor. + """ + _p_assert( + hasattr(self, "process_group") and hasattr(self, "world_size"), + "Expects a process group and world size to have been set via `shard()`", + ) + sharded_flat_param = self.flat_param.data + expected_numel = sharded_flat_param.numel() * self.world_size + _p_assert( + padded_unsharded_flat_param.numel() == expected_numel, + f"Expects {expected_numel} numel but got {padded_unsharded_flat_param.numel()}", + ) + + pg = ( + self._fake_process_group + if self._use_fake_all_gather + else self.process_group + ) + + # HACK this should be handled by C10D + if sharded_flat_param.is_cpu: # type: ignore[attr-defined] + tensor_list = list( + torch.chunk(padded_unsharded_flat_param, dist.get_world_size(pg)) + ) + work = dist.all_gather(tensor_list, sharded_flat_param, group=pg) + else: + dist.all_gather_into_tensor( + padded_unsharded_flat_param, + sharded_flat_param, + pg, + ) + + if self._offload_params: + # In case of offloading, `flat_param.data` (i.e. sharded param) is + # created on the pre-unshard stream. We need to hand it over to the + # unshard stream for all-gather + _no_dispatch_record_stream( + sharded_flat_param, + self._device_handle.current_stream(), # unshard_stream + ) + return padded_unsharded_flat_param + + def _use_unsharded_flat_param( + self, + padded_unsharded_flat_param: torch.Tensor, + ) -> None: + """ + Switch to use the *unpadded* unsharded flat parameter. + + This is a view into the *padded* unsharded flat parameter. 
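+
+        For example, if the unpadded unsharded size has 9 numel and the
+        padded unsharded flat parameter has 10 numel, then ``flat_param.data``
+        is set to ``padded_unsharded_flat_param[:9]``.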
+ """ + unsharded_size = self.flat_param._unpadded_unsharded_size + flat_param_part = padded_unsharded_flat_param[: unsharded_size.numel()] + # slicing [:] is not visible to autograd because of .data + self.flat_param.data = flat_param_part + in_forward = self._training_state == HandleTrainingState.FORWARD + in_pre_backward = self._training_state == HandleTrainingState.BACKWARD_PRE + if self._use_orig_params: + if self._skipped_use_sharded_views and in_pre_backward: + # This call corresponds to the complementary pre-backward + # `_use_unsharded_views()` to the skipped pre-forward + # `_use_sharded_views()`, so we should skip this one too. + return + # We use `Tensor` views in the forward so that they are tracked by + # autograd. We use them in the pre-backward as well to support + # reentrant activation checkpointing, which needs the views to be + # tracked by autograd in the backward pass's recomputed forward. + self._use_unsharded_views( + as_params=(not in_forward and not in_pre_backward) + ) + elif in_forward: + self._use_unsharded_views(as_params=False) + + def post_unshard(self): + """ + Run the post-unshard logic. + + This includes freeing the low precision shard if needed. + """ + if self._uses_param_mixed_precision and self.uses_sharded_strategy: + self._free_low_precision_sharded_param() + self._check_on_compute_device(self.flat_param) + + def _free_low_precision_sharded_param(self): + """Frees the low precision sharded flat parameter.""" + self._check_low_precision_shard() + # `_mp_shard` is allocated in the pre-unshard stream, consumed in the + # unshard stream for sharded strategies, and consumed in both the + # unshard and default streams for `NO_SHARD`. For sharded strategies, + # the current stream here is the unshard stream, and for `NO_SHARD`, + # it is the default stream. For `NO_SHARD`, only recording for the + # default stream suffices since the default stream waits for the + # unshard stream. + _no_dispatch_record_stream( + self.flat_param._mp_shard, self._device_handle.current_stream() # type: ignore[attr-defined] + ) + _free_storage(self.flat_param._mp_shard) # type: ignore[attr-defined] + + @torch.no_grad() + def unshard_grad(self): + """ + Unshard the handle's ``FlatParameter``'s gradient. + + If all ranks have + ``None`` gradient, then all original parameters will as well. This + method performs an all-reduce and an all-gather. The additional + all-reduce is tolerable since this method is not meant to be used on + the computation critical path. + + Postcondition: ``_saved_grad_shard`` is defined and contains the value + to set ``flat_param.grad`` after gradients are resharded. 
+ """ + if not self.uses_sharded_strategy: + self._use_unsharded_grad_views() + return + flat_param = self.flat_param + self._check_unsharded(flat_param) + + # Check if all ranks have a `None` gradient + num_grad_none = torch.zeros(1, dtype=torch.int32, device=self.device) + num_grad_none[0] = flat_param.grad is None + dist.all_reduce(num_grad_none, group=self.process_group) + if num_grad_none[0] == self.world_size: + flat_param._saved_grad_shard = None # type: ignore[assignment] + self._use_unsharded_grad_views() + return + + if flat_param.grad is None: + # In the case that only some ranks have `None` gradient, we use + # zeros to approximate as a best effort attempt + if self._debug_level == dist.DebugLevel.INFO: + warnings.warn( + f"[Rank {self.rank}] Only some but not all ranks have a " + "`None` `FlatParameter` gradient, so FSDP is using zeros to " + "approximate those ranks' sharded gradients being `None`" + ) + flat_param._saved_grad_shard = None # type: ignore[assignment] + sharded_grad = torch.zeros(flat_param._sharded_size, device=self.device) # type: ignore[attr-defined] + else: + self._check_sharded(flat_param.grad) + flat_param._saved_grad_shard = flat_param.grad # type: ignore[attr-defined] + sharded_grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + padded_unsharded_grad = torch.empty( + flat_param._padded_unsharded_size, # type: ignore[attr-defined] + device=self.device, + dtype=sharded_grad.dtype, + ) + dist.all_gather_into_tensor( + padded_unsharded_grad, sharded_grad, self.process_group + ) + unsharded_size = self.flat_param._unpadded_unsharded_size + flat_param.grad = padded_unsharded_grad[: unsharded_size.numel()].view( + unsharded_size + ) + self._use_unsharded_grad_views() + + def reshard_grad(self): + if self._use_orig_params: + self._use_sharded_grad_views() + if not self.uses_sharded_strategy: + return + self.flat_param.grad = self.flat_param._saved_grad_shard # type: ignore[attr-defined] + delattr(self.flat_param, "_saved_grad_shard") + + def prepare_gradient_for_backward(self): + """ + Prepare the gradient for the backward computation. + + This is done by saving and clearing any existing sharded gradient + in ``.grad`` to enable computing a new unsharded gradient. + """ + _p_assert( + self._training_state + in (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.IDLE), + "Expects to be in `BACKWARD_PRE` or `IDLE` (if prefetching)", + ) + flat_param = self.flat_param + if flat_param.grad is not None and ( + flat_param.grad.size() != flat_param._unpadded_unsharded_size + or flat_param.grad.device != flat_param.device # grad on CPU + ): + self._check_on_compute_device(self.flat_param) + grad_offloaded = flat_param.grad.device != self.device + _p_assert( + not grad_offloaded or self._offload_params, + f"Expects the sharded gradient to be on {self.device} " + f"but got {flat_param.grad.device}", + ) + prev_iter_synced_gradients = ( + flat_param.grad.size() + == flat_param._local_shard.size() # type: ignore[attr-defined] + ) + if prev_iter_synced_gradients: + # TODO (awgu): Gradient accumulation outside `no_sync()` + # does not work with CPU offloading. The issue should be + # that, in the post-backward hook, we cannot do an addition + # between a CPU tensor (the existing sharded gradient) and + # a GPU tensor (the new sharded gradient). 
+ if not grad_offloaded: + flat_param._saved_grad_shard = flat_param.grad.data # type: ignore[attr-defined] + sharded_grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + else: + _p_assert( + hasattr(flat_param, "_cpu_grad"), + "`_cpu_grad` should be defined if the gradient is on CPU", + ) + sharded_grad = flat_param._cpu_grad # type: ignore[attr-defined] + # If user specified to keep the gradient in low precision, then + # the gradient may still be of the low precision dtype if the + # user did not set the gradient to `None` after the previous + # backward, in which case FSDP should cast back to the full + # precision dtype so that FSDP can accumulate in that dtype in + # the post-backward hook and assign to `.grad` in that dtype in + # the post-backward callback. + local_shard_dtype = flat_param._local_shard.dtype # type: ignore[attr-defined] + if ( + self._keep_low_precision_grads + and sharded_grad.dtype != local_shard_dtype + ): + sharded_grad.data = sharded_grad.to(local_shard_dtype) + else: + padded_unsharded_size = flat_param._padded_unsharded_size # type: ignore[attr-defined] + _p_assert( + flat_param.grad.size() == padded_unsharded_size, + "Expects `.grad` to be the unsharded gradient in " + f"`no_sync()` with size {padded_unsharded_size} " + f"but got size {flat_param.grad.size()}", + ) + flat_param.grad = None + + def prepare_gradient_for_optim(self): + """Prepare the gradient for optimizer computation by moving the sharded gradient to the ``.grad`` attribute.""" + + def cast_grad_to_param_dtype_if_needed(flat_param): + # TODO (rohan-varma): test for full precision with keep_low_precision_grads + if not self._force_full_precision and self._keep_low_precision_grads: + _p_assert(flat_param.grad is not None, "Unexpected None grad!") + if flat_param.grad.dtype != self._fwd_bwd_param_dtype: + flat_param.grad.data = flat_param.grad.to(self._fwd_bwd_param_dtype) + if self._use_orig_params: + self._use_sharded_grad_views() + + flat_param = self.flat_param + # TODO (awgu): We should replace these conditional checks to encode + # the logical intention more directly. + if hasattr(flat_param, "_cpu_grad"): + # NOTE: This branch includes `NO_SHARD`. 
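# --------------------------------------------------------------------------
# Illustrative sketch of the `.data` rebinding used above (e.g.
# `sharded_grad.data = sharded_grad.to(local_shard_dtype)`): rebinding
# `.data` mutates the same tensor object in place, so every existing
# reference (such as an attribute like `_saved_grad_shard`) observes the new
# dtype, whereas a plain Python reassignment would only rebind a local name.
import torch

g = torch.ones(4, dtype=torch.bfloat16)
alias = g  # stands in for another reference, e.g. an attribute on the flat param
g.data = g.to(torch.float32)  # in-place rebind: all references now see float32
assert alias.dtype == torch.float32
# --------------------------------------------------------------------------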
+ self._check_sharded(flat_param) + self._check_on_cpu(flat_param) + flat_param.grad = flat_param._cpu_grad # type: ignore[attr-defined] + cast_grad_to_param_dtype_if_needed(flat_param) + elif hasattr(flat_param, "_saved_grad_shard"): + self._check_sharded(flat_param) + self._check_on_compute_device(flat_param) + if flat_param._saved_grad_shard is not None: + self._check_on_compute_device(flat_param._saved_grad_shard) # type: ignore[attr-defined] + # If no sharded gradient was computed this iteration, then there is + # no need to forward `_saved_grad_shard` to `grad` + if flat_param._post_backward_called: # type: ignore[attr-defined] + flat_param.grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + if flat_param.grad is not None: + cast_grad_to_param_dtype_if_needed(flat_param) + else: + _p_assert( + not self.uses_sharded_strategy + or not flat_param._post_backward_called, # type: ignore[attr-defined] + "All sharded parameters that received a gradient in the " + "post-backward should use `_saved_grad_shard`", + ) + # Delete `_saved_grad_shard` since its existence indicates a previous + # gradient to accumulate with in the post-backward hook + if hasattr(flat_param, "_saved_grad_shard"): + delattr(flat_param, "_saved_grad_shard") + + @contextlib.contextmanager + def to_cpu(self): + """ + Move the unpadded unsharded flat parameter to CPU while in the context and moves it back to the previous device upon exit. + + For now, this assumes the ``FlatParameter`` is the unpadded unsharded flat parameter + since (1) there is no reason to include the padding in the copy and (2) + there is no use case for the sharded flat parameter. + + Precondition: ``self.flat_param`` 's data is the unpadded unsharded + flat parameter on the compute device, and the handle uses a sharded + strategy. + Postcondition: Same as the precondition. + """ + self._check_sharded_strategy() + _p_assert( + self.flat_param.size() == self.flat_param._unpadded_unsharded_size, + f"Expects size {self.flat_param._unpadded_unsharded_size} but got {self.flat_param.size()}", + ) + self._check_on_compute_device(self.flat_param) + # Check that the unpadded unsharded flat parameter is a view into the + # padded unsharded flat parameter as expected + # NOTE: This check is not strictly needed for correctness but is a + # useful sanity check since the tensor should only be used internally. + _p_assert( + _same_storage(self.flat_param, self._get_padded_unsharded_flat_param()), + "Expects the unpadded parameter to be a view into the padded parameter", + ) + self.flat_param_to(torch.device("cpu")) + self._free_unsharded_flat_param() + try: + yield + finally: + _p_assert( + self.flat_param.size() == self.flat_param._unpadded_unsharded_size, + f"Expects size {self.flat_param._unpadded_unsharded_size} but got {self.flat_param.size()}", + ) + padded_unsharded_flat_param = self._alloc_padded_unsharded_flat_param() + # Copy from CPU to the compute device + padded_unsharded_flat_param[: self.flat_param.numel()].copy_( + self.flat_param + ) + self._use_unsharded_flat_param(padded_unsharded_flat_param) + + def reshard(self, free_unsharded_flat_param: bool): + """ + Run the reshard logic. + + This includes freeing the unsharded flat + parameter if ``free_unsharded_flat_param`` and switching to using the + sharded flat parameter. Note that this also implicitly offloads + the sharded flat parameter (if CPU offload is enabled) by pointing + it to the ``_local_shard`` attribute which resides on CPU. 
+ """ + # Switch to the sharded `FlatParameter` before freeing to prevent + # "use-after-free"-type bugs with external profiling tools, where for + # `use_orig_params=True`, the `param` does not point to valid memory + # when setting `param.data = ...` in `_use_sharded_views()`. + self._use_sharded_flat_param() + if free_unsharded_flat_param: + self._free_unsharded_flat_param() + + def post_reshard(self): + """ + Run the post-reshard logic. + + This includes freeing any memory that + can now be freed given that the ``FlatParameter`` points to the full + precision sharded flat parameter. + + Precondition: ``self.flat_param`` 's data points to the full precision + sharded flat parameter. + """ + # For `NO_SHARD`, `_mp_shard` is not freed in the post-unshard since it + # is also the low precision *unsharded* flat parameter. Hence, we delay + # the free until the reshard. + if ( + self._uses_param_mixed_precision + and not self.uses_sharded_strategy + and not self._force_full_precision # did not use the low precision shard + ): + self._free_low_precision_sharded_param() + + def _free_unsharded_flat_param(self): + """ + Free the padded unsharded flat parameter. We allow this + function to be called even when storage is not allocated + + The tensor to free depends + on the calling context since the unshard may have forced full + precision, in which case a different tensor is used. + """ + self._check_sharded_strategy() + unsharded_flat_param = self._get_padded_unsharded_flat_param() + self._check_on_compute_device(unsharded_flat_param) + # Do not free the memory until all ops in the current stream finish + _no_dispatch_record_stream( + unsharded_flat_param, self._device_handle.current_stream() + ) + _free_storage(unsharded_flat_param) + + def _use_sharded_flat_param(self) -> None: + """Switches to using the sharded flat parameter.""" + flat_param = self.flat_param + if self._use_orig_params: + in_forward = self._training_state == HandleTrainingState.FORWARD + skip_use_sharded_views = ( + torch.is_grad_enabled() + and in_forward + and self._sharding_strategy + in NO_RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES + ) + # Only incur the extra `.data` call if needed + if skip_use_sharded_views: + unsharded_flat_param = flat_param.data + if self._offload_params: + device = flat_param._local_shard.device # type: ignore[attr-defined] + _p_assert( + device == torch.device("cpu"), + f"Expects the local shard to be on CPU but got {device}", + ) + flat_param.data = flat_param._local_shard # type: ignore[attr-defined] + if self._use_orig_params: + if skip_use_sharded_views: # type: ignore[possibly-undefined] + self._unsharded_flat_param_for_skipped_views = unsharded_flat_param # type: ignore[possibly-undefined] + else: + self._use_sharded_views() + # For the post-forward reshard, we may try to use sharded gradient + # views (or unsharded gradient views if a gradient was accumulated + # in `no_sync()`), but for the post-backward reshard, we delay the + # call to after the reduce-scatter. + if ( + in_forward # type: ignore[possibly-undefined] + # Skip using gradient views if skipped using sharded views + # since exposing unsharded parameters with sharded gradients + # may be confusing to the user + and not self._skipped_use_sharded_views + ): + # TODO: Change `_unpadded_unsharded_size` if we change the + # gradient to be computed directly with padding. 
+ accumulated_grad_in_no_sync = ( + flat_param.grad is not None + and self.uses_sharded_strategy + and flat_param.grad.shape == flat_param._unpadded_unsharded_size + ) + if accumulated_grad_in_no_sync: + self._use_unsharded_grad_views() + else: + self._use_sharded_grad_views() + + ######### + # VIEWS # + ######### + @no_type_check + def _get_unflat_views_unaligned( + self, + tensor: Optional[torch.Tensor] = None, + ) -> Iterator[Tensor]: + """ + Return unflattened ``Tensor`` views into ``tensor``. + + If `tensor`` is ``None``, ``flat_param`` is used. The unflattening is based + on ``flat_param`` 's metadata. + + Examples for ``tensor`` include ``flat_param.grad`` or unsharded + tensor optimizer state. + """ + flat_param = self.flat_param + if tensor is None: + tensor = flat_param + views = ( + _ext_post_unflatten_transform( + subtensor.view(shape), + param_extension, + self._fsdp_extension, + ) + for (subtensor, shape, param_extension) in zip( + torch.split(tensor, flat_param._numels, dim=0), + flat_param._shapes, + flat_param._param_extensions, + ) + ) + return views + + @no_type_check + def _get_unflat_views_aligned( + self, + tensor: Optional[Tensor] = None, + ) -> List[Tensor]: + """ + Return unflattened ``Tensor`` views into ``tensor`` with handling for padding. + + This method has the same contract as :meth:`_get_unflat_views_unaligned` + except it checks for ``None`` placeholders representing padding for + alignment, which may incur slightly more CPU overhead. + """ + flat_param = self.flat_param + if tensor is None: + tensor = flat_param + splits: List[Tensor] = torch.split( + tensor, flat_param._numels_with_padding, dim=0 + ) + idx = 0 + views: List[Tensor] = [] + for split, is_padding in zip(splits, flat_param._is_padding_mask): + if is_padding: + continue + views.append( + _ext_post_unflatten_transform( + split.view(flat_param._shapes[idx]), + flat_param._param_extensions[idx], + self._fsdp_extension, + ) + ) + idx += 1 + return views + + @no_type_check + @torch.enable_grad() + def _use_unsharded_views(self, as_params: bool) -> None: + """ + Unflatten the unsharded flat parameter by setting the original parameter variables to be views into it. + + Args: + as_params (bool): If ``True``, then registers the original + parameters as ``nn.Parameter`` s; if ``False``, then registers + the original parameters only as ``Tensor`` s. ``False`` should + be used during forward/backward computation and when hiding the + original parameters from :meth:`nn.Module.named_parameters`. + + Note: + when prefetching for next forward, current forward may be + annotated with `@torch.no_grad()` + `@torch.enable_grad()` ensures non-empty `view.grad_fn` + otherwise `_post_backward_hook` will not get called + """ + flat_param = self.flat_param + self._check_unsharded(flat_param) + views = self._get_unflat_views() + from torch.distributed._tensor import DTensor + + for i, (view, (param_name, module, _)) in enumerate( + zip(views, flat_param._param_infos) + ): + if self._use_orig_params and as_params: + if type(view) is DTensor: + # A `DTensor` `view` is not compatible with assigning + # `param.data = view`, so we cannot preserve the parameter + # variable. 
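# --------------------------------------------------------------------------
# Illustrative sketch of the unflattening in `_get_unflat_views_unaligned`:
# a flat tensor is split by the original parameters' numels and each chunk is
# viewed back to its original shape. `torch.split` returns narrowed views, so
# no data is copied. The shapes/numels below are made up.
import torch

shapes = [(2, 3), (4,)]
numels = [6, 4]
flat = torch.arange(10.0)
views = [chunk.view(shape) for chunk, shape in zip(torch.split(flat, numels), shapes)]
assert views[0].shape == (2, 3) and views[1].shape == (4,)
# The views share storage with the flat tensor:
assert views[0].untyped_storage().data_ptr() == flat.untyped_storage().data_ptr()
# --------------------------------------------------------------------------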
+ self._setattr_param( + module, + param_name, + nn.Parameter(view, requires_grad=flat_param.requires_grad), + ) + continue + param = self.flat_param._params[i] + self._setattr_param(module, param_name, param) + param.data = view + elif as_params: + self._setattr_param( + module, + param_name, + nn.Parameter(view, requires_grad=flat_param.requires_grad), + ) + else: # `as_params=False` + param_var: Tensor = view + if self._use_orig_params: + if self._training_state == HandleTrainingState.FORWARD: + # Save the `Tensor` for the pre-backward + self.flat_param._tensors[i] = view # save for pre-backward + elif self._training_state == HandleTrainingState.BACKWARD_PRE: + # Use the saved `Tensor` variable from the forward to + # preserve the autograd graph so that the post-backward + # hook fires (e.g. for reentrant AC) + tensor = self.flat_param._tensors[i] + tensor.data = view + param_var = tensor + self._setattr_tensor(module, param_name, param_var) + if ( + self._use_orig_params + and self._training_state == HandleTrainingState.FORWARD + ): + module._parameters[param_name] = param_var + for i, ( + param_name, + module, + _, + prim_param_name, + prim_module, + _, + ) in enumerate(self.flat_param._shared_param_infos): + prim_param: Union[Tensor, nn.Parameter] = getattr( + prim_module, prim_param_name + ) + _p_assert( + not as_params or isinstance(prim_param, nn.Parameter), + f"as_params={as_params} type(prim_param)={type(prim_param)}", + ) + if self._use_orig_params and as_params: + shared_param = self.flat_param._shared_params[i] + self._setattr_param(module, param_name, shared_param) + shared_param.data = prim_param + elif as_params: + self._setattr_param(module, param_name, prim_param) + else: + self._setattr_tensor(module, param_name, prim_param) + if ( + self._use_orig_params + and self._training_state == HandleTrainingState.FORWARD + ): + module._parameters[param_name] = prim_param + + @no_type_check + def _use_unsharded_grad_views(self) -> None: + """ + Unflatten the unsharded flat parameter's gradient. + + The original parameter variables' gradients are set to be views into + the unsharded flat parameter's gradient. + """ + # Expects the gradient to be in `flat_param.grad` + if self.flat_param.grad is None: + for param in chain(self.flat_param._params, self.flat_param._shared_params): + param.grad = None + return + self._check_unsharded(self.flat_param.grad) + views = self._get_unflat_views(self.flat_param.grad) + for i, (view, (param_name, module, _)) in enumerate( + zip(views, self.flat_param._param_infos) + ): + _p_assert( + hasattr(module, param_name), + f"{self.flat_param._fqns[i]} is missing", + ) + param = getattr(module, param_name) + if ( + param.shape != view.shape + or param.dtype != view.dtype + or param.device != view.device + ): + # NOTE: This is a hack using `.data` to side step the check + # that parameter/gradient sizes/dtypes/devices match. From + # calling `reshard()`, `param` has the sharded size, has the + # full precision dtype, and if CPU offloading is enabled, is on + # CPU. Thus, one or more of the following cases can hold when + # in `no_sync()`, where `view` is the original parameter's + # gradient: + # 1. `view` can have the unsharded size. + # 2. `view` can have the parameter low precision dtype. + # 3. `view` can be on GPU. 
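# --------------------------------------------------------------------------
# Illustrative sketch of the `.data` side step described in the note above:
# autograd validates a direct `.grad` assignment against the parameter's
# size/dtype/device, so a gradient view with a different size is attached
# through `param.grad.data` instead. Sizes here are made up.
import torch

param = torch.nn.Parameter(torch.zeros(2))  # e.g. resharded (sharded) size
unsharded_grad_view = torch.ones(4)  # e.g. unsharded gradient in `no_sync()`
try:
    param.grad = unsharded_grad_view  # rejected: size mismatch
except RuntimeError:
    param.grad = torch.empty_like(param)
    param.grad.data = unsharded_grad_view  # accepted via `.data`
assert param.grad.shape == (4,)
# --------------------------------------------------------------------------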
+ if param.grad is None: + param.grad = torch.empty_like(param) + param.grad.data = view + else: + param.grad = view + for i, ( + param_name, + module, + module_name, + prim_param_name, + prim_module, + _, + ) in enumerate(self.flat_param._shared_param_infos): + _p_assert( + hasattr(module, param_name), + f"{module_name + '.' + param_name if module_name else param_name} is missing", + ) # did not save FQN info in `_shared_param_infos` + param = getattr(module, param_name) + prim_param = getattr(prim_module, prim_param_name) + if ( + param.shape != prim_param.grad.shape + or param.dtype != prim_param.grad.dtype + or param.device != prim_param.grad.device + ): + # NOTE: This is the same hack to use `.data` to side step the + # size check. + if param.grad is None: + param.grad = torch.empty_like(param) + param.grad.data = prim_param.grad + else: + param.grad = prim_param.grad + + @contextlib.contextmanager + def unflatten_as_params(self) -> Generator: + """ + Unflatten the original parameters. + + The function assumes that the flat parameter is unsharded. When in the context, + unflattens the original parameters as ``nn.Parameter`` views into the + flat parameter, and after the context, restores the original parameters + as ``Tensor`` views into the flat parameter. + """ + self._use_unsharded_views(as_params=True) + try: + yield + finally: + self._use_unsharded_views(as_params=False) + + @no_type_check + @torch.no_grad() + def _use_sharded_views(self) -> None: + """ + Set the original parameter variables' data to be flattened views into the sharded flat parameter. + + The views are kept as flattened to simplify the case where a parameter + is sharded across ranks. Parameters whose data is not present in the + sharded flat parameter have their data set to a size-0 empty tensor. We + do not delete them to ensure to preserve expected behaviors like model + printability. Parameters whose data is present must preserve their + variables to be passable to an optimizer. 
+ """ + self._unsharded_flat_param_for_skipped_views = None + if not self.uses_sharded_strategy: + # For `NO_SHARD`, use the *unflattened* unsharded views since we + # have the unsharded parameter + self._use_unsharded_views(as_params=True) + return + flat_param = self.flat_param + self._check_sharded(flat_param) + # Construct once and reuse for all parameters not in the local shard + size_0_empty_tensor = torch.empty( + 0, + dtype=self.flat_param.dtype, # in case `flat_param` changed dtype + device=self.flat_param.device, + requires_grad=False, + ) + for param, shard_param_info, (param_name, module, _) in zip( + flat_param._params, flat_param._shard_param_infos, flat_param._param_infos + ): + self._setattr_param(module, param_name, param) + if not shard_param_info.in_shard: + # Allow the original data to be freed via garbage collection + param.data = size_0_empty_tensor + else: + offset = shard_param_info.offset_in_shard + numel_in_shard = shard_param_info.numel_in_shard + param.data = flat_param[offset : offset + numel_in_shard] + assert self.flat_param._shared_params is not None + for i, ( + param, + (param_name, module, _, prim_param_name, prim_module, _), + ) in enumerate( + zip(self.flat_param._shared_params, self.flat_param._shared_param_infos) + ): + self._setattr_param(module, param_name, param) + prim_param = getattr(prim_module, prim_param_name) + param.data = prim_param # could be both empty and non-empty + if self._training_state == HandleTrainingState.BACKWARD_POST: + # Clear the saved `Tensor`s since they are unneeded now + for i in range(len(self.flat_param._tensors)): + self.flat_param._tensors[i] = None + + @no_type_check + @torch.no_grad() + def _use_sharded_grad_views(self) -> None: + """ + Set the original parameter variables' gradients to be flattened views into the sharded flat parameter's gradient. + + This is a no-op if there is no gradient. + + Parameters whose data is not present in the sharded flat parameter and + parameters with ``requires_grad=False`` have their gradients set to + ``None``. Since the gradient variables do not need to be preserved, + this method does not manipulate existing ``Tensor`` data directly and + creates new ``Tensor`` variables instead. + """ + flat_param = self.flat_param + self._check_sharded(flat_param) + grad = self.sharded_grad + if grad is None: + for param in chain(flat_param._params, flat_param._shared_params): + param.grad = None + return + self._check_sharded(grad) + for param, shard_param_info, is_grad_none in zip( + flat_param._params, + flat_param._shard_param_infos, + flat_param._is_grad_none_mask, + ): + if not shard_param_info.in_shard: + param.grad = None + else: + numel_in_shard = shard_param_info.numel_in_shard + if param.requires_grad and not is_grad_none: + offset = shard_param_info.offset_in_shard + if self._keep_low_precision_grads or param.dtype != grad.dtype: + # NOTE: This is a hack using `.data` to side step the + # check that parameter/gradient dtypes match. Here, + # `param` has full precision; `grad` has low precision. 
+ if param.grad is None: + # `.grad` must have the same shape as `param` + param.grad = torch.empty_like(param) + param.grad.data = grad[ + offset : offset + numel_in_shard + ].reshape(param.shape) + else: + param.grad = grad[offset : offset + numel_in_shard].reshape( + param.shape + ) + else: + param.grad = None + assert flat_param._shared_params is not None + for i, (param, (_, _, _, prim_param_name, prim_module, _)) in enumerate( + zip(flat_param._shared_params, flat_param._shared_param_infos) + ): + in_sharded_flat_param = hasattr(prim_module, prim_param_name) + if in_sharded_flat_param and param.requires_grad: + prim_param = getattr(prim_module, prim_param_name) + param.grad = prim_param.grad # share the same reference + else: + param.grad = None + + @no_type_check + @torch.no_grad() + def _writeback_orig_params(self) -> bool: + """ + Write back any parameters that changed storage to the handle's ``FlatParameter``. + + Iterates over the original parameters and writes back any parameters + that changed storages (due to a non-inplace operator) to the handle's + ``FlatParameter``. This method preserves the ``FlatParameter` 's + device even if an original parameter's device changes. + + Raises: + RuntimeError: If an original parameter or gradient changes storages + but no longer has the expected flattened shape. + Returns: ``True`` if some writeback happened, and ``False`` otherwise. + """ + if ( + self.uses_sharded_strategy + and not self.is_sharded(self.flat_param) + and not self._skipped_use_sharded_views + ): + # For `NO_SHARD`, we may still need to writeback + return False + flat_param = self.flat_param + wroteback = False + if self._skipped_use_sharded_views and self.uses_sharded_strategy: + # NOTE: We must use the unsharded flat parameter from which the + # unsharded views were computed, not the one from the current + # calling context (`_get_padded_unsharded_flat_param()`) since that + # may be different (e.g. the model changed from train to eval). + flat_param_tensor = self._unsharded_flat_param_for_skipped_views + _p_assert( + _data_ptr_allocated(flat_param_tensor), + "If skipped using sharded views, the unsharded flat parameter " + "should be allocated", + ) + else: + flat_param_tensor = flat_param + # NOTE: Since this method is called in the pre-unshard, which is only + # called during computation in the pre-forward or pre-backward, the + # sharded gradient should be guaranteed to be in `.grad`, not in + # `._saved_grad_shard`. + flat_param_grad = ( + flat_param.grad + if self.uses_sharded_strategy or not self._offload_params + else flat_param._cpu_grad + ) + for i, ( + param, + (in_shard, offset_in_shard, numel_in_shard, _, _), + (param_name, module, _), + ) in enumerate( + zip( + flat_param._params, + flat_param._shard_param_infos, + flat_param._param_infos, + ) + ): + if not in_shard: + continue + if not hasattr(module, param_name): + # Do not writeback if original parameters are deregistered + # (e.g. 
during model checkpointing) + continue + + # Check for parameter writeback + if self._skipped_use_sharded_views: + param = flat_param._tensors[i] + _p_assert( + param is not None, + f"Expects to have saved tensor for {flat_param._fqns[i]}", + ) + param_changed = getattr(module, param_name) is not param + needs_param_writeback = ( + param_changed # changed parameter variable itself + or not _same_storage(param, flat_param_tensor) + ) + if self._skipped_use_sharded_views and ( + param_changed or needs_param_writeback + ): + raise AssertionError( + "FSDP does not support changing the parameters between " + f"forward and backward for {self._sharding_strategy}" + ) + if param_changed: + # NOTE: The gradient is not preserved after a parameter change. + param = getattr(module, param_name) + flat_param._params[i] = param + if needs_param_writeback: + expected_shape = torch.Size([numel_in_shard]) + self._writeback_tensor( + param, flat_param, i, expected_shape, offset_in_shard, True + ) + wroteback = True + + # Check for gradient writeback + if self._skipped_use_sharded_views: + # Skip the writeback check because we do not expose gradients + # when we skipped using sharded views + continue + if param.grad is None and flat_param.grad is not None: + expected_shape = torch.Size([numel_in_shard]) + self._writeback_tensor( + None, flat_param.grad, i, expected_shape, offset_in_shard, False + ) + elif param.grad is not None: + # For `NO_SHARD` + CPU offloading, `_cpu_grad` is always in + # memory and owns the gradient storage, so it will never + # require gradient writeback. + if not self.uses_sharded_strategy and self._offload_params: + # Explicitly continue to handle the case of `no_sync()`, + # where `param.grad` is a view into the GPU gradient + # referenced by `flat_param.grad`, while `flat_param_grad` + # is `flat_param._cpu_grad`, which is on CPU + continue + + needs_grad_writeback = flat_param_grad is None or not _same_storage( + param.grad, flat_param_grad + ) + if needs_grad_writeback: + if flat_param_grad is None: + flat_param_grad = torch.zeros_like(flat_param) + expected_shape = torch.Size([numel_in_shard]) + self._writeback_tensor( + param.grad, + flat_param_grad, + i, + expected_shape, + offset_in_shard, + False, + ) + flat_param.grad = flat_param_grad + flat_param_grad = flat_param.grad + + # TODO: If we want to handle shared parameters, we need to re-generate + # the shared parameter data structures in case sharedness changed. + for i, ( + param_name, + module, + _, + prim_param_name, + prim_module, + _, + ) in enumerate(flat_param._shared_param_infos): + if getattr(module, param_name) is not getattr(prim_module, prim_param_name): + raise NotImplementedError( + "Changing shared parameters is not supported yet" + ) + return wroteback + + def _writeback_tensor( + self, + src_tensor: Optional[Tensor], + dst_tensor: Tensor, + tensor_index: int, + expected_shape: torch.Size, + offset: int, + is_param: bool, # else gradient + ) -> None: + """ + Write back ``src_tensor`` to ``dst_tensor`` at offset ``offset``, where ``src_tensor`` should have shape ``expected_shape``. + + ``is_param`` indicates if the tensor is the parameter (if ``True``) or gradient (if + ``False``). If ``src_tensor`` is ``None``, then the effect is zeroing + instead of copying. ``tensor_index`` gives the index of ``src_tensor`` + in the metadata structures. + + Raises: + RuntimeError: If the ``src_tensor`` does not have the expected + shape. 
+ """ + _p_assert( + len(expected_shape) == 1, + f"Expects a 1D expected shape but got {expected_shape}", + ) + if self._debug_level == dist.DebugLevel.INFO: + rank = self.rank if hasattr(self, "rank") else dist.get_rank() + src_shape = src_tensor.shape if src_tensor is not None else None + src_device = src_tensor.device if src_tensor is not None else None + warnings.warn( + f"[Rank {rank}] {'Parameter' if is_param else 'Gradient'} needs " + f"writeback in {self._training_state}\n" + f"expected shape={expected_shape} shape={src_shape} " + f"expected device={dst_tensor.device} device={src_device}" + ) + if src_tensor is not None and src_tensor.shape != expected_shape: + # NOTE: Gradient shape mismatch is not possible in practice since + # the gradient shape is enforced to match that of the parameter and + # we already check for parameter shape mismatch. + raise RuntimeError( + f"Cannot writeback when the {'parameter' if is_param else 'gradient'} " + f"shape changes\nExpects {expected_shape} but got {src_tensor.shape}" + ) + if src_tensor is not None: + dst_tensor[offset : offset + expected_shape.numel()].copy_(src_tensor) + else: + dst_tensor[offset : offset + expected_shape.numel()].zero_() + assert self.flat_param._is_grad_none_mask is not None + self.flat_param._is_grad_none_mask[tensor_index] = True + + def _reset_flat_param_grad_info_if_needed(self): + """ + Reset ``flat_param.grad`` if needed. + + When ``use_orig_params=True``: + (1) sets the underlying ``flat_param.grad`` to ``None`` if *all* of the + original parameters' ``.grad`` are ``None``, and + (2) sets ``flat_param.requires_grad=False`` if *none* of the original + parameters require gradient. + For (1), this is targeting ``optim.zero_grad(set_to_none=True)``, in + which case we want to free the gradients as soon after the + ``zero_grad()`` call as possible. + """ + if not self._use_orig_params: + return + flat_param = self.flat_param + assert flat_param._params is not None # mypy + all_grad_none = True + requires_grad = False + for param in flat_param._params: + all_grad_none &= param.grad is None + requires_grad |= param.requires_grad + if all_grad_none: + flat_param.grad = None + # As long as one parameter requires gradient, then the flat parameter + # must require gradient + flat_param.requires_grad = requires_grad + + def _deregister_orig_params(self): + for param_info in self.flat_param._param_infos: + param_name, module, _ = param_info + if hasattr(module, param_name): + delattr(module, param_name) + for param_name, module, _, _, _, _ in self.flat_param._shared_param_infos: + if hasattr(module, param_name): + delattr(module, param_name) + + ########### + # HELPERS # + ########### + def flat_param_to(self, *args, **kwargs): + """Wrap an in-place call to ``.to()`` for ``self.flat_param``.""" + self.flat_param.data = self.flat_param.to(*args, **kwargs) + if self._use_orig_params: + # Refresh the views because their storage may have changed + if self.is_sharded(self.flat_param): + self._use_sharded_views() + else: + self._use_unsharded_views(as_params=True) + + def _get_modules(self) -> Set[nn.Module]: + """Return a :class:`set` of the modules whose parameters are included in this handle's flat parameter.""" + return {pi.module for pi in self.flat_param._param_infos}.union( + {spi.module for spi in self.flat_param._shared_param_infos} + ) + + def is_sharded(self, tensor: Tensor) -> bool: + """ + Return whether ``tensor`` is *currently* sharded. 
+ + For ``NO_SHARD``, we choose to have this always return ``False`` for clarity. + """ + if ( + not hasattr(self.flat_param, "_sharded_size") + or not self.uses_sharded_strategy + ): + # `_sharded_size` is defined iff `handle.shard()` has been called + return False + sharded_size = self.flat_param._sharded_size # type: ignore[attr-defined] + return tensor.size() == sharded_size + + def param_module_names(self) -> Iterator[Tuple[str, str]]: + shared_param_infos = [ + ParamInfo(param_name, module, module_name) + for ( + param_name, + module, + module_name, + _, + _, + _, + ) in self.flat_param._shared_param_infos + ] + for param_info in chain(self.flat_param._param_infos, shared_param_infos): + param_name, _, module_name = param_info # type: ignore[misc] + yield (param_name, module_name) + + def shared_param_module_names(self) -> Iterator[Tuple[str, str]]: + for param_name, _, module_name in [ + ParamInfo(param_name, module, module_name) + for ( + param_name, + module, + module_name, + _, + _, + _, + ) in self.flat_param._shared_param_infos + ]: + yield (param_name, module_name) + + @property + def _fqns_in_shard(self) -> List[str]: + """Return the FQNs of the parameters present in this rank's shard.""" + fqns_in_shard: List[str] = [] + for fqn, shard_param_info in zip( + self.flat_param._fqns, self.flat_param._shard_param_infos # type: ignore[attr-defined] + ): + if shard_param_info.in_shard: + fqns_in_shard.append(fqn) + return fqns_in_shard + + @property + def sharded_grad(self) -> Optional[Tensor]: + """Return the handle's sharded gradient.""" + flat_param = self.flat_param + # Priority for non-`None`: `_cpu_grad` > `_saved_grad_shard` > `grad` + # - CPU offloading: `_cpu_grad` + # - No CPU offloading + sharded strategies: `_saved_grad_shard` + # - No CPU offloading + `NO_SHARD`: `grad` + grad: Optional[Tensor] + if hasattr(flat_param, "_cpu_grad"): + grad = flat_param._cpu_grad # type: ignore[attr-defined] + elif hasattr(flat_param, "_saved_grad_shard"): + # In the post-backward hook, the sharded gradient is still in + # `_saved_grad_shard`. + grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + else: + # If in IDLE or in FORWARD states, then there may be an + # (accumulated) gradient. If accessed in IDLE, then this should + # be due to re-registering the original parameters (e.g. in state + # dict load). + _p_assert( + flat_param.grad is None + or not self.uses_sharded_strategy + or self._training_state + in (HandleTrainingState.FORWARD, HandleTrainingState.IDLE), + "Sharded strategies should use `_cpu_grad` or `_saved_grad_shard` " + "unless in IDLE or FORWARD", + ) + grad = flat_param.grad + return grad + + def _reset_is_grad_none(self) -> None: + """ + Reset ``_is_grad_none_mask`` as needed. + + This method should only be + called in the post-backward after gradient computation, in which case + if a parameter requires gradient, then it will surely receive a + gradient and we may reset its mask entry to ``False``. 
+ """ + if not self._use_orig_params: + return + _p_assert( + self._training_state == HandleTrainingState.BACKWARD_POST, + "Expects to only be called in the post-backward after gradient computation", + ) + flat_param = self.flat_param + assert flat_param._params is not None # mypy + for i, param in enumerate(flat_param._params): # type: ignore[arg-type] + # As long as the parameter requires gradient, it should receive a + # meaningful gradient (even if the gradient happens to be zeros) + if param.requires_grad: + assert flat_param._is_grad_none_mask is not None # mypy + flat_param._is_grad_none_mask[i] = False + + ####################### + # CHECKS & INVARIANTS # + ####################### + def _check_sharded_strategy(self): + _p_assert(self.uses_sharded_strategy, "Expects sharded strategy") + + def _check_on_compute_device(self, tensor: Tensor): + _p_assert( + tensor.device == self.device, + f"Expects tensor to be on the compute device {self.device}, was on {tensor.device}", + ) + + def _check_on_cpu(self, tensor: Tensor): + _p_assert( + tensor.device == torch.device("cpu"), + f"Expects tensor to be on CPU but got {tensor.device}", + ) + + @staticmethod + def _check_storage_freed(tensor: Tensor): + # Compile does not resize during trace + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + _p_assert( + _same_storage_size(tensor, 0), + "Expects storage to be freed but got storage with size > 0", + ) + + @staticmethod + def _check_storage_allocated(tensor: Tensor): + _p_assert(_storage_size_allocated(tensor), "Expects storage to be allocated") + + def _check_low_precision_shard(self): + _p_assert( + self._uses_param_mixed_precision, + "Not using low precision for parameters", + ) + _p_assert( + getattr(self.flat_param, "_mp_shard", None) is not None, + "Expects `_mp_shard` to exist", + ) + device = self.flat_param._mp_shard.device # type: ignore[attr-defined] + _p_assert( + device == self.device, + f"Expects the low precision shard to be on {self.device} but got {device}", + ) + + def _check_unsharded(self, tensor: Tensor): + msg_prefix = "Expects tensor to be unsharded " + _p_assert(tensor is not None, msg_prefix + "but got `None`") + unsharded_size = self.flat_param._unpadded_unsharded_size + _p_assert( + tensor.size() == unsharded_size, + msg_prefix + f"with size {unsharded_size} but got {tensor.size()}", + ) + + def _check_sharded(self, tensor: Tensor): + msg_prefix = "Expects tensor to be sharded " + _p_assert(tensor is not None, msg_prefix + "but got `None`") + sharded_size = self.flat_param._sharded_size # type: ignore[attr-defined] + _p_assert( + tensor.size() == sharded_size, + msg_prefix + f"with size {sharded_size} but got {tensor.size()}", + ) + + ############## + # PROPERTIES # + ############## + @property + def uses_sharded_strategy(self) -> bool: + return self._sharding_strategy != HandleShardingStrategy.NO_SHARD + + @property + def _uses_param_mixed_precision(self) -> bool: + return self._fwd_bwd_param_dtype != self._orig_param_dtype + + @property + def _uses_reduce_mixed_precision(self) -> bool: + return self._reduce_dtype != self._orig_param_dtype + + @property + def _force_full_precision(self) -> bool: + return ( + self._uses_param_mixed_precision or self._uses_reduce_mixed_precision + ) and ( + self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS + or + # Also disable mixed precision in model eval mode, if configured + (not self._fully_sharded_module.training and self._use_full_prec_in_eval) + ) + + @property + def 
_skipped_use_sharded_views(self) -> bool: + """ + This property is used for sharding strategies that do not free after forward with ``use_orig_params=True``. + + This returns if this handle is + currently in a state where it has skipped using sharded views, in which + case it can restore view invariants via ``_use_sharded_views()``. + """ + return self._unsharded_flat_param_for_skipped_views is not None + + +# NOTE: These are hacks to bypass `nn.Module.__setattr__` checks. +def _unsafe_setattr_param( + module: nn.Module, param_name: str, param: nn.Parameter +) -> None: + module._parameters[param_name] = param + # This bypasses any overrides in case `module` is an instance of an + # `nn.Module` subclass + super(nn.Module, module).__setattr__(param_name, param) + + +def _unsafe_setattr_tensor(module: nn.Module, param_name: str, tensor: Tensor) -> None: + module._parameters.pop(param_name, None) + # This bypasses any overrides in case `module` is an instance of an + # `nn.Module` subclass + super(nn.Module, module).__setattr__(param_name, tensor) + + +def _safe_setattr_tensor_or_param( + module: nn.Module, param_name: str, tensor_or_param: Union[Tensor, nn.Parameter] +): + # Call `delattr()` and `setattr()` to go through `nn.Module` checks + if hasattr(module, param_name): + delattr(module, param_name) + setattr(module, param_name, tensor_or_param) + + +def _convert_to_params( + tensors: List[Union[torch.Tensor, nn.Parameter]] +) -> List[nn.Parameter]: + return [t if isinstance(t, nn.Parameter) else nn.Parameter(t) for t in tensors] + + +def _detach_if_needed(param_or_tensor: Union[nn.Parameter, Tensor]) -> Tensor: + return ( + param_or_tensor.detach() + if isinstance(param_or_tensor, nn.Parameter) + else param_or_tensor + ) + + +def _get_aligned_numel(unsharded_dtype: torch.dtype): + # NOTE: This alignment constraint comes from TorchInductor. + ALIGNMENT = 16 # bytes + unsharded_dtype_size = _get_dtype_size(unsharded_dtype) + aligned_numel = ALIGNMENT // unsharded_dtype_size + return aligned_numel + + +@functools.lru_cache(8) +def _get_dtype_size(dtype): + return torch.empty((), dtype=dtype).element_size() + + +def _construct_padding_tensor( + padding_numel: int, dtype: torch.dtype, requires_grad: bool, device: torch.device +): + # NOTE: Set the padding value as a magic number for debuggability. The + # value itself should never be used in any user-facing computation. 
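# --------------------------------------------------------------------------
# Worked example of the alignment arithmetic in `_get_aligned_numel`: with a
# 16-byte alignment target, the aligned numel is 16 // element_size.
import torch

ALIGNMENT = 16  # bytes, same constant as above
for dtype, expected in [(torch.float32, 4), (torch.float16, 8), (torch.float64, 2)]:
    element_size = torch.empty((), dtype=dtype).element_size()
    assert ALIGNMENT // element_size == expected
# --------------------------------------------------------------------------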
+ return ( + torch.ones( + (padding_numel,), dtype=dtype, requires_grad=requires_grad, device=device + ) + * _FLAT_PARAM_PADDING_VALUE + ) + + +# Use `lru_cache(1)` to only log the warning once (assuming the fixed warning +# messasge is passed in) +@functools.lru_cache(1) +def _warn_skip_writeback_check(log: logging.Logger, warning: str): + log.warning(warning) + + +# Use `lru_cache(1)` to only log the warning once +@functools.lru_cache(1) +def _warn_use_fake_all_gather(log: logging.Logger, warning: str): + log.warning(warning) + + +# Use `lru_cache(1)` to only log the warning once +@functools.lru_cache(1) +def _warn_use_fake_reduce(log: logging.Logger, warning: str): + log.warning(warning) + + +def _same_storage(a, b): + return a.untyped_storage().data_ptr() == b.untyped_storage().data_ptr() + + +def _same_storage_size(a: torch.Tensor, b: int): + return a.untyped_storage().size() // a.element_size() == b + + +def _storage_size_allocated(tensor: Tensor): + storage_size: int = tensor.untyped_storage().size() + return storage_size > 0 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_fsdp_extensions.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_fsdp_extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..7d5d1e5f98a3427a0613b34f0caf9756aae2a7b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_fsdp_extensions.py @@ -0,0 +1,179 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional, Tuple + +import torch +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor.api import ShardedTensor +from torch.distributed._shard.sharded_tensor.shard import Shard +from torch.distributed._tensor import DeviceMesh, DTensor +from torch.distributed.fsdp._shard_utils import ( + _all_gather_dtensor, + _create_chunk_dtensor, + _create_chunk_sharded_tensor, +) + + +class FSDPExtensions(ABC): + """ + This enables some customizable hooks to enable composability with tensor + parallelism. To activate these hooks, use :func:`_set_fsdp_extensions` to + set a custom :class:`FSDPExtensions` that implements the hooks. + """ + + @abstractmethod + def pre_flatten_transform( + self, + tensor: torch.Tensor, + ) -> Tuple[torch.Tensor, Optional[Any]]: + """E.g. converting ``DistributedTensor`` to local tensor.""" + ... + + @abstractmethod + def post_unflatten_transform( + self, + tensor: torch.Tensor, + param_extension: Any, + ) -> torch.Tensor: + """E.g. converting local tensor to ``DistributedTensor``.""" + ... + + @abstractmethod + def chunk_tensor( + self, + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, + device: Optional[torch.device] = None, + ) -> torch.Tensor: + """Shards a tensor to chunks and returns the local chunk.""" + ... + + @abstractmethod + def chunk_dtensor( + self, + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, + ) -> torch.Tensor: + """Shards a tensor/DTensor to DTensor and returns the local DTensor.""" + ... + + @abstractmethod + def pre_load_state_dict_transform( + self, + tensor: torch.Tensor, + ) -> Tuple[torch.Tensor, List[Shard]]: + """ + This is to be called before loading a *sharded* model state dict and + should return the tensor and list of shards from which to load data. + """ + ... 
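# --------------------------------------------------------------------------
# Illustrative sketch (written as a downstream user would): a minimal
# pass-through `FSDPExtensions` subclass that delegates to the default
# chunking/gathering helpers imported at the top of this file, then registers
# it via `_set_fsdp_extensions`. The class name `PassthroughExtensions` is
# hypothetical; a real tensor-parallel integration (e.g. `DTensorExtensions`)
# does non-trivial work in these hooks, so this only shows the wiring.
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions, _set_fsdp_extensions
from torch.distributed.fsdp._shard_utils import (
    _all_gather_dtensor,
    _create_chunk_dtensor,
    _create_chunk_sharded_tensor,
)


class PassthroughExtensions(FSDPExtensions):
    def pre_flatten_transform(self, tensor):
        return tensor, None  # no per-parameter extension metadata

    def post_unflatten_transform(self, tensor, param_extension):
        return tensor

    def chunk_tensor(self, tensor, rank, world_size, num_devices_per_node, pg, device=None):
        return _create_chunk_sharded_tensor(tensor, rank, world_size, num_devices_per_node, pg)

    def chunk_dtensor(self, tensor, rank, device_mesh):
        return _create_chunk_dtensor(tensor, rank, device_mesh)

    def pre_load_state_dict_transform(self, tensor):
        shards = tensor.local_shards() if isinstance(tensor, ShardedTensor) else []
        return tensor, shards

    def all_gather_dtensor(self, tensor, parent_mesh):
        return _all_gather_dtensor(tensor, parent_mesh)


_set_fsdp_extensions(PassthroughExtensions())
# --------------------------------------------------------------------------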
+ + @abstractmethod + def all_gather_dtensor( + self, + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], + ) -> torch.Tensor: + """ + This is to be called before loading a *sharded* DTensor state dict. + This gathers tensor in FSDP dimension and returns local tensor of + TP DTensor. + """ + ... + + +_extensions: Optional[FSDPExtensions] = None + + +def _set_fsdp_extensions(flattener: FSDPExtensions) -> None: + global _extensions + _extensions = flattener + + +def _ext_pre_flatten_transform( + tensor: torch.Tensor, + fsdp_extension: Optional[FSDPExtensions] = None, +) -> Tuple[torch.Tensor, Optional[Any]]: + if fsdp_extension is not None: + new_tensor, param_extension = fsdp_extension.pre_flatten_transform(tensor) + if param_extension is not None: + return new_tensor, param_extension + return tensor, None + + +def _ext_post_unflatten_transform( + tensor: torch.Tensor, + param_extension: Any, + fsdp_extension: Optional[FSDPExtensions] = None, +) -> torch.Tensor: + if fsdp_extension is not None and param_extension is not None: + return fsdp_extension.post_unflatten_transform(tensor, param_extension) + return tensor + + +def _ext_chunk_tensor( + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, + fsdp_extension: Optional[FSDPExtensions] = None, +) -> torch.Tensor: + chunk_tensor_fn = ( + fsdp_extension.chunk_tensor + if fsdp_extension is not None + else _create_chunk_sharded_tensor + ) + return chunk_tensor_fn( + tensor, + rank, + world_size, + num_devices_per_node, + pg, + ) + + +def _ext_chunk_dtensor( + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, + fsdp_extension: Optional[FSDPExtensions] = None, +) -> torch.Tensor: + chunk_dtensor_fn = ( + fsdp_extension.chunk_dtensor + if fsdp_extension is not None + else _create_chunk_dtensor + ) + return chunk_dtensor_fn( + tensor, + rank, + device_mesh, + ) + + +def _ext_pre_load_state_dict_transform( + tensor: torch.Tensor, + fsdp_extension: Optional[FSDPExtensions] = None, +) -> Tuple[torch.Tensor, List[Shard]]: + if fsdp_extension is not None: + return fsdp_extension.pre_load_state_dict_transform(tensor) + + assert type(tensor) is ShardedTensor + shards = tensor.local_shards() + return (tensor, shards) + + +def _ext_all_gather_dtensor( + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], + fsdp_extension: Optional[FSDPExtensions] = None, +) -> torch.Tensor: + all_gather_dtensor_fn = ( + fsdp_extension.all_gather_dtensor + if fsdp_extension is not None + else _all_gather_dtensor + ) + return all_gather_dtensor_fn(tensor, parent_mesh) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d342499101d97a2a5c0cf44f816787e469ffc7da --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py @@ -0,0 +1,1182 @@ +import collections +import itertools +import os +import warnings +from typing import ( + Any, + Callable, + Deque, + Dict, + Generator, + Iterable, + Iterator, + List, + no_type_check, + Optional, + Set, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._exec_order_utils as exec_order_utils +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.distributed.fsdp.fully_sharded_data_parallel as fsdp_file +import torch.nn as nn +from torch.distributed.algorithms._comm_hooks import 
default_hooks +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh +from torch.distributed.distributed_c10d import _get_default_group +from torch.distributed.fsdp._common_utils import ( + _FSDPDeviceHandle, + _FSDPState, + _get_module_fsdp_state, + _is_fsdp_flattened, + _named_parameters_with_duplicates, + clean_tensor_name, + TrainingState, +) +from torch.distributed.fsdp._flat_param import ( + _FSDP_USE_FULL_PREC_IN_EVAL, + FlatParameter, + FlatParamHandle, + HandleShardingStrategy, +) +from torch.distributed.fsdp._limiter_utils import _FreeEventQueue +from torch.distributed.fsdp.api import ( + BackwardPrefetch, + CPUOffload, + FullOptimStateDictConfig, + FullStateDictConfig, + MixedPrecision, + ShardingStrategy, + StateDictConfig, + StateDictType, +) +from torch.distributed.fsdp.wrap import _Policy +from torch.distributed.tensor.parallel.fsdp import DTensorExtensions +from torch.distributed.utils import _sync_params_and_buffers + +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch.utils.hooks import RemovableHandle + +_TORCHDISTX_AVAIL = True +try: + from torchdistx import deferred_init, fake # type: ignore[import] +except ImportError: + _TORCHDISTX_AVAIL = False + +PARAM_BROADCAST_BUCKET_SIZE = int(250 * 1024 * 1024) +FSDP_SYNCED = "_fsdp_synced" +# Specification of process groups for hybrid sharding strategies. +HybridShardProcessGroupType = Tuple[dist.ProcessGroup, dist.ProcessGroup] +# Overall specification of process group. +ProcessGroupType = Optional[Union[dist.ProcessGroup, HybridShardProcessGroupType]] + + +# TODO (awgu): Refactor this later +SHARDING_STRATEGY_MAP = { + ShardingStrategy.NO_SHARD: HandleShardingStrategy.NO_SHARD, + ShardingStrategy.FULL_SHARD: HandleShardingStrategy.FULL_SHARD, + ShardingStrategy.SHARD_GRAD_OP: HandleShardingStrategy.SHARD_GRAD_OP, + ShardingStrategy.HYBRID_SHARD: HandleShardingStrategy.HYBRID_SHARD, + ShardingStrategy._HYBRID_SHARD_ZERO2: HandleShardingStrategy._HYBRID_SHARD_ZERO2, +} +HYBRID_SHARDING_STRATEGIES = [ + ShardingStrategy.HYBRID_SHARD, + ShardingStrategy._HYBRID_SHARD_ZERO2, +] +NO_RESHARD_AFTER_FORWARD_STRATEGIES = ( + ShardingStrategy.SHARD_GRAD_OP, + ShardingStrategy._HYBRID_SHARD_ZERO2, +) + + +# NOTE: Since non-self attributes cannot be type annotated, several attributes +# on `state` are defined first as local variables before being assigned. + + +@no_type_check +def _init_process_group_state( + state: _FSDPState, + process_group: ProcessGroupType, + sharding_strategy: ShardingStrategy, + policy: Optional[_Policy], + device_mesh: Optional[DeviceMesh] = None, +) -> _FSDPState: + if process_group is not None and device_mesh is not None: + raise ValueError( + "Cannot pass both process_group and device_mesh at the " + "same time. Please just pass only one of them." + ) + is_hybrid_strategy = sharding_strategy in HYBRID_SHARDING_STRATEGIES + if is_hybrid_strategy: + if process_group is None and policy is None and device_mesh is None: + # Raise an error here, since this is manual wrapping with no process group + # passed in, there is no way to ensure all wrapped FSDP instances use the same + # process groups. 
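# --------------------------------------------------------------------------
# Illustrative sketch of the two accepted process-group specifications for
# the hybrid sharding strategies handled above: either an
# (intra_node_pg, inter_node_pg) tuple, or a 2-D DeviceMesh whose outer
# dimension is inter-node and inner dimension is intra-node. Assumes the
# process group is already initialized (e.g. under torchrun); otherwise this
# block is a no-op. The groups below are placeholders spanning all ranks, and
# `my_module` in the commented call is hypothetical.
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy

if dist.is_initialized():
    intra_node_pg = dist.new_group()  # placeholder: all ranks
    inter_node_pg = dist.new_group()  # placeholder: all ranks
    hybrid_pg = (intra_node_pg, inter_node_pg)  # HybridShardProcessGroupType
    # fsdp_model = FSDP(my_module,
    #                   sharding_strategy=ShardingStrategy.HYBRID_SHARD,
    #                   process_group=hybrid_pg)
# --------------------------------------------------------------------------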
+ raise ValueError( + f"Manual wrapping with {sharding_strategy}", + "requires explicit specification of process group or device_mesh.", + ) + else: + state = _init_process_group_state_for_hybrid_shard( + state, process_group, device_mesh + ) + else: + if device_mesh: + state._device_mesh = device_mesh + state.process_group = device_mesh.get_group(mesh_dim=0) + else: + state.process_group = ( + process_group if process_group is not None else _get_default_group() + ) + + state.rank = state.process_group.rank() + state.world_size = state.process_group.size() + data_parallel_world_size = state.world_size + if is_hybrid_strategy: + data_parallel_world_size *= state._inter_node_pg.size() + state._gradient_predivide_factor = ( + default_hooks.DefaultState._get_gradient_predivide_factor( + data_parallel_world_size + ) + ) + state._gradient_postdivide_factor = ( + data_parallel_world_size / state._gradient_predivide_factor + ) + return state + + +@no_type_check +def _init_process_group_state_for_hybrid_shard( + state: _FSDPState, + process_group: ProcessGroupType, + device_mesh: DeviceMesh, +) -> _FSDPState: + if device_mesh: + if _is_valid_hybrid_shard_device_mesh(device_mesh): + state._device_mesh = device_mesh + # We currently only allow _inter_node_pg to be the outermost dimension, and the + # process_group(intra_node) to be the innermost dimension. + state._inter_node_pg = device_mesh.get_group(mesh_dim=0) + state.process_group = device_mesh.get_group(mesh_dim=1) + else: + raise ValueError( + "Expected device_mesh to have ndim=2 " + f"but got {len(device_mesh.get_group())}" + ) + elif process_group is None: + default_group = _get_default_group() + intra_node_group, inter_node_group = _init_intra_and_inter_node_groups( + default_group, state._device_handle.device_count() + ) + # we shard across intra-node + state.process_group = intra_node_group + # save _inter_node_pg to allreduce across. + state._inter_node_pg = inter_node_group + else: + # Check type and assign state.process_group and state._inter_node_pg. + if _is_valid_hybrid_shard_pg_type(process_group): + # Assuming that user passed in as intra node group and inter node group + # as documented. + state.process_group, state._inter_node_pg = process_group + else: + raise ValueError( + "Expected process_group to be passed in as either None or " + f"Tuple[dist.ProcessGroup, dist.ProcessGroup] but got {type(process_group)}" + ) + # Create state for allreduce + state._inter_node_state = _get_default_comm_hook_state( + process_group=state._inter_node_pg, + ) + return state + + +@no_type_check +def _is_valid_hybrid_shard_pg_type(process_group: Any) -> bool: + return ( + isinstance(process_group, tuple) + and len(process_group) == 2 + and all(isinstance(pg, dist.ProcessGroup) for pg in process_group) + ) + + +@no_type_check +def _is_valid_hybrid_shard_device_mesh(device_mesh: DeviceMesh) -> bool: + return isinstance(device_mesh, DeviceMesh) and device_mesh.ndim == 2 + + +@no_type_check +def _init_intra_node_process_group(num_devices_per_node: int) -> dist.ProcessGroup: + """ + Return a process group across the current node. + + For example, given each row is a distinct node: + 0 1 2 3 4 5 6 7 8 + 9 10 11 12 13 14 15 + This API would return an intra-node subgroup across + [0, 7] or [8, 15] depending on the process's rank. + For example, rank 3 would get [0, 7]. 
+ """ + intra_node_subgroup, _ = dist.new_subgroups(num_devices_per_node) + return intra_node_subgroup + + +@no_type_check +def _init_inter_node_process_group( + global_process_group: dist.ProcessGroup, + num_devices_per_node: int, +) -> dist.ProcessGroup: + """ + Return an inter-node process group where each contained rank has the same local rank. + + For example, given each row is a distinct node: + 0 1 2 3 4 5 6 7 8 + 9 10 11 12 13 14 15 + This API would return inter-node process group {0, 8}, {1, 9}, {2, 10}, and so forth + depending on the process's rank. For example, rank 1 would get {1, 9}, rank 5 + would get {5, 13}. + """ + # the inter-node pg that is returned + inter_node_pg = None + sharding_backend = dist.get_backend(global_process_group) + world_size = dist.get_world_size(global_process_group) + # Assuming fully homogeneous setup + num_nodes = world_size // num_devices_per_node + my_local_rank = dist.get_rank(global_process_group) % num_devices_per_node + for local_rank in range(num_devices_per_node): + ranks_for_inter_group = [ + local_rank + (i * num_devices_per_node) for i in range(num_nodes) + ] + # every rank always needs to call dist.new_group + grp = dist.new_group(ranks=ranks_for_inter_group, backend=sharding_backend) + if local_rank == my_local_rank: + inter_node_pg = grp + + assert ( + inter_node_pg is not None + ), f"{my_local_rank} expected to assign inter-node pg, but did not" + return inter_node_pg + + +def _init_intra_and_inter_node_groups( + global_process_group: dist.ProcessGroup, + num_devices_per_node: int, +) -> Tuple[dist.ProcessGroup, dist.ProcessGroup]: + """ + Initialize intra and inter-node process groups and return the ones corresponding to this process's rank. + + This function can be used to initialize process groups for ``HYBRID_SHARD`` or + ``_HYBRID_SHARD_ZERO2`` in FSDP. + This function assumes each node has an equal number of CUDA-enabled devices. + Returns: + Tuple[dist.ProcessGroup, dist.ProcessGroup]: Intra and inter-node process group. + """ + return ( + _init_intra_node_process_group(num_devices_per_node), + _init_inter_node_process_group(global_process_group, num_devices_per_node), + ) + + +@no_type_check +def _init_ignored_module_states( + state: _FSDPState, + module: nn.Module, + ignored_modules: Optional[Iterable[torch.nn.Module]], + ignored_states: Union[ + Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]] + ] = None, +) -> _FSDPState: + if ignored_modules is not None and ignored_states is not None: + raise ValueError( + "Cannot pass both ignored_modules and ignored_states at the " + "same time. Please just pass ignored_states." + ) + ignored_parameters = None + passed_as_ignored_states = ignored_states is not None + if passed_as_ignored_states: + ignored_states_list = list(ignored_states) + _check_ignored_states(ignored_states_list, True) + else: + ignored_states_list = [] + _check_ignored_states( + list(ignored_modules) if ignored_modules is not None else [], False + ) + if len(ignored_states_list) > 0: + if isinstance(ignored_states_list[0], nn.Parameter): + ignored_parameters = ignored_states_list + else: + ignored_modules = ignored_states_list + state._ignored_modules = _get_ignored_modules(module, ignored_modules) + state._ignored_params = _get_ignored_params( + module, + state._ignored_modules, + ignored_parameters, + ) + state._ignored_buffer_names = _get_ignored_buffer_names( + module, + state._ignored_modules, + ) + # TODO: FSDP's contract for buffers is not well-defined. 
They are + # implicitly ignored for most functionality since they are not sharded; + # however, FSDP still imposes some semantics on buffers (e.g. buffer mixed + # precision). We should formalize this contract and decide if we need to + # compute and store `_ignored_buffers`. + return state + + +def _check_ignored_states( + ignored_states: List[Any], passed_as_ignored_states: bool +) -> None: + """ + Check that the ignored states are uniformly parameters or uniformly modules. + + We may remove this check in the future if we permit mixing. + """ + if len(ignored_states) == 0: + return + if passed_as_ignored_states: + all_params = all(isinstance(state, nn.Parameter) for state in ignored_states) + all_modules = all(isinstance(state, nn.Module) for state in ignored_states) + if not all_params and not all_modules: + # Sort for consistent ordering for unit test regex matching + sorted_types = sorted({type(state) for state in ignored_states}, key=repr) + raise ValueError( + "ignored_states expects all nn.Parameter or all nn.Module list " + f"elements but got types {sorted_types}" + ) + else: + if not all(isinstance(state, nn.Module) for state in ignored_states): + sorted_types = sorted({type(state) for state in ignored_states}, key=repr) + raise ValueError( + "ignored_modules expects nn.Module list elements but got " + f"types {sorted_types}" + ) + + +@no_type_check +def _init_device_handle( + state: _FSDPState, + module: nn.Module, + ignored_params: Set[nn.Parameter], + device_id: Optional[Union[int, torch.device]], +) -> _FSDPState: + """ + Determine the device handle used for initializing FSDP. + + If a device is specified by ``device_id``, + then this returns the device handle corresponding to that device type. Otherwise, if the + module is already on a non-CPU device, then the device type is that non-CPU device type. + If the module is on CPU or meta, then the device type is the current CUDA device. + + This method will be called once the ignored parameters are determined, as the device handle may be needed + for other initialization. + """ + determined_device = None + if device_id is not None: + determined_device = ( + device_id + if isinstance(device_id, torch.device) + else torch.device(device_id) + ) + if determined_device is None: + for param in _get_orig_params(module, ignored_params): + if param.device.type in {"cpu", "meta"}: + continue + if determined_device is None: + determined_device = param.device + else: + if param.device.type != determined_device.type: + raise RuntimeError( + f"FSDP does not support modules with different device types " + f"but got params on {determined_device.type} and {param.device.type}" + ) + determined_device = determined_device or torch.device( + "cuda", torch.cuda.current_device() + ) + + state._device_handle = _FSDPDeviceHandle.from_device(determined_device) + return state + + +@no_type_check +def _init_buffer_state( + state: _FSDPState, + module: nn.Module, +) -> _FSDPState: + state._buffer_names = _get_buffer_names(module) + # Save a mapping from clean fully-qualified buffer name (starting from + # `module`) to its original dtype for restoring that dtype during model + # checkpointing when buffer mixed precision is enabled. The names should + # be clean since the casting happens in a `summon_full_params()` context.
+ _buffer_name_to_orig_dtype: Dict[str, torch.dtype] = {} + for buffer_name, buffer in module.named_buffers(): + buffer_name = clean_tensor_name(buffer_name) + _buffer_name_to_orig_dtype[buffer_name] = buffer.dtype + state._buffer_name_to_orig_dtype = _buffer_name_to_orig_dtype + return state + + +@no_type_check +def _init_core_state( + state: _FSDPState, + sharding_strategy: Optional[ShardingStrategy], + mixed_precision: Optional[MixedPrecision], + cpu_offload: Optional[CPUOffload], + limit_all_gathers: bool, + use_orig_params: bool, + backward_prefetch_limit: int, + forward_prefetch_limit: int, +) -> _FSDPState: + # We clamp the strategy to `NO_SHARD` for world size of 1 since they are + # currently functionally equivalent. This may change if/when we integrate + # FSDP with MoE. + if state.world_size == 1: + if sharding_strategy != ShardingStrategy.NO_SHARD: + warnings.warn( + "FSDP is switching to use `NO_SHARD` instead of " + f"{sharding_strategy or ShardingStrategy.FULL_SHARD} since " + "the world size is 1." + ) + sharding_strategy = ShardingStrategy.NO_SHARD + elif sharding_strategy == ShardingStrategy.NO_SHARD: + warnings.warn( + "The `NO_SHARD` sharding strategy is deprecated. If having issues, " + "please use DistributedDataParallel instead.", + # Level 1 is here, level 2 is from `FullyShardedDataParallel`, and + # level 3 is from the true caller + stacklevel=3, + ) + state.sharding_strategy = sharding_strategy or ShardingStrategy.FULL_SHARD + state.mixed_precision = mixed_precision or MixedPrecision() + if mixed_precision is not None: + torch._C._log_api_usage_once( + f"torch.distributed.fsdp.mixed_precision.{str(state.mixed_precision)}" + ) + state._use_full_prec_in_eval = ( + os.environ.get(_FSDP_USE_FULL_PREC_IN_EVAL, "") == "1" + ) + state.cpu_offload = cpu_offload or CPUOffload() + state.limit_all_gathers = limit_all_gathers + state._use_orig_params = use_orig_params + state.training_state = TrainingState.IDLE + state._is_root = None + state._free_event_queue = _FreeEventQueue() + state._debug_level = dist.get_debug_level() + state._exec_order_data = exec_order_utils._ExecOrderData( + state._debug_level, + backward_prefetch_limit, + forward_prefetch_limit, + ) + # Mapping from fully sharded module to the handles it is responsible to + # unshard and reshard (see [Note: Fully Sharded Module]) + _fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = dict() + state._fully_sharded_module_to_handle = _fully_sharded_module_to_handle + # Invariant: `state.params` contains exactly the `FlatParameter`s of the + # handles in `state._handle` + _handle: FlatParamHandle = None + state._handle = _handle + params: List[FlatParameter] = [] + state.params = params + return state + + +@no_type_check +def _init_runtime_state( + state: _FSDPState, +) -> _FSDPState: + _root_pre_forward_handles: List[RemovableHandle] = [] + state._root_pre_forward_handles = _root_pre_forward_handles + _pre_forward_handles: List[RemovableHandle] = [] + state._pre_forward_handles = _pre_forward_handles + _post_forward_handles: List[RemovableHandle] = [] + state._post_forward_handles = _post_forward_handles + state._sync_gradients = True + state._comm_hook = None + state._comm_hook_state = None + # Used to prevent running the pre-backward hook multiple times + return state + + +@no_type_check +def _init_prefetching_state( + state: _FSDPState, + backward_prefetch: BackwardPrefetch, + forward_prefetch: bool, +) -> _FSDPState: + state.backward_prefetch = backward_prefetch + state.forward_prefetch = 
forward_prefetch + # The data structures use tuples of handles to generalize over the case + # where a module's forward involves multiple handles. + return state + + +@no_type_check +def _init_extension(state: _FSDPState, device_mesh: DeviceMesh = None) -> _FSDPState: + # TODO: we need to add additional check once we support FSDP + PiPPy. + # This check is currently sufficient, since we only support FSDP + TP. + if device_mesh and _mesh_resources.get_parent_mesh(state._device_mesh) is not None: + state._fsdp_extension = DTensorExtensions(state._device_handle) + else: + # We need to explicilty set _fsdp_extension to None. + # Otherwise, we will run into an infinite recursion when getting the attribute. + state._fsdp_extension = None + return state + + +@no_type_check +def _init_state_dict_state(state: _FSDPState) -> _FSDPState: + state._state_dict_type = StateDictType.FULL_STATE_DICT + state_dict_config: StateDictConfig = FullStateDictConfig() + state._optim_state_dict_config = FullOptimStateDictConfig() + state._state_dict_config = state_dict_config + unshard_params_ctx: Dict[nn.Module, Generator] = {} + state._unshard_params_ctx = unshard_params_ctx + + return state + + +@no_type_check +def _init_param_handle_from_module( + state: _FSDPState, + fully_sharded_module: nn.Module, + device_id: Optional[Union[int, torch.device]], + param_init_fn: Optional[Callable[[nn.Module], None]], + sync_module_states: bool, +) -> _FSDPState: + """Initialize a ``FlatParamHandle`` from a module ``fully_sharded_module``.""" + _check_single_device_module(fully_sharded_module, state._ignored_params, device_id) + device_from_device_id = _get_device_from_device_id(device_id, state.rank) + is_meta_module, is_torchdistX_deferred_init = _need_to_materialize_module( + fully_sharded_module, state._ignored_params, state._ignored_modules + ) + # Materialize the module if needed + if (is_meta_module or is_torchdistX_deferred_init) and param_init_fn is not None: + _materialize_with_param_init_fn( + fully_sharded_module, param_init_fn, state._ignored_modules + ) + elif is_meta_module: + _materialize_meta_module( + fully_sharded_module, device_id, state._ignored_modules + ) + elif is_torchdistX_deferred_init: + deferred_init.materialize_module( + fully_sharded_module, + check_fn=lambda submodule: _get_module_fsdp_state(submodule) is None + and submodule not in state._ignored_modules, + ) + + ignored_buffers = { + buffer + for ignored_module in state._ignored_modules + for buffer in ignored_module.buffers() + } + + _move_module_to_device( + fully_sharded_module, + state._ignored_params, + ignored_buffers, + device_from_device_id, + ) + state.compute_device = _get_compute_device( + fully_sharded_module, + state._ignored_params, + device_from_device_id, + state.rank, + ) + + managed_params = list(_get_orig_params(fully_sharded_module, state._ignored_params)) + if sync_module_states: + _sync_module_params_and_buffers( + fully_sharded_module, managed_params, state.process_group + ) + if state.sharding_strategy in HYBRID_SHARDING_STRATEGIES: + _sync_module_params_and_buffers( + fully_sharded_module, managed_params, state._inter_node_pg + ) + _init_param_handle_from_params(state, managed_params, fully_sharded_module) + return state + + +@no_type_check +def _init_param_handle_from_params( + state: _FSDPState, + params: List[nn.Parameter], + fully_sharded_module: nn.Module, +): + if len(params) == 0: + return + handle = FlatParamHandle( + params, + fully_sharded_module, + state.compute_device, + 
SHARDING_STRATEGY_MAP[state.sharding_strategy], + state.cpu_offload.offload_params, + state.mixed_precision.param_dtype, + state.mixed_precision.reduce_dtype, + state.mixed_precision.keep_low_precision_grads, + state.process_group, + state._use_orig_params, + fsdp_extension=state._fsdp_extension, + ) + handle.shard() + assert not state._handle + state.params.append(handle.flat_param) + state._handle = handle + state._fully_sharded_module_to_handle[handle._fully_sharded_module] = handle + cpu_device = torch.device("cpu") + if state.cpu_offload.offload_params and handle.flat_param.device != cpu_device: + handle.flat_param_to(cpu_device) + + +def _get_ignored_modules( + root_module: nn.Module, + _ignored_modules: Optional[Iterable[torch.nn.Module]], +) -> Set[nn.Module]: + """ + Check that ``_ignored_modules`` is an iterable of ``nn.Module`` s without any FSDP instances. + + Return the modules contained in their module + subtrees as a :class:`set`. Nested FSDP instances are excluded, but their + already-computed ignored modules are included. + + ``_ignored_modules`` represents the argument passed by the user to FSDP. + """ + msg_prefix = "`ignored_modules` should be an iterable of `torch.nn.Module`s " + try: + ignored_root_modules = ( + set(_ignored_modules) if _ignored_modules is not None else set() + ) + except TypeError as e: + raise TypeError(msg_prefix + f"but got {type(_ignored_modules)}") from e + for module in ignored_root_modules: + if not isinstance(module, torch.nn.Module): + raise TypeError(msg_prefix + f"but got an iterable with {type(module)}") + if _get_module_fsdp_state(module): + # TODO: We may relax this by taking the FSDP instance's wrapped + # module to provide more flexibility to the user. + raise ValueError("`ignored_modules` should not include FSDP modules") + # Treat modules that cannot compose with `fully_shard` as ignored modules, + # meaning that their subtrees are ignored + for module in root_module.modules(): + if not traversal_utils._composable(module): + ignored_root_modules.add(module) + # NOTE: Even if `ignored_root_modules` is empty, do not return early so + # that this FSDP instance can get any ignored modules from its children. + + # Include child modules and exclude nested FSDP modules themselves + ignored_modules = { + child + for module in ignored_root_modules + for child in module.modules() + if not isinstance(child, fsdp_file.FullyShardedDataParallel) + } + if root_module in ignored_modules: + warnings.warn( + "Trying to ignore the top-level module passed into the FSDP " + "constructor itself will result in all parameters being " + f"ignored and is not well-supported: {module}" + ) + # Include nested FSDP modules' ignored modules + for submodule in root_module.modules(): + optional_fsdp_state = _get_module_fsdp_state(submodule) + if optional_fsdp_state is not None: + assert hasattr(optional_fsdp_state, "_ignored_modules") + ignored_modules.update(optional_fsdp_state._ignored_modules) + return ignored_modules + + +def _get_ignored_params( + root_module: torch.nn.Module, + ignored_modules: Set[torch.nn.Module], + ignored_parameters: Optional[Iterable[torch.nn.Parameter]] = None, +) -> Set[torch.nn.Parameter]: + """ + Return the parameters of the modules in ``ignored_modules`` and the parameters in ``ignored_parameters``. + + :class:`FlatParameter` s are excluded from the result. 
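+
+    Example (illustrative sketch; ``model`` and its ``frozen`` submodule are hypothetical)::
+        >>> # xdoctest: +SKIP
+        >>> ignored = _get_ignored_params(model, {model.frozen})
+        >>> all(not _is_fsdp_flattened(p) for p in ignored)
+        True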
+ """ + all_ignored_params: Set[torch.nn.Parameter] = set() + + params_in_ignored_modules = { + p for m in ignored_modules for p in m.parameters() if not _is_fsdp_flattened(p) + } + + all_ignored_params.update(params_in_ignored_modules) + + if ignored_parameters is not None: + params_in_ignored_parameters = { + p for p in ignored_parameters if not _is_fsdp_flattened(p) + } + all_ignored_params.update(params_in_ignored_parameters) + + # Always include nested FSDP modules' ignored parameters + for submodule in root_module.modules(): + optional_fsdp_state = _get_module_fsdp_state(submodule) + if optional_fsdp_state is not None: + assert hasattr(optional_fsdp_state, "_ignored_params") + all_ignored_params.update(optional_fsdp_state._ignored_params) + + return all_ignored_params + + +def _get_ignored_buffer_names( + root_module: torch.nn.Module, + ignored_modules: Set[torch.nn.Module], +) -> Set[str]: + """Return the cleaned buffer FQNs in ``ignored_modules``.""" + all_ignored_buffer_names: Set[str] = set() + + buffers_in_ignored_modules = { + buffer for m in ignored_modules for buffer in m.buffers() + } + + all_ignored_buffer_names.update( + { + clean_tensor_name(buffer_name) + for buffer_name, buffer in root_module.named_buffers() + if buffer in buffers_in_ignored_modules + } + ) + + # Always include nested FSDP modules' ignored buffer names + for submodule in root_module.modules(): + optional_fsdp_state = _get_module_fsdp_state(submodule) + if optional_fsdp_state is not None: + assert hasattr(optional_fsdp_state, "_ignored_buffer_names") + all_ignored_buffer_names.update(optional_fsdp_state._ignored_buffer_names) + + return all_ignored_buffer_names + + +def _get_buffer_names(root_module: nn.Module) -> Set[str]: + """Return the fully prefixed names of all buffers in the module hierarchy rooted at ``root_module`` as a class:`set`.""" + return { + clean_tensor_name(buffer_name) for buffer_name, _ in root_module.named_buffers() + } + + +def _check_single_device_module( + module: nn.Module, + ignored_params: Set[nn.Parameter], + device_id: Optional[Union[int, torch.device]], +) -> None: + """ + Raise an error if ``module`` has original parameters on multiple devices, ignoring the parameters in ``ignored_params``. + + Thus, after this method, the + module must be either fully on the CPU or fully on a non-CPU device. + """ + devices = {param.device for param in _get_orig_params(module, ignored_params)} + # We allow module to be partially on CPU and partially on GPU if device_id is not + # None, since the device_id arg will result in the CPU portion being moved to + # GPU. This is useful in cases where part of the module may be parallelized + # by another algorithm and may already be on GPU. We'd like to enforce device_id + # to not be None, otherwise we'd flatten parameters in a mixed module which is + # not supported. + if len(devices) == 2 and torch.device("cpu") in devices: + if device_id is None: + raise RuntimeError( + "To support a module with both CPU and GPU params, " + "please pass in device_id argument." + ) + elif len(devices) > 1: + raise RuntimeError( + f"FSDP only supports single device modules but got params on {devices}" + ) + + +def _get_device_from_device_id( + device_id: Optional[Union[int, torch.device]], + rank: int, +) -> Optional[torch.device]: + """ + Return a ``torch.device`` for the specified ``device_id``. + + Processes ``device_id`` and returns either the corresponding device or + ``None`` if ``device_id`` is ``None``. 
+ """ + if device_id is None: + return None + device = ( + device_id if isinstance(device_id, torch.device) else torch.device(device_id) + ) + if device == torch.device("cuda"): + warnings.warn( + f"FSDP got the argument `device_id` {device_id} on rank " + f"{rank}, which does not have an explicit index. " + f"FSDP will use the current device {torch.cuda.current_device()}. " + "If this is incorrect, please explicitly call `torch.cuda.set_device()` " + "before FSDP initialization or pass in the explicit device " + "index as the `device_id` argument." + ) + device = torch.device("cuda", torch.cuda.current_device()) + return device + + +def _need_to_materialize_module( + module: nn.Module, + ignored_params: Set[nn.Parameter], + ignored_modules: Set[nn.Module], +) -> Tuple[bool, bool]: + """ + Return if ``module`` has parameters on meta device and if ``module`` is using torchdistX deferred initialization. + + At most of the returned bools can + be ``True``. If either is ``True``, then ``module`` needs to be + materialized. + """ + managed_params = list(_get_orig_params(module, ignored_params)) + is_meta_module = any(param.is_meta for param in managed_params) + # TODO: We need to establish a contract for FSDP and buffers. For now, we + # skip checking for meta buffers from ignored modules. We should consider + # refactoring the initialization holistically to avoid so many traversals. + for submodule in module.modules(): + if submodule in ignored_modules: + continue + for buf in submodule.buffers(recurse=False): + is_meta_module |= buf.is_meta + is_torchdistX_deferred_init = ( + not is_meta_module + and _TORCHDISTX_AVAIL + and any(fake.is_fake(param) for param in managed_params) + ) + return is_meta_module, is_torchdistX_deferred_init + + +def _materialize_with_param_init_fn( + root_module: nn.Module, + param_init_fn: Callable[[nn.Module], None], + ignored_modules: Set[nn.Module], +) -> None: + if not callable(param_init_fn): + raise ValueError( + f"Expected {param_init_fn} to be callable but got {type(param_init_fn)}" + ) + modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules) + for module in modules_to_materialize: + param_init_fn(module) + + +def _materialize_meta_module( + root_module: nn.Module, + device_from_device_id: Optional[torch.device], + ignored_modules: Set[nn.Module], +): + # Run default meta device initialization + materialization_device = device_from_device_id or torch.device( + torch.cuda.current_device() + ) + modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules) + try: + # Assume that each module's `reset_parameters()` only initializes its + # own parameters and not those of its children + with torch.no_grad(): + for module in modules_to_materialize: + # As a contract to the user, only call `reset_parameters()` if + # the module has directly managed parameters/buffers + module_state_iter = itertools.chain( + module.parameters(recurse=False), module.buffers(recurse=False) + ) + has_module_states = len(list(module_state_iter)) > 0 + if has_module_states: + module.to_empty(device=materialization_device, recurse=False) + module.reset_parameters() # type: ignore[operator] + except BaseException as e: + warnings.warn( + "Unable to call `reset_parameters()` for module on meta " + f"device with error {str(e)}. Please ensure that your module of" + f"type {type(module)} implements a `reset_parameters()` method." 
# type: ignore[possibly-undefined] + ) + raise e + + +def _get_modules_to_materialize( + root_module: nn.Module, ignored_modules: Set[nn.Module] +) -> List[nn.Module]: + # Run BFS to collect the modules to materialize via `reset_parameters()`, + # stopping at any module with FSDP already applied or at ignored modules. + modules_to_materialize: List[nn.Module] = [] + queue = collections.deque([root_module]) + visited_modules: Set[nn.Module] = {root_module} + while queue: + module = queue.popleft() + modules_to_materialize.append(module) + for child_module in module.children(): + if ( + child_module not in visited_modules + and _get_module_fsdp_state(child_module) is None + and child_module not in ignored_modules + ): + visited_modules.add(child_module) + queue.append(child_module) + return modules_to_materialize + + +def _move_module_to_device( + module: nn.Module, + ignored_params: Set[nn.Parameter], + ignored_buffers: Set[torch.Tensor], + device_from_device_id: Optional[torch.device], +) -> None: + """ + Move ``module`` depending on ``device_from_device_id`` and its current device. + + This includes moving ignored modules' parameters. + + - If ``device_from_device_id`` is not ``None``, then this moves + ``module`` to the device. + - If ``device_from_device_id`` is ``None``, then this does not move + ``module`` but warns the user if it is on CPU. + + Precondition: ``_check_single_device_module()``. + """ + cpu_device = torch.device("cpu") + if device_from_device_id is not None: + # BFS from `module` without traversing any nested FSDP instances to + # collect the parameters/buffers that have not yet been managed + queue: Deque[nn.Module] = collections.deque() + queue.append(module) + params: List[nn.Parameter] = [] + buffers: List[torch.Tensor] = [] + while queue: + curr_module = queue.popleft() + # NOTE: We include a check to only move parameters/buffers that are + # on CPU device. If they are on a CUDA device different from the + # one specified by `device_id`, then this does NOT move them. This + # is so that we can raise an error in `_get_compute_device()`. + params.extend( + param + for param in curr_module.parameters(recurse=False) + if param.device == cpu_device + ) + buffers.extend( + buffer + for buffer in curr_module.buffers(recurse=False) + if buffer.device == cpu_device + ) + for submodule in curr_module.children(): + if not isinstance(submodule, fsdp_file.FullyShardedDataParallel): + queue.append(submodule) + params_to_move = [p for p in params if p not in ignored_params] + bufs_to_move = [p for p in buffers if p not in ignored_buffers] + _move_states_to_device(params_to_move, bufs_to_move, device_from_device_id) + return + param = next(_get_orig_params(module, ignored_params), None) + if param is not None and param.device == cpu_device: + _warn_cpu_init() + + +def _move_states_to_device( + params: List[nn.Parameter], + buffers: List[torch.Tensor], + device_from_device_id: Optional[torch.device], +) -> None: + """ + Move states to the specified device. + + Precondition: ``_check_single_device_module()`` and module's parameters and + buffers have been materialized if needed. 
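+
+    Example (illustrative sketch; a hypothetical CPU parameter moved to GPU 0)::
+        >>> # xdoctest: +SKIP
+        >>> p = nn.Parameter(torch.zeros(3))
+        >>> _move_states_to_device([p], [], torch.device("cuda", 0))
+        >>> p.device
+        device(type='cuda', index=0)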
+ """ + if len(params) == 0 and len(buffers) == 0: + return + if len(params) > 0: + current_device = params[0].device + elif len(buffers) > 0: + current_device = buffers[0].device + cpu_device = torch.device("cpu") + if device_from_device_id is not None: + # Move the parameters and buffers like the `.data` code path in + # `nn.Module._apply()`, which underlies `nn.Module.to()` + for param in params: + with torch.no_grad(): + param.data = param.to(device_from_device_id) + if param.grad is not None: + param.grad.data = param.grad.to(device_from_device_id) + for buffer in buffers: + buffer.data = buffer.to(device_from_device_id) + elif current_device == cpu_device: # type: ignore[possibly-undefined] + _warn_cpu_init() + + +def _warn_cpu_init(): + warnings.warn( + "The passed-in `module` is on CPU and will thus have FSDP's sharding " + "initialization run on CPU, which may be slower than on GPU. We " + "recommend passing in the `device_id` argument for FSDP to move " + "`module` to GPU for the sharding initialization. `module` must also " + "be on GPU device to work with the `sync_module_states=True` flag " + "since that requires GPU communication." + ) + + +def _get_compute_device( + module: nn.Module, + ignored_params: Set[nn.Parameter], + device_from_device_id: Optional[torch.device], + rank: int, +) -> torch.device: + """ + Determine and return this FSDP instance's compute device. + + If a device is + specified by ``device_id``, then returns that device. Otherwise, If the + module is already on a non-CPU device, then the compute device is that non-CPU + device. If the module is on CPU, then the compute device is the current + device. + + Since this method should be called after materializing the module, any + non-CPU device should not be meta device. For now, the compute device is + always a CUDA GPU device with its explicit index. + + Precondition: ``_check_single_device_module()`` and + ``_move_module_to_device()``. + """ + param = next(_get_orig_params(module, ignored_params), None) + if param is not None and param.device.type != "cpu": + compute_device = param.device # Determined by model param placement + else: + if device_from_device_id is not None and device_from_device_id.type != "cuda": + compute_device = device_from_device_id # Determined by custom backend + else: + compute_device = torch.device("cuda", torch.cuda.current_device()) + if device_from_device_id is not None and compute_device != device_from_device_id: + raise ValueError( + f"Inconsistent compute device and `device_id` on rank {rank}: " + f"{compute_device} vs {device_from_device_id}" + ) + return compute_device + + +# TODO: See how to deprecate! +def _sync_module_params_and_buffers( + module: nn.Module, + params: List[nn.Parameter], + process_group: dist.ProcessGroup, +) -> None: + """ + Synchronize module states (i.e. parameters ``params`` and all not-yet-synced buffers) by broadcasting from rank 0 to all ranks. + + Precondition: ``sync_module_states == True`` and ``self.process_group`` has + been set. 
+ """ + module_states: List[torch.Tensor] = [] + for buffer in module.buffers(): + # Avoid re-synchronizing buffers in case of nested wrapping + if not getattr(buffer, FSDP_SYNCED, False): + setattr(buffer, FSDP_SYNCED, True) + detached_buffer = buffer.detach() + if is_traceable_wrapper_subclass(detached_buffer): + # NOTE: Here we assume no nested subclasses, at most one level of subclass + # in both model's buffers and params + attrs, _ = detached_buffer.__tensor_flatten__() # type: ignore[attr-defined] + inner_buffers = [getattr(detached_buffer, attr) for attr in attrs] + module_states.extend(inner_buffers) + else: + module_states.append(detached_buffer) + + for param in params: + detached_param = param.detach() + if is_traceable_wrapper_subclass(detached_param): + attrs, _ = detached_param.__tensor_flatten__() # type: ignore[attr-defined] + inner_params = [getattr(detached_param, attr) for attr in attrs] + module_states.extend(inner_params) + else: + module_states.append(detached_param) + + _check_module_states_for_sync_module_states(module_states) + _sync_params_and_buffers( + process_group, + module_states, + PARAM_BROADCAST_BUCKET_SIZE, + src=0, + ) + + +def _sync_module_states( + params: List[nn.Parameter], + buffers: List[torch.Tensor], + process_group: dist.ProcessGroup, +) -> None: + # Assumes that each call to this method passes in disjoint `params` and + # and `buffers` across calls, so there is no chance of re-synchronizing + params_and_buffers = [param.detach() for param in params] + [ + buffer.detach() for buffer in buffers + ] + _check_module_states_for_sync_module_states(params_and_buffers) + _sync_params_and_buffers( + process_group, + params_and_buffers, + PARAM_BROADCAST_BUCKET_SIZE, + src=0, + ) + + +def _check_module_states_for_sync_module_states( + module_states: List[torch.Tensor], +) -> None: + if module_states and any( + tensor.device == torch.device("cpu") for tensor in module_states + ): + raise ValueError( + "The module has CPU parameters or buffers when `sync_module_states=True`, " + "which requires them to be on GPU. Please specify the `device_id` argument " + "or move the module to GPU before passing it to FSDP." + ) + + +def _get_orig_params( + module: nn.Module, + ignored_params: Set[nn.Parameter], +) -> Iterator[nn.Parameter]: + """ + Return an iterator over the original parameters in ``module``. + + The iterator does not return + the parameters in ``ignored_params``, any ``FlatParameter`` s (which may be + present due to nested FSDP wrapping), or any original parameters already + flattened (only relevant when ``use_orig_params=True``). + """ + param_gen = module.parameters() + try: + while True: + param = next(param_gen) + if param not in ignored_params and not _is_fsdp_flattened(param): + yield param + except StopIteration: + pass + + +def _check_orig_params_flattened( + fsdp_module, + ignored_params: Set[nn.Parameter], +) -> None: + """ + Check that original parameters in ``fsdp_module`` have been flattened. + + The flattened parameters are made + invisible to ``named_parameters()`` for the module hierarchy rooted at + ``fsdp_module``. This should be called as a sanity check after flattening + the wrapped module's parameters. 
+ """ + for param_name, param in _named_parameters_with_duplicates(fsdp_module): + if param not in ignored_params and not _is_fsdp_flattened(param): + raise RuntimeError( + f"Found an unflattened parameter: {param_name}; " + f"{param.size()} {param.__class__}" + ) + + +def _get_default_comm_hook(sharding_strategy: ShardingStrategy): + return ( + default_hooks.allreduce_hook + if sharding_strategy == ShardingStrategy.NO_SHARD + else default_hooks.reduce_scatter_hook + ) + + +def _get_default_comm_hook_state( + process_group: dist.ProcessGroup, +) -> default_hooks.DefaultState: + return default_hooks.DefaultState(process_group=process_group) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_limiter_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_limiter_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..efb5b3ba5ae1f25291ecbf98b8b67a46dce0cc9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_limiter_utils.py @@ -0,0 +1,33 @@ +import collections +from typing import Deque, Optional + +import torch + + +class _FreeEventQueue: + """ + This tracks all pending frees corresponding to inflight all-gathers. The + queueing pattern is iterative enqueues with a single dequeue per iteration + once the limit ``_max_num_inflight_all_gathers`` is reached. + """ + + def __init__(self) -> None: + self._queue: Deque[torch.cuda.Event] = collections.deque() + self._max_num_inflight_all_gathers = 2 # empirically chosen + + def enqueue(self, free_event: torch.cuda.Event) -> None: + """Enqueues a free event.""" + self._queue.append(free_event) + + def dequeue_if_needed(self) -> Optional[torch.cuda.Event]: + """Dequeues a single event if the limit is reached.""" + if len(self._queue) >= self._max_num_inflight_all_gathers: + return self._dequeue() + return None + + def _dequeue(self) -> Optional[torch.cuda.Event]: + """Dequeues a free event if possible.""" + if self._queue: + event = self._queue.popleft() + return event + return None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..682a7f2b299a9316ff842c5c402b42aad7b31800 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py @@ -0,0 +1,2086 @@ +import copy +import functools +import logging +import warnings +from contextlib import ExitStack +from dataclasses import dataclass, field +from typing import ( + Any, + cast, + Dict, + Iterable, + Iterator, + List, + NamedTuple, + no_type_check, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._state_dict_utils import _gather_state_dict +from torch.distributed._tensor import DTensor, Replicate +from torch.distributed.distributed_c10d import _get_pg_default_device +from torch.distributed.fsdp._common_utils import ( + _apply_to_modules, + _FSDPState, + _get_module_fsdp_state_if_fully_sharded_module, + _get_param_to_fqns, + _module_handle, + _named_parameters_with_duplicates, + clean_tensor_name, +) +from torch.distributed.fsdp._debug_utils import SimpleProfiler +from torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle +from torch.distributed.fsdp._fsdp_extensions 
import ( + _ext_chunk_dtensor, + _ext_chunk_tensor, +) +from torch.distributed.fsdp._runtime_utils import ( + _lazy_init, + _reset_flat_param_grad_info_if_needed, +) +from torch.distributed.fsdp.api import ( + ShardingStrategy, + StateDictSettings, + StateDictType, +) +from torch.utils._pytree import tree_map_only + + +logger = logging.getLogger(__name__) + + +@dataclass +class FSDPParamInfo: + state: _FSDPState + handle: FlatParamHandle + param_indices: Dict[str, int] + param_requires_grad: List[bool] + + +def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]: + keys = sorted(dictionary.keys()) + for k in keys: + yield k, dictionary[k] + + +@dataclass +class _ConsolidatedOptimState: + """ + This holds the consolidated optimizer state on the target rank. Positive- + dimension tensor state is communicated across ranks, while zero-dimension + tensor state and non-tensor state is taken directly from the target rank. + + PyTorch version 1.12 moved to using zero-dimension tensors for scalar + values, but user implemented optimizers may still use float (i.e. a + non-tensor). Thus, we support both and handle them identically. + + Attributes: + tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension + tensor state name to the unsharded flat tensor representing the + state. + zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero- + dimension tensor state name to its value. + non_tensor_state (Dict[str, Any]): Mapping from non-tensor state + name to its value. + """ + + tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict) + zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict) + non_tensor_state: Dict[str, Any] = field(default_factory=dict) + + +class _PosDimTensorInfo(NamedTuple): + """ + Meatadata for positive-dimension tensors used internally for + :meth:`scatter_full_optim_state_dict`. + + Attributes: + shape (torch.Size): Sharded tensor shape (which is equal to the + unsharded tensor shape if the tensor is optimizer state for a + non-FSDP parameter and is hence not sharded). + dtype (torch.dtype): Data type of the tensor. + """ + + shape: torch.Size + dtype: torch.dtype + + +class _OptimStateKey(NamedTuple): + """ + This represents an optimizer state key that may be used commonly across + ranks. It is based on the unflattened parameter names rather than parameter + IDs to make it independent of each rank's own optimizer construction. + """ + + unflat_param_names: Tuple[str, ...] + is_fsdp_managed: bool + + +def _unflatten_optim_state( + fsdp_param_info: FSDPParamInfo, + flat_param_state: Dict[str, Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool, +) -> List[Dict[str, Any]]: + """ + Unflattens the optimizer state, consisting of the "state" part and the + "param_groups" part. Unflattening the "state" part involves consolidating + the state on the target rank and remapping from flattened to unflattened + parameter IDs, and the "param_groups" part only involves remapping from + flattened to unflattened parameter IDs. + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + flat_param_state (Dict[str, Any]): Entry for the flat parameter in the + "state" part of the optimizer state dict. + to_save (bool): Whether to save the state on this rank. 
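+        shard_state (bool): Whether to shard the unflattened positive-dimension
+            tensor state; if ``True``, then ``to_save`` must also be ``True``.
+        cpu_offload (bool): Whether to move the returned tensor state to CPU.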
+ + Returns: + List[Dict[str, Any]]: A :class:`list` holding the entries in the + "state" part of the optimizer state dict corresponding to the + unflattened parameters comprising the flat parameter if on the target + rank or an empty :class:`list` otherwise. The final optimizer state + dict will need to map these entries using the proper unflattened + parameter IDs. + """ + assert ( + not shard_state or to_save + ), "If ``shard_state`` is True, ``to_save`` has to be True." + consolidated_state = _communicate_optim_state( + fsdp_param_info, + flat_param_state, + ) + if to_save: + unflat_param_state = _unflatten_communicated_optim_state( + fsdp_param_info, + consolidated_state, + shard_state, + ) + for optim_state in unflat_param_state: + # We can't use .items() below cuz we'd run into a concurrent modification error + if cpu_offload: + for key in list(optim_state.keys()): + state = optim_state[key] + if not isinstance(state, torch.Tensor): + continue + optim_state[key] = state.cpu() + return unflat_param_state + else: + return [] + + +def _is_zero_dim_tensor(x: Any) -> bool: + return torch.is_tensor(x) and x.dim() == 0 + + +def _communicate_optim_state( + fsdp_param_info: FSDPParamInfo, + flat_param_state: Dict[str, Any], +) -> _ConsolidatedOptimState: + """ + Communicates the optimizer state for a flat parameter across ranks. All + ranks will hold the entire non-sharded optimizer state on GPU. + + If ``N`` is the number of tensor optimizer states in the optimizer state + dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1`` + otherwise (where the plus 1 comes from all-gathering the padding per rank). + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + flat_param_state (Dict[str, Any]): The entry in the "state" part of the + optimizer state dict corresponding to the flat parameter. + + Returns: + ConsolidatedOptimState: Consolidated optimizer state for the target + flat parameter. 
+ """ + fsdp_state = fsdp_param_info.state + flat_param = fsdp_param_info.handle.flat_param + state = _ConsolidatedOptimState() + tensor_state, zero_dim_tensor_state, non_tensor_state = ( + state.tensor_state, + state.zero_dim_tensor_state, + state.non_tensor_state, + ) + + for state_name, value in sorted_items(flat_param_state): + # Positive-dimension tensor state: communicate across ranks + if torch.is_tensor(value) and value.dim() > 0: + # If the parameter is not sharded, then neither is the + # positive-dimension tensor state, so no need to communicate it -- + # we take the target rank's value + if ( + fsdp_state.world_size == 1 + or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD + ): + tensor_state[state_name] = value + continue + assert ( + fsdp_state.compute_device is not None + ), "compute_device has not been initialized" + if value.device.type != fsdp_state.compute_device.type: + value = value.to(fsdp_state.compute_device) + # Assume that positive-dimension tensor optimizer state + # has the same shape as the sharded flat parameter + buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined] + tensor_buffer = value.new_zeros(*buffer_size) + dist.all_gather_into_tensor( + tensor_buffer, value, group=fsdp_state.process_group + ) + fsdp_state._device_handle.synchronize() + unpadded_numel = cast( + nn.Parameter, flat_param._unpadded_unsharded_size + ).numel() + tensor_state[state_name] = tensor_buffer[:unpadded_numel] + # Zero-dimension tensor state and non-tensor state: take this rank's + # value directly + else: + if _is_zero_dim_tensor(value): + zero_dim_tensor_state[state_name] = value.detach().clone() + else: + non_tensor_state[state_name] = value + return state + + +def _unflatten_communicated_optim_state( + fsdp_param_info: FSDPParamInfo, + state: _ConsolidatedOptimState, + shard_state: bool, +) -> List[Dict[str, Any]]: + """ + Unflattens the communicated optimizer state (given by ``tensor_state``, + ``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat + parameter. This should only be called on the target rank. + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + state (_ConsolidatedOptimState): Consolidated optimizer state. + + Returns: + List[Dict[str, Any]]: A :class:`list` holding the entries in the + "state" part of the optimizer state dict corresponding to the + unflattened parameters comprising the flat parameter. The final + optimizer state dict will need to map these entries using the proper + unflattened parameter IDs. 
+ """ + fsdp_state = fsdp_param_info.state + handle = fsdp_param_info.handle + flat_param = handle.flat_param + unflat_param_state: List[Dict[str, Any]] = [] + flat_param_views: Dict[str, Iterator] = {} + num_unflat_params = flat_param._num_params + tensor_state, zero_dim_tensor_state, non_tensor_state = ( + state.tensor_state, + state.zero_dim_tensor_state, + state.non_tensor_state, + ) + + for _ in range(num_unflat_params): + unflat_state_param = {} + # Add positive-dimension tensor state: unflatten with views + for state_name, flat_tensor in sorted_items(tensor_state): + views_generated = state_name in flat_param_views + if not views_generated: + views = handle._get_unflat_views(flat_tensor) + flat_param_views[state_name] = views + else: + views = flat_param_views[state_name] + optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views) + if shard_state: + osd_config = fsdp_state._optim_state_dict_config + if getattr(osd_config, "_use_dtensor", False): + assert fsdp_state._device_mesh is not None + optim_state = _ext_chunk_dtensor( + optim_state, + fsdp_state.rank, + fsdp_state._device_mesh, + fsdp_state._fsdp_extension, + ) + else: + assert fsdp_state.process_group is not None + optim_state = _ext_chunk_tensor( + optim_state, + fsdp_state.rank, + fsdp_state.world_size, + fsdp_state._device_handle.device_count(), + fsdp_state.process_group, + fsdp_state._fsdp_extension, + ) + unflat_state_param[state_name] = optim_state + + # Add zero-dimension tensor state: take the target rank's value + for state_name, zero_dim_tensor in sorted_items(zero_dim_tensor_state): + unflat_state_param[state_name] = zero_dim_tensor + # Add non-tensor state: take the target rank's value + for state_name, non_tensor in sorted_items(non_tensor_state): + unflat_state_param[state_name] = non_tensor + unflat_param_state.append(unflat_state_param) + return unflat_param_state + + +def _broadcast_processed_state( + fsdp_state: _FSDPState, + optim_state: Dict[str, Any], + group: Optional[dist.ProcessGroup], +) -> Dict[str, Any]: + objects: List[Any] = [None] + if fsdp_state.rank == 0: + objects[0] = tree_map_only( + torch.Tensor, + lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype), # type: ignore[union-attr] + optim_state, + ) + dist.broadcast_object_list(objects, src=0, group=group) + if fsdp_state.rank == 0: + return optim_state + else: + return objects[0] + + +def _broadcast_state( + fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup] +) -> Any: + if fsdp_state.rank == 0: + if not isinstance(state, torch.Tensor) or state.dim() == 0: + return state + tensor = state.to(fsdp_state.compute_device) + else: + if isinstance(state, torch.Tensor): + assert state.dim() == 0, ( + "For non-zero ranks, a tensor state should have zero dimension, " + "but got the state with shape {state.shape()}." + ) + return state + elif not isinstance(state, _PosDimTensorInfo): + return state + tensor = torch.zeros( + state.shape, dtype=state.dtype, device=fsdp_state.compute_device + ) + dist.broadcast(tensor, src=0, group=group) + return tensor + + +def _shard_orig_param_state( + fsdp_param_info: FSDPParamInfo, + fqn: str, + optim_state: Dict[str, Any], +) -> Dict[str, Any]: + """ + Shard the optimizer state for the original parameter with the name ``fqn``. + This API should only be used when ``use_orig_params`` is True. 
+ """ + if not optim_state: + return {} + fsdp_state = fsdp_param_info.state + flat_param = fsdp_param_info.handle.flat_param + param_idx = fsdp_param_info.param_indices[fqn] + shard_param_info = flat_param._shard_param_infos[param_idx] # type: ignore[attr-defined] + optim_state = _gather_state_dict( + optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device + ) + if not shard_param_info.in_shard: + return {} + # Flatten and shard the state. + new_optim_state: Dict[str, Any] = {} + intra_param_start_idx = shard_param_info.intra_param_start_idx + intra_param_end_idx = shard_param_info.intra_param_end_idx + for state_name, value in optim_state.items(): + if ( + torch.is_tensor(value) + and value.dim() > 0 + and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD + ): + value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone() # type: ignore[operator] + new_optim_state[state_name] = value + return new_optim_state + + +def _flatten_optim_state_dict( + optim_state_dict: Dict[str, Any], + model: nn.Module, + use_orig_params: bool = False, + optim: Optional[torch.optim.Optimizer] = None, + rank0_only: bool = False, + group: Optional[dist.ProcessGroup] = None, +) -> Dict[str, Any]: + """ + Flattens the full optimizer state dict, still keying by unflattened parameter + names. + + If ``use_orig_params`` is True, each rank will have all FSDP-managed + parameters but some of these parameters may be empty due to the sharding. + For a regular optim.Optimizer, states for those empty parameters will + not be initialized. So, when aggregating the FQNs across ranks, no assert + will be raised on a rank even if it does not have all the states -- it is + valid and FSDP know how to aggregate them. However, FSDP has to ignore + handling those parameters that are not managed by FSDP and do not exist on + the local rank -- it is managed by other parallelism and FSDP does not + know ho to handle/aggregate them. + + Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to + flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require + all the states even if the corresponding parameters are empty. To this end, + ``optim`` will be used to to get the initial state of the empty parameters. + ``optim`` should only be non-None if the ``optim` is KeyedOptimizer or + NamedOptimizer. + + Returns: + Dict[str, Any]: The flattened optimizer state dict. + """ + SimpleProfiler.reset() + + unflat_osd = optim_state_dict + if "state" not in unflat_osd and not rank0_only: + raise ValueError( + '`optim_state_dict` must have the keys "state"' + "to be a valid optimizer state dict" + ) + param_to_fqns = _get_param_to_fqns(model) + fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model) + fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state + + # Broadcast unflat_osd without non-scalar tensor if rank0_only is True. 
+ if rank0_only: + unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group) + + # Construct the "state" part + flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {} + unflat_osd_state = unflat_osd["state"] + all_state_keys = set(unflat_osd_state.keys()) + + for param, fqns in param_to_fqns.items(): + fqn = fqns[0] + if fqn not in unflat_osd_state: + continue + all_state_keys.difference_update(fqns) + + if rank0_only: + for fqn in fqns: + if not unflat_osd_state[fqn]: + continue + for state_name in unflat_osd_state[fqn].keys(): + unflat_osd_state[fqn][state_name] = _broadcast_state( + fsdp_state, unflat_osd_state[fqn][state_name], group=group + ) + fqn = fqns[0] + if fqn in fqn_to_fsdp_param_info: + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + if use_orig_params: + with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING): + flat_state = _shard_orig_param_state( + fsdp_param_info, + fqn, + unflat_osd_state[fqn], + ) + else: + flat_state = _flatten_optim_state( + fsdp_param_info, + unflat_osd_state, + fqns, + ) + key = _OptimStateKey(tuple(fqns), True) + # Only include non-empty states since as expected by + # `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer + # or NamedOptimizer. + if flat_state: + flat_osd_state[key] = flat_state + elif use_orig_params: + assert ( + len(fqns) == 1 + ), f"use_orig_params is True but there are multiple FQNs, {fqns}." + if optim is not None: # NamedOptimizer or KeyedOptimizer case. + state = optim.state.get(param, None) # type: ignore[call-overload] + if state is not None: + flat_osd_state[key] = copy.deepcopy(state) + else: + warnings.warn( + f"optim_state[{key}] is not on rank{fsdp_state.rank}." + ) + + else: + raise RuntimeError( + f"The state of {key} is empty. This should happen when " + "use_orig_params=True." + ) + else: # do not flatten non-FSDP parameters' states + assert len(fqns) == 1 + key = _OptimStateKey(tuple(fqns), False) + flat_osd_state[key] = copy.copy(unflat_osd_state[fqn]) + + if rank0_only: + for fqn in fqns: + if not unflat_osd_state[fqn]: + continue + for state_name, param_state in list(unflat_osd_state[fqn].items()): + if fsdp_state.rank > 0: + # Deference the tensor so that PyTorch can collect the memory. + del unflat_osd_state[fqn][state_name] + else: + # Move the tensor in the original osd back to CPU to make the + # original osd unaffected. + unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][ + state_name + ].cpu() + + # Handle user-defined state, states that are not associated with parameters. 
+ for key in all_state_keys: + user_state = unflat_osd_state[key] + if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params: + user_state = _broadcast_state(fsdp_state, user_state, group=group) + flat_osd_state[key] = copy.copy(user_state) + + SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ") + # Construct the "param_groups" part -- copy as is since it will be + # rekeyed later according to the target rank's optimizer + # Only copy param_groups if it exists in unflat_osd + if "param_groups" in unflat_osd: + flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"]) + return {"state": flat_osd_state, "param_groups": flat_osd_param_groups} + else: + return {"state": flat_osd_state} + + +def _flatten_optim_state( + fsdp_param_info: FSDPParamInfo, + unflat_osd_state: Dict[str, Dict[str, Any]], + unflat_param_names: List[str], +) -> Dict[str, Any]: + """ + Flattens the optimizer state in ``full_optim_state_dict`` for a single + flat parameter in ``fsdp_param_info`` corresponding to the unflattened + parameter names in ``unflat_param_names``. + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the + optimizer state dict corresponding to the unflattened parameters. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the flat parameter ``flat_param``. + + Returns: + Dict[str, Any]: A :class:`dict` mapping state names to their values for + a particular flat parameter. The sharded optimizer state dict's "state" + part will map a key to this returned value. + """ + fsdp_state = fsdp_param_info.state + handle = fsdp_param_info.handle + flat_param = handle.flat_param + num_unflat_params = len(unflat_param_names) + assert num_unflat_params > 0, ( + "Expects at least one unflattened parameter corresponding to the " + "flat parameter" + ) + unflat_param_shapes = flat_param._shapes + num_unflat_param_shapes = len(unflat_param_shapes) + assert ( + num_unflat_params == num_unflat_param_shapes + ), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}" + + # Check if these unflattened parameters have any optimizer state + has_state = [ + bool(unflat_param_name in unflat_osd_state) + for unflat_param_name in unflat_param_names + ] + # If none of the unflattened parameters comprising this flat parameter have + # any state, then we do not want an entry in the optimizer state dict + if not any(has_state): + return {} # no need to flatten any state + # There may still be some unflattened parameters with state and some + # without + unflat_param_states = [ + _gather_state_dict( + unflat_osd_state[unflat_param_name], + pg=fsdp_state.process_group, + device=fsdp_state.compute_device, + ) + if unflat_param_name in unflat_osd_state + else None + for unflat_param_name in unflat_param_names + ] + # Check that the unflattened parameters have the same state names + state_names = None + for unflat_param_state in unflat_param_states: + if unflat_param_state is None: + continue + if state_names is None: + state_names = set(unflat_param_state.keys()) + else: + if state_names != set(unflat_param_state.keys()): + raise ValueError( + "Differing optimizer state names for the unflattened " + f"parameters: {unflat_param_names}" + ) + assert state_names is not None + + # Flatten the state + flat_state: Dict[str, Any] = {} + for state_name in state_names: + state_values = [ + 
unflat_param_state[state_name] if unflat_param_state is not None else None + for unflat_param_state in unflat_param_states + ] + non_none_state_values = [v for v in state_values if v is not None] + # If all ranks have None, this is a None value + if not non_none_state_values: + flat_state[state_name] = None + continue + are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True + for v in non_none_state_values: + are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0 + are_zero_dim_tensors &= _is_zero_dim_tensor(v) + are_non_tensors &= not torch.is_tensor(v) + types = {type(v) for v in non_none_state_values} + if len(types) != 1 or not ( + are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors + ): + raise ValueError( + f"Differing optimizer state types for state {state_name}, " + f"values {non_none_state_values}, and unflattened parameter " + f"names {unflat_param_names}" + ) + if are_pos_dim_tensors: + flat_tensor = _flatten_tensor_optim_state( + state_name, + state_values, + unflat_param_names, + unflat_param_shapes, + handle, + ) + # Shard the flattened tensor immediately to minimize max memory + # usage + if ( + fsdp_state.world_size != 1 + and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD + ): + sharded_flat_tensor, _ = FlatParamHandle._get_shard( + flat_tensor, + fsdp_state.rank, + fsdp_state.world_size, + ) + else: + sharded_flat_tensor = flat_tensor + flat_state[state_name] = sharded_flat_tensor + elif are_zero_dim_tensors: + flat_state[state_name] = _flatten_zero_dim_tensor_optim_state( + state_name, + state_values, + unflat_param_names, + ) + else: + assert are_non_tensors + flat_state[state_name] = _flatten_non_tensor_optim_state( + state_name, + state_values, + unflat_param_names, + ) + + return flat_state + + +def _flatten_tensor_optim_state( + state_name: str, + pos_dim_tensors: List[torch.Tensor], + unflat_param_names: List[str], + unflat_param_shapes: Sequence[torch.Size], + handle: FlatParamHandle, +) -> torch.Tensor: + """ + Flattens the positive-dimension tensor optimizer state given by the values + ``tensors`` for the state ``state_name`` for a single flat parameter + from ``handle`` corresponding to the unflattened parameter names + ``unflat_param_names`` and unflatted parameter shapes + ``unflat_param_shapes``. This flattens each unflattened parameter's tensor + state into one tensor. + + NOTE: We use zero tensors for any unflattened parameters without state + since some value is required to fill those entries. This assumes that the + zero tensor is mathematically equivalent to having no state, which is true + for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all + optimizers. + + Args: + state_name (str): Optimizer state name. + pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor + optimizer state values for the unflattened parameters corresponding + to the single flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes + corresponding to the single flat parameter. + handle (FlatParamHandle): The flat parameter's handle. + + Returns: + torch.Tensor: A flat tensor containing the optimizer state + corresponding to ``state_name`` constructed by concatenating the + unflattened parameter tensor states in ``pos_dim_tensors`` (using zero + tensors for any unflattened parameters without the state). 
+ """ + flat_param = handle.flat_param + non_none_tensors = [t for t in pos_dim_tensors if t is not None] + # Check that all are tensors with the same dtype + dtypes = {t.dtype for t in non_none_tensors} + if len(dtypes) != 1: + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have positive-dimension tensor state with the " + f"same dtype but got dtypes {dtypes} for state {state_name} and " + f"unflattened parameter names {unflat_param_names}" + ) + dtype = next(iter(dtypes)) + # Check that each tensor state matches its parameter's shape + for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes): + if tensor is None and len(shape) == 0: + raise ValueError("Flattening a zero-dimension parameter is not supported") + elif tensor is not None and tensor.shape != shape: + raise ValueError( + "Tensor optimizer state does not have same shape as its " + f"parameter: {tensor.shape} {shape}" + ) + # Flatten the tensor states: we do not need to add any right-hand-side + # padding since the flat optimizer state tensor is sharded via + # `_get_shard()`, which pads the shard as needed (just like for the flat + # parameter) + cpu_device = torch.device("cpu") + tensors_to_flatten = [ + torch.flatten(state_value.to(cpu_device)) + if state_value is not None + else torch.flatten( + torch.zeros( + size=shape, + dtype=dtype, + device=cpu_device, + ) + ) + for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes) + ] + flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel) + flat_param_shape = flat_param._unpadded_unsharded_size # type: ignore[attr-defined] + assert flat_tensor.shape == flat_param_shape, ( + f"tensor optim state: {flat_tensor.shape} " + f"flat parameter: {flat_param_shape}" + ) + return flat_tensor + + +def _flatten_zero_dim_tensor_optim_state( + state_name: str, + zero_dim_tensors: List[torch.Tensor], + unflat_param_names: List[str], +) -> torch.Tensor: + """ + Flattens the zero-dimension tensor optimizer state given by the values + ``zero_dim_tensors`` for the state ``state_name`` for a single flat + parameter corresponding to the unflattened parameter names + ``unflat_param_names`` by enforcing that all tensors are the same and using + that common value. + + NOTE: The requirement that the tensors are the same across all unflattened + parameters comprising the flat parameter is needed to maintain the + invariant that FSDP performs the same computation as its non-sharded + equivalent. This means that none of the unflattened parameters can be + missing this state since imposing a value may differ from having no value. + For example, for Adam's "step", no value means maximum bias correction, + while having some positive value means less bias correction. + + Args: + state_name (str): Optimizer state name. + zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state + for the unflattened parameters corresponding to the single + flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + + Returns: + torch.Tensor: A zero-dimensional tensor giving the value of the state + ``state_name`` for all unflattened parameters corresponding to the + names ``unflat_param_names``. 
+ """ + non_none_tensors = [t for t in zero_dim_tensors if t is not None] + # Enforce that all have the same value and dtype + values_set = {t.item() if t is not None else None for t in zero_dim_tensors} + dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors} + if ( + len(non_none_tensors) != len(zero_dim_tensors) + or len(values_set) != 1 + or len(dtypes) != 1 + ): + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have scalar state with the same value and dtype " + f"but got values {values_set} and dtypes {dtypes} for state " + f"{state_name} and unflattened parameter names " + f"{unflat_param_names}" + ) + value = next(iter(values_set)) + dtype = next(iter(dtypes)) + return torch.tensor(value, dtype=dtype, device=torch.device("cpu")) + + +def _flatten_non_tensor_optim_state( + state_name: str, + non_tensors: List[Any], + unflat_param_names: List[str], +) -> Any: + """ + Flattens the non-tensor optimizer state given by the values ``non_tensors`` + for the state ``state_name`` for a single flat parameter corresponding + to the unflattened parameter names ``unflat_param_names`` by enforcing that + all values are the same and using that common value. + + See the note in :func:`_flatten_zero_dim_tensor_optim_state`. + + Args: + state_name (str): Optimizer state name. + non_tensors (List[Any]): Non-tensor optimizer state for the unflattened + parameters corresponding to the single flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + + Returns: + Any: A non-tensor giving the value of the state ``state_name`` for all + unflattened parameters corresponding to the names + ``unflat_param_names``. + """ + non_none_non_tensors = [nt for nt in non_tensors if nt is not None] + # Enforce that all have the same value (same type already checked) + non_tensor_set = set(non_tensors) + if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1: + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have scalar state with the same value and dtype " + f"but got values {non_tensor_set} for state {state_name} and " + f"unflattened parameter names {unflat_param_names}" + ) + non_tensor = next(iter(non_tensor_set)) + return non_tensor + + +def _rekey_sharded_optim_state_dict( + sharded_osd: Dict[str, Any], + model: nn.Module, + optim: torch.optim.Optimizer, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ], + using_optim_input: bool, + is_named_optimizer: bool = False, +) -> Dict[str, Any]: + """ + Rekeys the optimizer state dict from unflattened parameter names to flat + parameter IDs according to the calling rank's ``optim``, which may be + different across ranks. In particular, the unflattened parameter names are + represented as :class:`_OptimStateKey` s. 
+ """ + param_to_fqns = _get_param_to_fqns(model) + flat_param_to_fqn = _get_flat_param_to_fqn(model) + param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast( + Dict[nn.Parameter, Union[int, str]], + ( + _get_param_to_param_id_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_to_param_key( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + ), + ) + # All parameter keys in `param_to_param_key` should be in + # `param_to_fqns` -- strict inequality follows when not all parameters are + # passed to the optimizer + assert len(param_to_param_key) <= len(param_to_fqns) + + unflat_param_names_to_flat_param_key: Dict[ + Tuple[str, ...], Union[int, str] + ] = {} # for "state" + unflat_param_name_to_flat_param_key: Dict[ + str, Union[int, str] + ] = {} # for "param_groups" + for param, unflat_param_names in param_to_fqns.items(): + if param not in param_to_param_key: + # This parameter was not passed to the optimizer + continue + flat_param_key = param_to_param_key[param] + unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key + for unflat_param_name in unflat_param_names: + unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key + + sharded_osd_state = sharded_osd["state"] + rekeyed_osd_state: Dict[Union[str, int], Any] = {} + for key, param_state in sharded_osd_state.items(): + if isinstance(key, str): + rekeyed_osd_state[key] = param_state + continue + flat_param_key = unflat_param_names_to_flat_param_key.get( + key.unflat_param_names, key.unflat_param_names + ) + rekeyed_osd_state[flat_param_key] = param_state + + # Only process param_groups if it exists in sharded_osd + if "param_groups" in sharded_osd: + rekeyed_osd_param_groups: List[Dict[str, Any]] = [] + for unflat_param_group in sharded_osd["param_groups"]: + flat_param_group = copy.deepcopy(unflat_param_group) + flat_param_keys = sorted( + { + unflat_param_name_to_flat_param_key[unflat_param_name] + for unflat_param_name in unflat_param_group["params"] + } + ) + flat_param_group["params"] = flat_param_keys + rekeyed_osd_param_groups.append(flat_param_group) + return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups} + else: + return {"state": rekeyed_osd_state} + + +def _get_param_id_to_param_from_optim_input( + model: nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ] = None, +) -> Dict[int, nn.Parameter]: + """ + Constructs a mapping from parameter IDs to parameters. This may be used + both for models with ``FlatParameter`` s and without. + + NOTE: This method is only preserved for backward compatibility. The method + :meth:`_get_param_key_to_param` is the preferred code path that does not + rely on ``optim_input``. + + NOTE: We critically assume that, whether the optimizer input is a list of + parameters or a list of parameter groups, :class:`torch.optim.Optimizer` + enumerates the parameter IDs in order. In other words, for a parameter list + input, the parameter IDs should be in that list order, and for a parameter + groups input, the parameter IDs should be in order within each parameter + group and in order across parameter groups. + + Args: + model (nn.Module): Model whose parameters are passed into the + optimizer. 
+ optim_input (Optional[Union[List[Dict[str, Any]], + Iterable[nn.Parameter]]]): Input passed into the optimizer + representing either a :class:`list` of parameter groups or an + iterable of parameters; if ``None``, then this method assumes the + input was ``model.parameters()``. (Default: ``None``) + + Returns: + List[nn.Parameter]: Mapping from parameter IDs to parameters, + where the parameter ID is implicitly the index in the :class:`list`. + """ + # Assume the standard case of passing `model.parameters()` to the optimizer + # if `optim_input` is not specified + if optim_input is None: + return dict(enumerate(model.parameters())) + try: + params = cast(List[nn.Parameter], list(optim_input)) + except TypeError as e: + raise TypeError( + "Optimizer input should be an iterable of Tensors or dicts, " + f"but got {optim_input}" + ) from e + if len(params) == 0: + raise ValueError("Optimizer input should not be empty") + + # Check if the optimizer input represents tensors or parameter groups + all_tensors = True + all_dicts = True + for param in params: + all_tensors &= isinstance(param, torch.Tensor) + all_dicts &= isinstance(param, dict) + if not all_tensors and not all_dicts: + raise TypeError("Optimizer input should be an iterable of Tensors or dicts") + if all_tensors: + return dict(enumerate(params)) + assert all_dicts + param_id_to_param: List[nn.Parameter] = [] + for param_group in params: + has_params_key = "params" in param_group # type: ignore[operator] + assert has_params_key, ( + 'A parameter group should map "params" to a list of the ' + "parameters in the group" + ) + # Implicitly map `flat_param_id` (current length of the list) to + # `param` + param_id_to_param.extend(param_group["params"]) # type: ignore[index] + return dict(enumerate(param_id_to_param)) + + +def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]: + """ + Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes + from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical" + because ``FlatParameter`` s do not come from the original module but are + registered only after FSDP has been applied. This function returns the FSDP-given + name for the ``FlatParameter`` (usually module._flat_param) as opposed to the + canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``). + + Consequently, this function will only return a non-empty mapping if FSDP was + applied with ``use_orig_params=False`` as, otherwise, the original parameters + are used within the module and there would be no ``FlatParameter`` s in the module. 
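+
+ A sketch of the returned mapping for a single wrapped module (hypothetical
+ module name; the exact FQN depends on where FSDP registered the
+ ``FlatParameter``)::
+
+ >>> # xdoctest: +SKIP
+ >>> flat_param_to_fqn = _get_flat_param_to_fqn(model)
+ >>> # e.g. {<FlatParameter>: 'layer._flat_param'} when use_orig_params=False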
+ + """ + + def module_fn(module, prefix, tree_level, flat_param_to_fqn): + for param_name, param in _named_parameters_with_duplicates( + module, recurse=False + ): + if not isinstance(param, FlatParameter): + continue + fqn = clean_tensor_name(prefix + param_name) + flat_param_to_fqn[param] = fqn + + def return_fn(flat_param_to_fqn): + return flat_param_to_fqn + + flat_param_to_fqn_ret: Dict[FlatParameter, str] = {} + return _apply_to_modules( + model, + module_fn, + return_fn, + [fqn for fqn, _ in _named_parameters_with_duplicates(model)], + flat_param_to_fqn_ret, + ) + + +def _get_param_key_to_param( + optim: torch.optim.Optimizer, + model: Optional[nn.Module] = None, + is_named_optimizer: bool = False, + param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None, + flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None, +) -> Dict[Union[int, str], nn.Parameter]: + """ + Constructs a mapping from parameter keys to parameters. For the regular + optimizers, the keys are parameter IDs. For NamedOptimizer, the keys + are FQNs. This API may be used both for models with ``FlatParameter`` s and + without. + """ + clean_fqn_to_curr_fqn: Dict[str, str] = {} + if is_named_optimizer: + assert ( + param_to_fqns is not None and flat_param_to_fqn is not None + ), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None." + assert model is not None + for key, _ in _named_parameters_with_duplicates(model): + clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key + + param_key_to_param: Dict[Union[str, int], nn.Parameter] = {} + pid = 0 + for param_group in optim.param_groups: + if is_named_optimizer: + for param in param_group["params"]: + assert flat_param_to_fqn is not None + if param in flat_param_to_fqn: + # FlatParameter case + key = flat_param_to_fqn[param] + else: + assert param_to_fqns is not None + # use_orig_params case + assert len(param_to_fqns[param]) == 1 + key = param_to_fqns[param][0] + try: + key = clean_fqn_to_curr_fqn[key] + except KeyError as e: + raise KeyError( + f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}." + ) from e + param_key_to_param[key] = param + else: + for param in param_group["params"]: + param_key_to_param[pid] = param + pid += 1 + + return param_key_to_param + + +def _get_param_to_param_key( + optim: torch.optim.Optimizer, + model: Optional[nn.Module] = None, + is_named_optimizer: bool = False, + param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None, + flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None, +) -> Dict[nn.Parameter, Union[int, str]]: + """ + Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API + only supports the case where `optim` is a regular optimizer, not NamedOptimizer. + So the parameter keys will be parameter ids. 
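+
+ Example (hypothetical model and a regular, non-named optimizer)::
+
+ >>> # xdoctest: +SKIP
+ >>> optim = torch.optim.Adam(model.parameters(), lr=1e-3)
+ >>> param_to_key = _get_param_to_param_key(optim)
+ >>> # maps each parameter object to its parameter ID, e.g. {p0: 0, p1: 1}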
+ """ + param_id_to_param = _get_param_key_to_param( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + return {param: param_id for param_id, param in param_id_to_param.items()} + + +def _get_param_to_param_id_from_optim_input( + model: nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ] = None, +) -> Dict[nn.Parameter, int]: + """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`.""" + param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input) + return {param: param_id for param_id, param in param_id_to_param.items()} + + +def _check_missing_keys_on_rank( + r0_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]], + param_key_to_param: Dict[Union[str, int], nn.Parameter], + group: Optional[dist.ProcessGroup], +) -> None: + # Ensure that all ranks have at least the optimizer states needed by + # rank 0's optimizer + missing_keys: List[_OptimStateKey] = [] + for r0_optim_state_key in r0_optim_state_keys: + if r0_optim_state_key not in optim_state_key_to_param_key: + # A parameter from rank 0's optimizer does not exist for this + # rank's optimizer + missing_keys.append(r0_optim_state_key) + continue + param_key = optim_state_key_to_param_key[r0_optim_state_key] + if isinstance(param_key, int): + assert param_key >= 0 and param_key < len( + param_key_to_param + ), "Check the `param_key_to_param` construction" + # We cannot use FSDPState.compute_device as this API is a global view. + device = _get_pg_default_device(group) + num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device) + dist.all_reduce(num_missing, group=group) + if num_missing.item() > 0: + obj_list = [None for _ in range(dist.get_world_size(group))] + dist.all_gather_object(obj_list, missing_keys, group=group) + error_msg = ( + "FSDP currently requires each rank to have at least the " + "optimizer states needed by rank 0's optimizer but some ranks " + "are missing some of those states" + ) + for rank, keys in enumerate(obj_list): + keys = cast(List[_OptimStateKey], keys) + if len(keys) > 0: + error_msg += ( + f"\nRank {rank} is missing states for the parameters: " + f"{[key.unflat_param_names for key in keys]}" + ) + raise RuntimeError(error_msg) + + +def _map_param_key_to_optim_keys( + optim_state_dict: Dict[str, Any], + group: Optional[dist.ProcessGroup], + param_key_to_param: Dict[Union[int, str], nn.Parameter], + param_to_fqns: Dict[nn.Parameter, List[str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + merge_keys: bool = False, +) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]: + """ + Construct the local mapping between the ``_OptimStateKey`` and parameter keys + and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0 + must contain all the ``_OptimStateKey``, an exception will be raised otherwise. + Note that ``merge_keys`` should equal to ``use_orig_params``. 
+ """ + rank = dist.get_rank(group) + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {} # local + all_optim_state_keys: List[_OptimStateKey] = [] + + for param_key, param in param_key_to_param.items(): + # Do not include parameters without state to avoid empty mappings + # just like in normal `torch.optim.Optimizer.state_dict()` + if param_key not in optim_state_dict["state"]: + continue + fqns = param_to_fqns[param] + is_fsdp_managed = isinstance(param, FlatParameter) + if is_fsdp_managed: + assert fqns[0] in fqn_to_fsdp_param_info, ( + fqns[0], + list(fqn_to_fsdp_param_info.keys()), + ) + is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info + optim_state_key = _OptimStateKey( + unflat_param_names=tuple(fqns), + is_fsdp_managed=is_fsdp_managed, + ) + if rank == 0 or merge_keys: + all_optim_state_keys.append(optim_state_key) + optim_state_key_to_param_key[optim_state_key] = param_key + + if merge_keys: + all_keys: List[List[_OptimStateKey]] = [ + [] for _ in range(dist.get_world_size(group)) + ] + dist.all_gather_object(all_keys, all_optim_state_keys, group=group) + merge_all_optim_state_keys = [ + key for local_keys in all_keys for key in local_keys + ] + all_optim_state_keys = sorted(set(merge_all_optim_state_keys)) + else: + key_obj_list: List[Optional[List[_OptimStateKey]]] = ( + [all_optim_state_keys] if rank == 0 else [None] + ) + dist.broadcast_object_list(key_obj_list, src=0, group=group) + assert key_obj_list[0] is not None + all_optim_state_keys = key_obj_list[0] + _check_missing_keys_on_rank( + all_optim_state_keys, + optim_state_key_to_param_key, + param_key_to_param, + group, + ) + + return all_optim_state_keys, optim_state_key_to_param_key + + +def _unflatten_param_groups( + state_dict: Dict[str, Any], + param_key_to_param: Dict[Union[int, str], nn.Parameter], + param_to_fqns: Dict[nn.Parameter, List[str]], +) -> List[Dict[str, Any]]: + param_groups: List[Dict[str, Any]] = [] + for flat_param_group in state_dict["param_groups"]: + unflat_param_group = copy.deepcopy(flat_param_group) + param_group_params = [ + param_key_to_param[flat_param_key] + for flat_param_key in flat_param_group["params"] + ] + nested_unflat_param_names = [ + param_to_fqns[param] for param in param_group_params + ] + unflat_param_group["params"] = [ + unflat_param_name + for unflat_param_names in nested_unflat_param_names + for unflat_param_name in unflat_param_names + ] # flatten the list of lists + param_groups.append(unflat_param_group) + return param_groups + + +def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool: + """ + Returns whether the state_dict is from a NamedOptimizer. + This function checks that the keys in the state_dict['state'] are strings + (which usually are FQNs) versus integers (which usually refer to param_ids + from a vanilla torch.optim.Optimizer). + """ + state = optim_state_dict.get("state", None) + if not state: + # If we cannot find a state, assume it is not NamedOptimizer as + # NamedOptimizer has eager initialization. + return False + try: + key = next(iter(state.keys())) + except Exception as e: + raise Exception(optim_state_dict) from e + return isinstance(key, str) + + +@dataclass +class StateInfo: + # The key of these dictionaries are the state name, e.g., `exp_avg`. 
+ tensors: Dict[str, _PosDimTensorInfo] + scalar_tensors: Dict[str, torch.Tensor] + non_tensors: Dict[str, Any] + + +def _allgather_state_info( + fsdp_state: _FSDPState, + input_states: Dict[str, Any], +) -> List[Dict[str, StateInfo]]: + """ + Given the ``input_states``, allgather StateInfo for each state. The function + uses all_gather_object to gather StateInfo so no GPU tensors are sent. + """ + + processed_state_dict: Dict[str, StateInfo] = {} + gathered_state_info: List[Dict[str, StateInfo]] = [ + {} for _ in range(fsdp_state.world_size) + ] + + for fqn, optim_state in input_states.items(): + # Allgather the scalar tensor state, non-tensor states and tensors metadata. + processed_state = StateInfo({}, {}, {}) + for state_name, value in sorted_items(optim_state): + if torch.is_tensor(value): + if value.dim() == 0: + # Ensure that `step` is on CPU. + processed_state.scalar_tensors[state_name] = value.cpu() + else: + processed_state.tensors[state_name] = _PosDimTensorInfo( + value.shape, value.dtype + ) + else: + processed_state.non_tensors[state_name] = value + processed_state_dict[fqn] = processed_state + dist.all_gather_object( + gathered_state_info, + processed_state_dict, + group=fsdp_state.process_group, + ) + return gathered_state_info + + +def _convert_all_state_info( + fsdp_param_info: FSDPParamInfo, + gathered_state_info: List[Dict[str, StateInfo]], + input_states: Dict[str, Any], + output_states: Dict[str, Dict[str, Any]], +) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]: + """ + Given the ``gathered_state_info`` and ``input_states``, the API converted + the StateInfo into the original state if the state is not a non-scalar + tensor. For a multi-dimensional tensor, the local state will be stored in + ``state_buffer`` in a correct order for later allgather purpose. + """ + + state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {} + + for fqn, gathered_state in output_states.items(): + state_info = [s[fqn] for s in gathered_state_info] + all_tensor_states = sorted( + {n for state in state_info for n in state.tensors.keys()} + ) + empty_ranks: Set[int] = set() + dtype: Optional[torch.dtype] = None + # First check all the non-scalar states and get the information of + # states on each rank. + for state_name in all_tensor_states: + numels = [] + _empty_ranks: Set[int] = set() + for rank, object_state in enumerate(state_info): + numels.append(0) + info = object_state.tensors.get(state_name, None) + if info is not None: + numels[-1] = info.shape.numel() + if not dtype: + dtype = info.dtype + else: + assert dtype == info.dtype + if numels[-1] == 0: + _empty_ranks.add(rank) + + assert not empty_ranks or empty_ranks == _empty_ranks + empty_ranks = _empty_ranks + if state_name not in state_buffers: + state_buffers[state_name] = [ + None for _ in fsdp_param_info.param_indices + ] + local_state = input_states[fqn].get(state_name, None) + # N.B. We need to move the state to compute_device. The reason is + # not yet clear and we need to figure out why the state may be on a + # different device. + if local_state is not None: + local_state = local_state.to(fsdp_param_info.state.compute_device) + state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state + + # Restoring the scalar and non-tensor states. If the corresponding + # non-scalar states do not exist on the rank, we also skip the scalar + # non-tensor states on that rank. 
+ for rank, object_state in enumerate(state_info): + if rank in empty_ranks: + continue + for name, non_tensor_value in object_state.non_tensors.items(): + curr_non_tensor_value = gathered_state.get(name, None) + assert ( + curr_non_tensor_value is None + or curr_non_tensor_value == non_tensor_value + ), ( + f"Rank {rank} has different values for {name}: {non_tensor_value}." + + f" Other ranks: {curr_non_tensor_value}" + ) + gathered_state[name] = non_tensor_value + + for name, scalar_tensor_value in object_state.scalar_tensors.items(): + curr_scalar_tensor_value = gathered_state.get(name, None) + assert curr_scalar_tensor_value is None or torch.equal( + scalar_tensor_value, curr_scalar_tensor_value + ), ( + f"Rank {rank} has different values for {name}: {scalar_tensor_value}." + + f" Other ranks: {curr_scalar_tensor_value}" + ) + gathered_state[name] = scalar_tensor_value + + return dtype, state_buffers # type: ignore[possibly-undefined] + + +def _unflatten_orig_param_states( + fsdp_param_info: FSDPParamInfo, + output_states: Dict[str, Dict[str, Any]], + state_name: str, + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> None: + """ + Given a output state dict, ``output_states``, which the keys are FQNs to the + original parameters (not FlatParameters nor parmeter ID), and the values + are gathered states, unflatten the states to the original dimensions. + + This function performs the unflattening process in-place. + """ + if not to_save: + return + flat_param = fsdp_param_info.handle.flat_param + fsdp_state = fsdp_param_info.state + for fqn, gathered_state in output_states.items(): + value = gathered_state[state_name] + param_idx = fsdp_param_info.param_indices[fqn] + + # TODO: This solution is not general and only apply to PTD TP solution. + if isinstance(value, DTensor): + placement = value.placements[0] + # If gathered state is a DTensor and its TP placement is not Replicate(), we need to + # gather the tensor on its TP dimension before chunking them into DTensor again. + if placement != Replicate(): + placement_dim = placement.dim # type: ignore[attr-defined] + value_local = value.redistribute(placements=(Replicate(),)) + reshape_size = list(flat_param._shapes[param_idx]) + reshape_size[placement_dim] *= value.device_mesh.size(0) + reshape_size = torch.Size(reshape_size) + value = value.reshape(reshape_size) + # If gathered state is a replicate DTensor, we directly reshape it. + else: + value = value.reshape(flat_param._shapes[param_idx]) + else: + # If gathered state is a tensor, we directly reshape it into unflatten state. 
+ value = value.reshape(flat_param._shapes[param_idx]) + + if shard_state: + osd_config = fsdp_state._optim_state_dict_config + if getattr(osd_config, "_use_dtensor", False): + assert fsdp_state._device_mesh is not None + value = _ext_chunk_dtensor( + value, + fsdp_state.rank, + fsdp_state._device_mesh, + fsdp_state._fsdp_extension, + ) + else: + assert fsdp_state.process_group is not None + value = _ext_chunk_tensor( + value, + fsdp_state.rank, + fsdp_state.world_size, + fsdp_state._device_handle.device_count(), + fsdp_state.process_group, + fsdp_state._fsdp_extension, + ) + elif not cpu_offload: + with SimpleProfiler.profile("clone"): + value = value.detach().clone() + + if cpu_offload: + with SimpleProfiler.profile(SimpleProfiler.Type.D2H): + value = value.cpu() + gathered_state[state_name] = value + + +def _allgather_orig_param_states( + fsdp_param_info: FSDPParamInfo, + gathered_state_info: List[Dict[str, StateInfo]], + input_states: Dict[str, Any], + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> Dict[str, Dict[str, Any]]: + """ + Given the ``gathered_state_info`` and ``input_states``, the API allgathers + all tensor states and restore non-tensor states from ``gathered_state_info``. + """ + fsdp_state = fsdp_param_info.state + if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL: + logger.warning( + "CUDA Memory Summary before calling to _allgather_orig_param_states %s", + torch.cuda.memory_summary(), + ) + + output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()} + + dtype, state_buffers = _convert_all_state_info( + fsdp_param_info, gathered_state_info, input_states, output_states + ) + + if len(state_buffers) == 0: + return output_states + + has_state_params: List[bool] = [ + True if fqn in output_states else False + for fqn, idx in fsdp_param_info.param_indices.items() + ] + + # Loop through the ``state_buffers`` and construct the flattened, concatenated, + # sharded states. The size of the constructed state will be the same size as + # flat_param (also sharded). + # Then we perform an allgather_into_tensor to get the full flat_param state. + # The full flat_param state is the result of concatenation of multiple states + # the order of of flat_param._fqns. + # The final step is to split the flat_param state into original param states + # and return the result. + flat_param = fsdp_param_info.handle.flat_param + empty_func = functools.partial( + torch.empty, dtype=dtype, device=fsdp_state.compute_device + ) + gathered_tensor = empty_func(flat_param._padded_unsharded_size) + # Synchronize can be slow but this will be easier for us to debug. + torch.cuda.synchronize() + for state_name, buffers in state_buffers.items(): + local_buffers: List[torch.Tensor] = [] + begin = fsdp_state.rank * flat_param._sharded_size.numel() + # End is inclusive. + end = begin + flat_param._sharded_size.numel() - 1 + # param_idx corresponds to the parameter index in the FlatParameter. + mem_offset, param_idx = 0, 0 + for numel, is_padding in zip( + flat_param._numels_with_padding, flat_param._is_padding_mask + ): + frozen_and_no_state = not is_padding and ( + not fsdp_param_info.param_requires_grad[param_idx] + and not has_state_params[param_idx] + ) + + if is_padding or frozen_and_no_state: + # This memory range is a padding or the param is frozen and does + # not require gradient. For the later case, we treat it as a + # padding and add empty values to the local_buffers. 
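+ # Worked example with hypothetical numbers: for a shard covering
+ # [begin=8, end=15] and a padding range [mem_offset=6, mem_offset+numel-1=13],
+ # the shard owns elements 8..13 of that padding, so the first branch below
+ # yields padding_len = 13 - 8 + 1 = 6.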
+ + padding_begin, padding_end = mem_offset, mem_offset + numel - 1 + if padding_begin <= begin <= padding_end: + # The range is an align padding before the first parameter in + # the shard. The shard includes parts of this align padding. + padding_len = ( + padding_end - begin + 1 + if end >= padding_end + else end - begin + 1 + ) + elif padding_begin <= end <= padding_end: + # The range is an align padding after the last parameter in + # the shard. The shard includes parts of this align padding. + padding_len = ( + end - padding_begin + 1 + if begin <= padding_begin + else end - begin + 1 + ) + elif begin < padding_begin <= padding_end < end: + # The range is an align padding that is completely in the + # shard. + padding_len = numel + else: + padding_len = 0 + if padding_len: + local_buffers.append(empty_func(padding_len)) + + if not is_padding: + # This memory range is a parameter in FlatParameter. So there + # should be an corresponding state in the optimizer unless the + # parameter is frozen, which we treat it as a padding above. + + # We need to check if this rank owns the buffer. If this is None: + # 1.) the rank does not own any part of the original parameter. + # As a result, there is no corresponding optimizer state on + # the rank as well. + # 2.) the parameter is frozen AND no optimizer state for the + # parameter. If a parameter is frozen, there can still be + # optimizer state if the parameter is not frozen in the + # previous steps. + if buffers[param_idx] is not None: + local_buffers.append(cast(torch.Tensor, buffers[param_idx])) + param_idx += 1 + + mem_offset += numel + + shard_numel_padded = flat_param._sharded_size.numel() - ( + sum(t.numel() for t in local_buffers) + ) + + assert flat_param._shard_numel_padded == shard_numel_padded, ( + "Manually calculated _sharded_numel_padded is incorrect. " + f"_shard_numel_padded={flat_param._shard_numel_padded}, " + f"shard_numel_padded={shard_numel_padded}, " + f"_sharded_size.numel={flat_param._sharded_size.numel()}, " + f"_numels_with_padding={flat_param._numels_with_padding}, " + f"begin={begin}, end={end}," + ) + if shard_numel_padded > 0: + # Add right-handed padding. + local_buffers.append(empty_func(shard_numel_padded)) + local_shard = torch.cat(local_buffers) + assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), ( + "The size of local shard times the world size should equal to the " + "gathered tensor size. The inconsistency may be from a bug of " + "FlatParameter's metadata or the reconstruction logic in optimizer " + "state dict." + ) + torch.cuda.synchronize() + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER): + dist.all_gather_into_tensor( + gathered_tensor, local_shard, group=fsdp_state.process_group + ) + # Synchronize can be slow but this will be easier for us to debug. + torch.cuda.synchronize() + + unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()] + flat_param_handle = fsdp_param_info.handle + orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor) + assert len(orig_states) == len(fsdp_param_info.param_indices), ( + "The number of parameters from FlatParameter is not consistent to " + "the number of states used by optimizer state dict reconstruction " + "logic." 
+ ) + for fqn, idx in fsdp_param_info.param_indices.items(): + if fsdp_param_info.param_requires_grad[idx] or fqn in output_states: + output_states[fqn][state_name] = orig_states[idx] + + _unflatten_orig_param_states( + fsdp_param_info, + output_states, + state_name, + shard_state, + to_save, + cpu_offload, + ) + + del gathered_tensor + return output_states + + +def _gather_all_orig_param_state( + fsdp_param_info: FSDPParamInfo, + input_states: Dict[str, Any], + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> Dict[str, Any]: + """ + Given a optimizer state dict, ``input_states``, which the keys are FQNs to the + original parameters (not FlatParameters nor parmeter ID), gather all the + states and unflatten them to the original dimensions. Note that all the + params referred by the ``input_states`` must be managed by FSDP. + """ + fsdp_state = fsdp_param_info.state + if ( + fsdp_state.world_size == 1 + or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD + ): + return input_states if to_save else {} + + with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING): + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ): + gathered_state_info = _allgather_state_info(fsdp_state, input_states) + output_states = _allgather_orig_param_states( + fsdp_param_info, + gathered_state_info, + input_states, + shard_state, + to_save, + cpu_offload, + ) + if to_save: + for key, idx in fsdp_param_info.param_indices.items(): + if key in output_states: + continue + if not fsdp_param_info.param_requires_grad[idx]: + continue + + raise RuntimeError( + f"{key} is not in the output state. " + "The FSDPParamInfo has the param keys " + f"{sorted(fsdp_param_info.param_indices.keys())} while " + "the output_states has the param keys " + f"{sorted(output_states.keys())}." + ) + return output_states + else: + return {} + + +def _convert_state_with_orig_params( + all_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + optim_state_dict: Dict[Union[str, int], Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool = True, +) -> Dict[str, Any]: + fsdp_osd_state: Dict[str, Any] = {} + # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo + # usually corresponds to multiple parameters. We could not use FSDPParamInfo + # as the key because FSDPParamInfo is not hashable. As a result, we fall back + # to `id(FSDPParamInfo)`, which the type is an integer. + all_states: Dict[int, Dict[str, Any]] = {} + # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers + # across ranks + for optim_state_key in all_optim_state_keys: + param_key: Union[str, int, None] = optim_state_key_to_param_key.get( + optim_state_key, None + ) + + if param_key is None and not optim_state_key.is_fsdp_managed: + continue + + if optim_state_key.is_fsdp_managed: + fqn = optim_state_key.unflat_param_names[0] + fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None) + if fsdp_param_info is None: + # This can happen if the not all FSDP instances have all the + # parameters. This can happen with FSDP + some MPMD style + # parallelism. + + # TODO: it is unclear if we need to do the same check with + # non-FSDP managed keys. 
+ continue + state = {} if param_key is None else optim_state_dict[param_key] + if id(fsdp_param_info) not in all_states: + all_states[id(fsdp_param_info)] = {} + all_states[id(fsdp_param_info)][fqn] = state + + elif to_save: + assert len(optim_state_key.unflat_param_names) == 1 + unflat_param_name = optim_state_key.unflat_param_names[0] + with SimpleProfiler.profile("none_fsdp_managed_copy"): + param_key = cast(Union[str, int], param_key) + fsdp_osd_state[unflat_param_name] = copy.copy( + optim_state_dict[param_key] + ) + if cpu_offload: + for state_name, value in sorted_items( + fsdp_osd_state[unflat_param_name] + ): + if not torch.is_tensor(value): + continue + fsdp_osd_state[unflat_param_name][state_name] = value.cpu() + + # Instead of gathering the state of each parameter individually, we perform + # the gathering all at once to speed up the process. + for _all_states in all_states.values(): + fqn = next(iter(_all_states.keys())) + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + assert len(fsdp_param_info.param_requires_grad) > 0, ( + "With use_orig_params, FSDPParamInfo should have requires_grad " + "information. However, the length is zero." + ) + for key, idx in fsdp_param_info.param_indices.items(): + if key in _all_states: + continue + if not fsdp_param_info.param_requires_grad[idx]: + continue + raise RuntimeError( + f"{key} is not in the optimizer state. " + "The FSDPParamInfo has the param keys " + f"{sorted(fsdp_param_info.param_indices.keys())} while " + "the optimizer has the param keys " + f"{sorted(_all_states.keys())}." + ) + fsdp_osd_state.update( + _gather_all_orig_param_state( + fsdp_param_info, + _all_states, + shard_state, + to_save, + cpu_offload, + ) + ) + + return fsdp_osd_state + + +def _convert_state_with_flat_params( + all_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + optim_state_dict: Dict[Union[str, int], Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool = True, +) -> Dict[str, Any]: + fsdp_osd_state: Dict[str, Any] = {} + # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers + # across ranks + for optim_state_key in all_optim_state_keys: + param_key: Union[str, int, None] = optim_state_key_to_param_key.get( + optim_state_key, None + ) + + assert param_key is not None, ( + "If use_orig_params is False, we must be able to find the " + f"corresponding param id. {optim_state_key} {param_key}" + ) + + if optim_state_key.is_fsdp_managed: + # If there are multiple unflat_param_names (not use_orig_params), + # they share the same FSDPParamInfo. So the first unflat_param_name + # is sufficient to fetch the FSDPParamInfo. 
+ fqn = optim_state_key.unflat_param_names[0] + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + unflat_state = _unflatten_optim_state( + fsdp_param_info, + optim_state_dict[param_key], + to_save, + shard_state, + cpu_offload, + ) + if to_save: + assert len(unflat_state) == len(optim_state_key.unflat_param_names) + for unflat_param_name, unflat_param_state in zip( + optim_state_key.unflat_param_names, + unflat_state, + ): + fsdp_osd_state[unflat_param_name] = unflat_param_state + elif to_save: + assert len(optim_state_key.unflat_param_names) == 1 + unflat_param_name = optim_state_key.unflat_param_names[0] + fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key]) + if cpu_offload: + for state_name, value in sorted_items( + fsdp_osd_state[unflat_param_name] + ): + if not torch.is_tensor(value): + continue + fsdp_osd_state[unflat_param_name][state_name] = value.cpu() + + return fsdp_osd_state + + +@torch.no_grad() +def _optim_state_dict( + model: nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ], + rank0_only: bool, + shard_state: bool, + group: Optional[dist.ProcessGroup], + using_optim_input: bool, + use_orig_params: bool = False, + cpu_offload: bool = True, +) -> Dict[str, Any]: + """ + Consolidates the optimizer state and returns it as a :class:`dict` + following the convention of :meth:`torch.optim.Optimizer.state_dict`, + i.e. with keys ``"state"`` and ``"param_groups"``. + The flat parameters in ``FSDP`` modules contained in ``model`` are mapped + back to their unflattened parameters. + + Parameter keys are not well-defined. For a regular optimizer, the optimizer + state_dict contains a mapping from parameter IDs to parameter states. + Parameter IDs are the order of parameters in ``optim.param_groups()`` across + all the groups. This API also allows user to pass ``optim_input`` for the + mapping between parameters and parameter IDs. Using ``optim_input`` is being + deprecated. + + If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not + contain parameter IDs mapping but a mapping from parameter FQNs to parameter + states. This API finds the mapping from FQNs to parameters if the optimizer + is a ``NamedOptimizer``. + + If ``use_orig_params`` is True, each rank will have all FSDP-managed + parameters but some of these parameters may be empty due to the sharding. + For a regular optim.Optimizer, states for those empty parameters will + not be initialized. So, when aggregating the FQNs across ranks, no assert + will be raised on a rank even if it does not have all the states -- it is + valid and FSDP knows how to aggregate them. However, FSDP has to ignore + handling those parameters that are not managed by FSDP and do not exist on + the local rank -- those are managed by other parallelisms and FSDP does not + know how to handle/aggregate them. + + Args: + model (nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + rank0_only (bool): If ``True``, saves the populated :class:`dict` + only on rank 0; if ``False``, saves it on all ranks. (Default: + ``True``) + shard_state (bool): If ``True``, shard and distribute all + non-zero-dimension states. 
+ + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model`` 's original unflattened parameters and including keys + "state" and "param_groups" following the convention of + :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``, + then nonzero ranks return an empty :class:`dict`. + """ + SimpleProfiler.reset() + cm = ExitStack() + cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL)) + _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model)) + to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state + + with SimpleProfiler.profile("preprocessing"): + param_to_fqns = _get_param_to_fqns(model) + flat_param_to_fqn = _get_flat_param_to_fqn(model) + is_named_optimizer = _is_named_optimizer(optim_state_dict) + + param_key_to_param = cast( + Dict[Union[int, str], nn.Parameter], + ( + _get_param_id_to_param_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_key_to_param( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + ), + ) + fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model) + + with SimpleProfiler.profile("preprocessing_with_comm"): + ( + all_optim_state_keys, + optim_state_key_to_param_key, + ) = _map_param_key_to_optim_keys( + optim_state_dict, + group, + param_key_to_param, + param_to_fqns, + fqn_to_fsdp_param_info, + merge_keys=use_orig_params, + ) + + with SimpleProfiler.profile("state_converting"): + convert_fn = ( + _convert_state_with_orig_params + if use_orig_params + else _convert_state_with_flat_params + ) + fsdp_osd_state = convert_fn( + all_optim_state_keys, + optim_state_key_to_param_key, + fqn_to_fsdp_param_info, + optim_state_dict["state"], + to_save, + shard_state, + cpu_offload, + ) + + # At this point, communication is complete and ranks can return early if nothing + # will be saved on that rank. + if not to_save: + return {} + + fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state} + + flat_param_fqns = set(flat_param_to_fqn.values()) + for key, value in optim_state_dict["state"].items(): + if key in fsdp_osd_state: + continue + if key in flat_param_fqns: + continue + if key in param_key_to_param: + continue + # This key is not recognized by FSDP. It may be a user-defined state + # or some parameters state that FSDP is unable to map from + # ``optim.param_groups``. + warnings.warn( + f"Found a optim state, {key}, that FSDP cannot process. FSDP " + "will directly copy everything to the returned state_dict. In " + "most cases, this is a user-defined state that is not " + "associated with any particular parameter. Another possible " + "case is this state is managed by TorchRec. Otherwise, there may " + " be a mismatched assumption of optim_state_dict of this mode." + ) + fsdp_osd_state[key] = value + + if "param_groups" in optim_state_dict: + fsdp_osd["param_groups"] = _unflatten_param_groups( + optim_state_dict, param_key_to_param, param_to_fqns + ) + + cm.close() + SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ") + + return fsdp_osd + + +def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]: + """ + Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo`` + if the param is managed by FSDP. Shared parameters, or original parameters that + are shared across multiple nn.Modules, are required to belong to one and only + one FSDP instance and thus correspond to one ``FlatParameter``. 
Within the one + ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared + parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters. + """ + + def module_fn(module, prefix, tree_level, fqn_to_param_info): + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state is None: + return + _lazy_init(fsdp_state, module) + handle = _module_handle(fsdp_state, module) + if not handle: + return + flat_param = handle.flat_param + fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, []) + # NOTE: `idx` indexes into the data structures *without* padding + # elements + for idx, local_fqn in enumerate(flat_param._fqns): + fqn = clean_tensor_name(prefix + local_fqn) + if fqn in fqn_to_param_info: + assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn + fqn_to_param_info[fqn] = fsdp_param_info + fsdp_param_info.param_indices[fqn] = idx + if flat_param._params is not None: + fsdp_param_info.param_requires_grad.append( + flat_param._params[idx].requires_grad + ) + + def return_fn(fqn_to_param_info): + return fqn_to_param_info + + fqn_to_param_info: Dict[str, FSDPParamInfo] = {} + # FlatParameter._fqns stores the local fqn, starting from the root of the + # FSDP. Using _apply_to_modules() with model (may not be the FSDP root + # module) allows us to construct the global fqn. + return _apply_to_modules( + model, + module_fn, + return_fn, + [fqn for fqn, _ in _named_parameters_with_duplicates(model)], + fqn_to_param_info, + ) + + +@no_type_check +def _set_optim_use_dtensor( + fsdp_state: _FSDPState, + state_dict_settings: StateDictSettings, +) -> None: + # If device_mesh is passed in when initalizing FSDP, we automatically turn the + # _use_dtensor flag to be true for ShardedOptimStateDictConfig() if state_dict_type + # has to be set to SHARDED_STATE_DICT. 
+ if getattr(fsdp_state, "_device_mesh", None): + state_dict_type = state_dict_settings.state_dict_type + if state_dict_type == StateDictType.LOCAL_STATE_DICT: + raise RuntimeError( + "Found state_dict_type LOCAL_STATE_DICT.", + "DeviceMesh is not compatible with LOCAL_STATE_DICT.", + "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.", + ) + else: + state_dict_settings.optim_state_dict_config._use_dtensor = True diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d910fd6be3d9101e9ca7199857f268545da3a783 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py @@ -0,0 +1,1630 @@ +import functools +import logging +from enum import auto, Enum +from typing import Any, Callable, Dict, List, no_type_check, Optional, Set, Tuple + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +from torch.autograd.graph import register_multi_grad_hook +from torch.distributed.algorithms._comm_hooks import LOW_PRECISION_HOOKS +from torch.distributed.fsdp._common_utils import ( + _assert_in_training_states, + _FSDPState, + _get_module_fsdp_state, + _is_composable, + _log_post_backward_hook, + _no_dispatch_record_stream, + clean_tensor_name, + TrainingState, +) +from torch.distributed.fsdp._flat_param import ( + FlatParameter, + FlatParamHandle, + HandleShardingStrategy, + HandleTrainingState, + RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES, +) +from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES +from torch.distributed.fsdp.api import BackwardPrefetch +from torch.distributed.utils import ( + _apply_to_tensors, + _cast_forward_inputs, + _p_assert, + _to_kwargs, +) +from torch.utils import _pytree as pytree + +log = logging.getLogger(__name__) + +# Do not include "process_group" to enable hybrid shard and MoE cases +HOMOGENEOUS_ATTR_NAMES = ( + "_use_orig_params", + "limit_all_gathers", + "_use_full_prec_in_eval", +) + + +class _PrefetchMode(Enum): + BACKWARD = auto() + FORWARD = auto() + + +def _get_fsdp_root_states_with_modules( + module: nn.Module, +) -> Tuple[List[_FSDPState], List[nn.Module]]: + """ + Returns a tuple containing: + 1. A list of the root ``_FSDPState`` instances in the module tree rooted at + ``module`` without any duplicates and following the ``module.modules()`` + traversal order (which is assumed to be depth-first). + 2. A corresponding list of the root modules owning the states in the first + list. + + This is similar to :func:`_get_fsdp_states_with_modules` except that we + must call :func:`_is_fsdp_root` to force a lazy initialization to determine + the FSDP root in case lazy initialization has not yet happened. + """ + fsdp_root_states: List[_FSDPState] = [] + fsdp_root_modules: List[nn.Module] = [] + visited_fsdp_states: Set[_FSDPState] = set() + # NOTE: This function assumes that `module.modules()` proceeds top-down. 
+ for submodule in module.modules(): + optional_state = _get_module_fsdp_state(submodule) + if ( + optional_state is not None + and optional_state not in visited_fsdp_states + and _is_fsdp_root(optional_state, submodule) + ): + visited_fsdp_states.add(optional_state) + fsdp_root_states.append(optional_state) + fsdp_root_modules.append(submodule) + return fsdp_root_states, fsdp_root_modules + + +def _get_fsdp_root_states(module: nn.Module) -> List[_FSDPState]: + """See :func:`_get_fsdp_root_states_with_modules`.""" + fsdp_root_states, _ = _get_fsdp_root_states_with_modules(module) + return fsdp_root_states + + +def _is_fsdp_root(state: _FSDPState, module: nn.Module) -> bool: + """ + Returns if ``state`` corresponds to that of an FSDP root. + + For the wrapper code path, ``state`` and ``module`` should be the same. For + the non-wrapper code path, ``state`` should be ``module`` 's state. + """ + # Force a lazy initialization to determine the FSDP root + _lazy_init(state, module) + assert state._is_root is not None # mypy + return state._is_root + + +@no_type_check +def _lazy_init( + state: _FSDPState, + root_module: nn.Module, +) -> _FSDPState: + """ + Performs initialization lazily, typically right before the first forward + pass. The laziness is needed to ensure that the parameter device/dtype and + the FSDP hierarchy have finalized. This method's actual logic only runs on + the root FSDP instance, which performs initialization for all non-root FSDP + instances to avoid partial initialization. + + For the non-composable code path, ``state`` and ``root_module`` should be + the same, namely the FSDP instance itself. + """ + if state._is_root is not None: + return # no-op: already lazily initialized + if not state._device_handle.is_available(): + # Allow the FSDP constructor to run even without CUDA but check this + # once we start real execution + raise RuntimeError("FSDP does not support CPU only execution") + # The following logic is only run on the root FSDP instance since it will + # set `_is_root=False` for the non-root instances + state._is_root = True + _assert_in_training_states(state, [TrainingState.IDLE]) + _check_flat_params_on_expected_device(state, root_module) + state._all_fsdp_states = traversal_utils._get_fsdp_states(root_module) + _init_streams(state) + buffers, buffer_dtypes = _get_buffers_and_dtypes_for_computation(state, root_module) + _cast_buffers_to_dtype_and_device(buffers, buffer_dtypes, state.compute_device) + state._exec_order_data.init(state, root_module, state.process_group) + _share_state_and_init_handle_attrs(state, root_module) + return state + + +def _check_flat_params_on_expected_device(state: _FSDPState, module: nn.Module): + """ + Checks that all ``FlatParameter``s in ``module`` 's tree managed by + ``state`` are on the expected device for *lazy initialization*. + """ + cpu_device = torch.device("cpu") + for handle in traversal_utils._get_fsdp_handles(module): + if ( + not handle._offload_params + and handle.flat_param.device != state.compute_device + ): + raise RuntimeError( + "An FSDP-managed module unexpectedly has parameters on " + f"{handle.flat_param.device}. Make sure to move the module to " + f"{state.compute_device} before training." + ) + elif handle._offload_params and handle.flat_param.device != cpu_device: + raise RuntimeError( + "An FSDP-managed module with parameter CPU offloading enabled " + f"has parameters on {handle.flat_param.device}. Make sure to " + f"not move the module from CPU when offloading parameters." 
+ ) + + +@no_type_check +def _share_state_and_init_handle_attrs( + root_state: _FSDPState, + root_module: nn.Module, +) -> None: + """ + Shares data structure state from the ``root_state`` to all FSDP states in + ``root_module`` 's module tree, and initializes handle attributes. These + are done together to require a single loop over the states. + """ + handle = root_state._handle + if handle: + handle.init_flat_param_attributes() + attr_name_to_values: Dict[str, Set[Any]] = {} + for attr_name in HOMOGENEOUS_ATTR_NAMES: + attr_name_to_values[attr_name] = set() + root_state._all_handles = root_state._exec_order_data.all_handles # share reference + # Update _has_optim_in_backward for each handle. + for handle in root_state._all_handles: + flat_param = handle.flat_param + if hasattr(flat_param, "_in_backward_optimizers"): + raise RuntimeError( + "FSDP optimizer in backward only supported with use_orig_params=True!" + ) + handle._has_optim_in_backward = flat_param._params is not None and any( + hasattr(param, "_in_backward_optimizers") for param in flat_param._params + ) + if handle._has_optim_in_backward: + torch._C._log_api_usage_once("fsdp.optimizer_in_backward") + for fsdp_state in root_state._all_fsdp_states: + for attr_name in HOMOGENEOUS_ATTR_NAMES: + _p_assert( + hasattr(fsdp_state, attr_name), + f"FSDP state missing attribute {attr_name}", + ) + attr_name_to_values[attr_name].add(getattr(fsdp_state, attr_name)) + if fsdp_state is root_state: + continue + # Relax the assert for non-root FSDP instances in case the nested + # initialized module is wrapped again in FSDP later (e.g. after + # training to run inference) + _p_assert( + fsdp_state._is_root is None or not fsdp_state._is_root, + "Non-root FSDP instance's `_is_root` should not have been " + "set yet or should have been set to `False`", + ) + fsdp_state._is_root = False + fsdp_state._unshard_stream = root_state._unshard_stream + fsdp_state._post_backward_stream = root_state._post_backward_stream + fsdp_state._pre_unshard_stream = root_state._pre_unshard_stream + fsdp_state._all_reduce_stream = root_state._all_reduce_stream + fsdp_state._default_stream = root_state._default_stream + fsdp_state._exec_order_data = root_state._exec_order_data + fsdp_state._free_event_queue = root_state._free_event_queue + if fsdp_state._fsdp_extension is not None: + fsdp_state._fsdp_extension.compute_stream = root_state._default_stream + handle = fsdp_state._handle + if handle: + handle.init_flat_param_attributes() + for attr_name, attr_values in attr_name_to_values.items(): + if len(attr_values) != 1: + raise ValueError( + f"Expects one homogeneous value for {attr_name} but got {attr_values}" + ) + + +@no_type_check +def _init_streams( + state: _FSDPState, +) -> None: + """ + Initializes CUDA streams for overlapping communication, computation, and + data transfers. The streams should be shared across FSDP instances. 
+ """ + assert state._is_root + assert state._device_handle.is_available() + uses_hybrid_sharding = any( + fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES + for fsdp_state in state._all_fsdp_states + ) + # Prioritize all-gathers/reduce-scatters over async all-reduce for HSDP and + # preserve the default priority of 0 otherwise + high_priority = -1 if state.limit_all_gathers and uses_hybrid_sharding else 0 + # Default stream for computation + state._default_stream = state._device_handle.current_stream() + if state._fsdp_extension is not None: + # set the compute stream to the FSDP extension + state._fsdp_extension.compute_stream = state._default_stream + + # Stream for unshard logic, including allocating the all-gather destination + # tensors and the all-gathers themselves + state._unshard_stream = state._device_handle.Stream(priority=high_priority) + # Stream for overlapping gradient reduction with the backward pass gradient + # computation + state._post_backward_stream = state._device_handle.Stream(priority=high_priority) + # Stream for pre-unshard logic, namely allocations and writes for CPU + # offloading (H2D copy) and mixed precision (low precision cast) + state._pre_unshard_stream = state._device_handle.Stream(priority=high_priority) + # Stream to run HSDP's all-reduce as async (if using HSDP) + state._all_reduce_stream = ( + state._device_handle.Stream() if uses_hybrid_sharding else state._default_stream + ) + + +@no_type_check +def _unshard( + state: _FSDPState, + handle: FlatParamHandle, + unshard_stream: torch.Stream, + pre_unshard_stream: torch.Stream, +) -> None: + """ + Unshards the handles in ``handles``. If the handles are in + :meth:`summon_full_params` and are using mixed precision, then they are + forced to full precision. + + Postcondition: handle's ``FlatParameter`` 's data is the padded + unsharded flat parameter on the compute device. + """ + if not handle: + return + with state._device_handle.stream(pre_unshard_stream): + ran_pre_unshard = handle.pre_unshard() + if ran_pre_unshard: + unshard_stream.wait_stream(pre_unshard_stream) + if state.limit_all_gathers: + event = state._free_event_queue.dequeue_if_needed() + if event: + with torch.profiler.record_function( + "FullyShardedDataParallel.rate_limiter" + ): + event.synchronize() + with state._device_handle.stream(unshard_stream): + handle.unshard() + handle.post_unshard() + + +@no_type_check +def _reshard( + state: _FSDPState, + handle: FlatParamHandle, + free_unsharded_flat_param: bool, +): + """ + Reshards the handle. ``free_unsharded_flat_param`` indicates whether to + free the handle's padded unsharded flat parameter. + """ + handle.reshard(free_unsharded_flat_param) + if state.limit_all_gathers and free_unsharded_flat_param: + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + # We don't run a even queue for freeing under torch compile atm + # But maybe we need to? TODO(voz): Look into this + free_event = state._device_handle.Event() + free_event.record() + state._free_event_queue.enqueue(free_event) + handle.post_reshard() + # Flat parameter freed or not, we always have to "unshard" the parameter + # upon next access to get its shape correct. 
+    handle._prefetched = False
+
+
+def _unshard_grads(
+    handle: Optional[FlatParamHandle],
+) -> None:
+    if handle:
+        handle.unshard_grad()
+
+
+def _reshard_grads(
+    handle: Optional[FlatParamHandle],
+) -> None:
+    if handle:
+        handle.reshard_grad()
+
+
+@no_type_check
+def _pre_forward(
+    state: _FSDPState,
+    handle: Optional[FlatParamHandle],
+    unshard_fn: Callable,
+    module: nn.Module,
+    args: Tuple[Any, ...],
+    kwargs: Dict[str, Any],
+) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
+    """
+    Runs the pre-forward logic. This includes an opportunity to unshard
+    currently sharded parameters, such as those for the current forward, and
+    to register post-backward hooks for these current parameters. This
+    function also converts forward ``args`` and ``kwargs`` to the given
+    precision.
+
+    Args:
+        handle (Optional[FlatParamHandle]): Handle giving the parameters used
+            in the current forward, if any.
+        unshard_fn (Optional[Callable]): A callable to unshard any currently
+            sharded parameters or ``None`` to not do any unsharding.
+        module (nn.Module): Module whose forward this method runs right before;
+            expected by the hook signature.
+        args (Tuple[Any, ...]): Module forward ``args``.
+        kwargs (Dict[str, Any]): Module forward ``kwargs``.
+    """
+    with torch.profiler.record_function("FullyShardedDataParallel._pre_forward"):
+        # For `fully_shard` + `checkpoint`, skip pre-forward logic in the
+        # recomputed forward
+        if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:
+            # For both checkpoint implementations, we do not need to re-cast
+            # inputs here since they will be checkpointed in the low precision
+            # either by AC or normally by autograd as long as the AC region is
+            # nested within FSDP
+            return args, kwargs
+        state.training_state = TrainingState.FORWARD_BACKWARD
+        state._exec_order_data.record_pre_forward(handle, module.training)
+        if handle:
+            handle._training_state = HandleTrainingState.FORWARD
+        if unshard_fn is not None:
+            unshard_fn(state, handle)
+        # Register post-backward hooks to reshard the parameters and reduce-scatter
+        # their gradients. They must be re-registered every forward pass in case
+        # the `grad_fn` is mutated.
+        _register_post_backward_hook(state, handle)
+        # We have to reallocate the `_cpu_grad` if the optimizer overlap
+        # set the grad to `None` in the backward pass.
+        if handle and handle._offload_params and handle.flat_param._cpu_grad is None:
+            handle.flat_param._cpu_grad = torch.zeros_like(
+                handle.flat_param._local_shard, device=torch.device("cpu")
+            ).pin_memory()
+
+        should_cast_forward_inputs = (
+            state._handle and not state._handle._force_full_precision
+        )
+
+        if should_cast_forward_inputs and state.mixed_precision.cast_forward_inputs:
+            # Recursively convert args and kwargs to specified precision.
+            input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype
+            args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs)
+        _register_post_backward_reshard_only_hook(state, handle, args, kwargs)
+        return args, kwargs
+
+
+@no_type_check
+def _pre_forward_unshard(
+    state: _FSDPState,
+    handle: Optional[FlatParamHandle],
+) -> None:
+    """Unshards parameters in the pre-forward."""
+    if not handle:
+        return
+    # If the handle has been prefetched, then there is no need to call
+    # `_unshard()` again
+    if not handle._prefetched:
+        _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)
+    handle._needs_pre_forward_unshard = False
+    # Don't wait during trace
+    if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
+        state._device_handle.current_stream().wait_stream(state._unshard_stream)
+    with torch.profiler.record_function(
+        "FullyShardedDataParallel._pre_forward_prefetch"
+    ):
+        _prefetch_handle(state, handle, _PrefetchMode.FORWARD)
+
+
+@no_type_check
+def _post_forward(
+    state: _FSDPState,
+    handle: Optional[FlatParamHandle],
+    reshard_fn: Callable,
+    module: nn.Module,
+    input: Any,
+    output: Any,
+) -> Any:
+    """
+    Runs the post-forward logic. This includes an opportunity to reshard
+    currently unsharded parameters, such as those used in the current forward,
+    and to register pre-backward hooks on the forward outputs.
+
+    Args:
+        handle (Optional[FlatParamHandle]): Handle giving the parameters used
+            in the current forward, if any.
+        reshard_fn (Optional[Callable]): A callable to reshard any currently
+            unsharded parameters (e.g. from the current forward) or ``None`` to
+            not do any resharding.
+        module (nn.Module): Module whose forward just ran, which should be a
+            fully sharded module (see [Note: Fully Sharded Module]); expected
+            by the hook signature.
+        input (Any): Unused; expected by the hook signature.
+        output (Any): Forward pass output; pre-backward hooks are registered on
+            the tensors that require gradients in this output.
+
+    Postcondition: Each ``FlatParameter`` 's data points to the sharded flat
+    parameter.
+    """
+    with torch.profiler.record_function("FullyShardedDataParallel._post_forward"):
+        # For `fully_shard` + `checkpoint`, skip post-forward logic in the
+        # recomputed forward
+        if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:
+            return output
+
+        state._exec_order_data.record_post_forward(handle)
+        if reshard_fn is not None:
+            reshard_fn(state, handle)
+        # Register pre-backward hooks to unshard the flat parameters for the
+        # gradient computation (if needed)
+        output = _register_pre_backward_hooks(state, module, output, handle)
+        state.training_state = TrainingState.IDLE
+        if handle:
+            handle._training_state = HandleTrainingState.IDLE
+        return output
+
+
+@no_type_check
+def _post_forward_reshard(
+    state: _FSDPState,
+    handle: FlatParamHandle,
+) -> None:
+    """Reshards parameters in the post-forward."""
+    if not handle:
+        return
+    # Do not free the root's parameters in the post-forward for `FULL_SHARD`
+    # with the intention that they are immediately used for backward
+    # computation (though this may not be true)
+    free_unsharded_flat_param = (
+        not state._is_root
+        and handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES
+    )
+    _reshard(state, handle, free_unsharded_flat_param)
+
+
+@no_type_check
+def _root_pre_forward(
+    state: _FSDPState,
+    module: nn.Module,
+    args,
+    kwargs,
+) -> None:
+    """
+    Runs pre-forward logic specific to the root FSDP instance, which should run
+    before any individual module's pre-forward. This starts with an attempt at
+    lazy initialization (which only runs non-vacuously once). Otherwise, if
+    this is called on a non-root FSDP instance, then it returns directly.
+
+    Args:
+        module (nn.Module): Module for which this logic tries to run. It may or
+            may not be the root. If not, then this method does not do anything.
+    """
+    with torch.profiler.record_function("FullyShardedDataParallel._root_pre_forward"):
+        _lazy_init(state, module)
+        _p_assert(state._is_root is not None, "Expects a root FSDP to have been set")
+        if not state._is_root:
+            # Always cast forward inputs in the root of this local FSDP unit for
+            # mixed precision, as this is where mixed precision could be
+            # configured. This is more useful for auto wrapping, which is
+            # recommended in the composable path. For manual wrapping, casting
+            # forward inputs on each local FSDP unit root would add some
+            # overhead, so it is not enabled for the model-wrapper path right
+            # now, where manual wrapping is more broadly used.
+            if _is_composable(state):
+                return _root_cast_forward_input(state, module, args, kwargs)
+            return args, kwargs
+
+        # We cast buffers back to full precision if we're forcing full precision.
+        # Separately, we check if buffers are in full precision and if we should
+        # cast them back to lower precision, which happens when exiting eval()
+        # mode.
+        handle = state._handle
+        if handle:
+            should_cast_buffers_to_full_prec = handle._force_full_precision
+        else:
+            should_cast_buffers_to_full_prec = True
+
+        if should_cast_buffers_to_full_prec:
+            _cast_buffers_to_dtype_and_device(
+                buffers=dict(module.named_buffers()).values(),
+                buffer_dtypes=list(state._buffer_name_to_orig_dtype.values()),
+                device=state.compute_device,
+            )
+            # This flag is only set when we cast buffers to full precision, to
+            # avoid the CPU overhead that can stem from retrieving all buffers
+            # and their types in the following else branch.
+ state._needs_buffer_dtype_restore_check = True + elif getattr(state, "_needs_buffer_dtype_restore_check", False): + # Check if buffers are in full precision and we need to cast them + # back down. + ( + buffers, + buffer_dtypes_for_computation, + ) = _get_buffers_and_dtypes_for_computation(state, module) + if len(buffers) > 0 and len(buffer_dtypes_for_computation) > 0: + if any( + buffer.dtype != buffer_dtype_for_computation + for buffer, buffer_dtype_for_computation in zip( + buffers, buffer_dtypes_for_computation + ) + ): + # Assume we have to cast everything if there is one mismatch + _cast_buffers_to_dtype_and_device( + buffers, buffer_dtypes_for_computation, state.compute_device + ) + # We don't have to check this again until we cast buffers to full precision again. + state._needs_buffer_dtype_restore_check = False + + if state.forward_prefetch: + handles = [] + for fsdp_state in state._all_fsdp_states: + if fsdp_state._handle: + handles.append(fsdp_state._handle) + for handle in handles: + handle._needs_pre_forward_unshard = True + handle._prefetched = False + _wait_for_computation_stream( + state._device_handle.current_stream(), + state._unshard_stream, + state._pre_unshard_stream, + ) + _reset_flat_param_grad_info_if_needed(state._all_handles) + + # Prepares the forward inputs by moving them to ``compute_device`` + # TODO: Do not use the side stream for tensor copies for now; investigate + # the perf with/without it. + with torch.profiler.record_function("FullyShardedDataParallel._to_kwargs"): + args_tuple, kwargs_tuple = _to_kwargs( + args, kwargs, state.compute_device, False + ) + args = args_tuple[0] + kwargs = kwargs_tuple[0] + + return _root_cast_forward_input(state, module, args, kwargs) + + +@no_type_check +def _root_cast_forward_input( + state: _FSDPState, module: torch.nn.Module, args, kwargs +) -> Tuple[Any, Any]: + if state._handle: + force_full_precision = not state._handle._force_full_precision + else: + force_full_precision = True + + should_cast_forward_inputs = ( + (module.training or not state._use_full_prec_in_eval) and force_full_precision + ) and state.mixed_precision.cast_root_forward_inputs + + if should_cast_forward_inputs: + input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype + args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs) + + return args, kwargs + + +@no_type_check +def _pre_backward_hook( + state: _FSDPState, + module: nn.Module, + handle: FlatParamHandle, + grad, + *unused: Any, +) -> Any: + """ + Prepares ``_handle`` 's ``FlatParameter`` s for gradient computation. + + Args: + module (nn.Module): Fully sharded module (see [Note: Fully Sharded + Module]). + """ + # Only run the pre-backward hook once per group of handles involved in the + # same module forward computation + if ( + handle + and hasattr(handle, "_ran_pre_backward_hook") + and handle._ran_pre_backward_hook + ): + log.debug("%s %s", id(state), "Not Running pre backward! 
Already Ran!") + return grad + + with torch.profiler.record_function("FullyShardedDataParallel._pre_backward_hook"): + # Queue the post-backward callback once for the root FSDP instance to + # attach it to the outermost backward graph task so that it is called + # after all backward calls complete + if state._is_root and not state._post_backward_callback_queued: + _register_post_backward_final_callback(state, module) + _reset_flat_param_grad_info_if_needed(state._all_handles) + elif handle: + allowed_states = [TrainingState.IDLE] + if _is_composable(state): + allowed_states.append(TrainingState.FORWARD_BACKWARD) + _assert_in_training_states(state, allowed_states) + state.training_state = TrainingState.FORWARD_BACKWARD + # Queueing the post-backward callback is the only logic that is not + # per-handle in the pre-backward hook, so we can return early here if + # there are no handles. + if not handle: + return grad + handle._training_state = HandleTrainingState.BACKWARD_PRE + + if handle._needs_pre_backward_unshard: + # If the handles have been prefetched, then there is no need to + # call `_unshard()` again + if not handle._prefetched: + _unshard( + state, + handle, + state._unshard_stream, + state._pre_unshard_stream, + ) + # Don't wait during trace + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + state._device_handle.current_stream().wait_stream(state._unshard_stream) + + # Set this to `False` to ensure that a mistargeted prefetch does not + # actually unshard these handles + handle._needs_pre_backward_unshard = False + with torch.profiler.record_function( + "FullyShardedDataParallel._pre_backward_prefetch" + ): + _prefetch_handle(state, handle, _PrefetchMode.BACKWARD) + handle.prepare_gradient_for_backward() + handle._ran_pre_backward_hook = True + return grad + + +@no_type_check +@torch.no_grad() +def _post_backward_hook( + state: _FSDPState, + handle: FlatParamHandle, + flat_param, + *unused: Any, +): + """ + Reduce-scatters the gradient of ``handle`` 's ``FlatParameter``. + + Precondition: The ``FlatParameter`` 's ``.grad`` attribute contains the + unsharded gradient for the local batch. + + Postcondition: + - If using ``NO_SHARD``, then the ``.grad`` attribute is the reduced + unsharded gradient. + - Otherwise, the ``_saved_grad_shard`` attribute is the reduced sharded + gradient (accumulating with any existing gradient). + """ + _log_post_backward_hook(state, handle, log) + flat_param = handle.flat_param + flat_param._post_backward_called = True + with torch.autograd.profiler.record_function( + "FullyShardedDataParallel._post_backward_hook" + ): + _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD]) + # For multiple applications of reentrant AC across submodules sharing + # the same `FlatParameter`, the post-backward hook may run multiple + # times in one backward, in which case we permit the state to already + # be in `BACKWARD_POST`. + _p_assert( + handle._training_state + in (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.BACKWARD_POST), + f"Expects `BACKWARD_PRE` or `BACKWARD_POST` state but got {handle._training_state}", + ) + handle._training_state = HandleTrainingState.BACKWARD_POST + + if flat_param.grad is None: + return + if flat_param.grad.requires_grad: + raise RuntimeError("FSDP does not support gradients of gradients") + + _post_backward_reshard(state, handle) + if not state._sync_gradients: + if handle._use_orig_params: + handle._use_unsharded_grad_views() + return + + # Wait for all ops in the current stream (e.g. 
gradient computation) to + # finish before reduce-scattering the gradient + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + state._post_backward_stream.wait_stream( + state._device_handle.current_stream() + ) + + with state._device_handle.stream(state._post_backward_stream): + autograd_computed_grad = flat_param.grad.data + if ( + not _low_precision_hook_enabled(state) + and flat_param.grad.dtype != handle._reduce_dtype + # If we are forcing full precision but communicating grads + # (i.e. model.eval() + full precision in eval was configured), don't downcast gradient. + and not handle._force_full_precision + ): + flat_param.grad.data = flat_param.grad.to(handle._reduce_dtype) + if handle.uses_sharded_strategy: + _reduce_grad(state, handle) + else: + _reduce_grad_no_shard(state, handle) + # Since the unsharded gradient is produced in the computation + # stream and consumed in the post-backward stream, inform the + # caching allocator (before it goes out of scope) + _no_dispatch_record_stream( + autograd_computed_grad, state._post_backward_stream + ) + + +def _post_backward_reshard_only_hook( + state: _FSDPState, + handle: FlatParamHandle, + *unused: Any, +) -> None: + with torch.profiler.record_function( + "FullyShardedDataParallel._post_backward_hook_reshard_only" + ): + # `_pre_backward_hook` may not get executed + # if forward output does not require grad + # overwrite IDLE state for post-backward prefetching + state.training_state = TrainingState.FORWARD_BACKWARD + handle._training_state = HandleTrainingState.BACKWARD_POST + _post_backward_reshard(state, handle) + + +def _post_backward_reshard( + state: _FSDPState, + handle: FlatParamHandle, + *unused: Any, +) -> None: + free_unsharded_flat_param = _should_free_in_backward(state, handle) + _reshard(state, handle, free_unsharded_flat_param) + + # TODO: Post-backward prefetching does not support the multiple handles + # per module case since the post-backward hook runs per handle, not per + # group of handles. + with torch.profiler.record_function( + "FullyShardedDataParallel._post_backward_prefetch" + ): + _prefetch_handle(state, handle, _PrefetchMode.BACKWARD) + + +@no_type_check +def _should_free_in_backward( + state: _FSDPState, + handle: FlatParamHandle, +) -> bool: + """ + Returns whether FSDP should free the unsharded flat parameter in the + post-backward or not. + """ + if not handle.uses_sharded_strategy: + return False + # If not syncing gradients, then we do not free for strategies that do not + # reshard after forward as a *heuristic* to tradeoff higher memory for + # higher throughput. + return ( + state._sync_gradients + or handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES + ) + + +@no_type_check +def _reduce_grad(state: _FSDPState, handle: FlatParamHandle) -> None: + """ + For sharded strategies, this runs gradient reduction, sharded gradient + accumulation if needed, and the post-reduction callback. + """ + flat_param = handle.flat_param + uses_hybrid_sharded_strategy = handle._sharding_strategy in ( + HandleShardingStrategy.HYBRID_SHARD, + HandleShardingStrategy._HYBRID_SHARD_ZERO2, + ) + # We clear `.grad` to permit multiple backwards. This avoids a race where + # the second backward pass computation precedes ahead of the first backward + # pass reduction, which is possible since the reduction is issued in a + # separate stream and is async and would result in reducing the wrong + # gradient. 
+ unsharded_grad = flat_param.grad.data + flat_param.grad = None + padded_unsharded_grad, new_sharded_grad = _get_reduce_scatter_tensors( + state, unsharded_grad + ) + if state._comm_hook is None: # default path + _div_if_needed(padded_unsharded_grad, state._gradient_predivide_factor) + pg = ( + handle._fake_process_group + if handle._use_fake_reduce + else state.process_group + ) + dist.reduce_scatter_tensor( + new_sharded_grad, + padded_unsharded_grad, + group=pg, + ) + if uses_hybrid_sharded_strategy: + # Don't wait during trace + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + state._all_reduce_stream.wait_stream(state._post_backward_stream) + with state._device_handle.stream(state._all_reduce_stream): + # Since the new sharded gradient is produced in the post- + # backward stream and consumed in the all-reduce stream, + # inform the caching allocator + _no_dispatch_record_stream(new_sharded_grad, state._all_reduce_stream) + dist.all_reduce(new_sharded_grad, group=state._inter_node_pg) + _div_if_needed(new_sharded_grad, state._gradient_postdivide_factor) + grad_to_offload = _accumulate_sharded_grad( + state, handle, new_sharded_grad + ) + _post_reduce_grad_callback(state, handle, grad_to_offload) + return + _div_if_needed(new_sharded_grad, state._gradient_postdivide_factor) + else: + state._comm_hook( + state._comm_hook_state, padded_unsharded_grad, new_sharded_grad + ) + # NOTE: HSDP variants do not support communication hook. + grad_to_offload = _accumulate_sharded_grad(state, handle, new_sharded_grad) + _post_reduce_grad_callback(state, handle, grad_to_offload) + + +@no_type_check +def _get_reduce_scatter_tensors( + state: _FSDPState, unsharded_grad: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Returns the input and output tensors to reduce-scatter, respectively. + """ + chunks = list(unsharded_grad.chunk(state.world_size)) + numel_to_pad = state.world_size * chunks[0].numel() - unsharded_grad.numel() + padded_unsharded_grad = ( + F.pad(unsharded_grad, [0, numel_to_pad]) if numel_to_pad > 0 else unsharded_grad + ) + new_sharded_grad = torch.empty_like(chunks[0]) # padded + return padded_unsharded_grad, new_sharded_grad + + +@no_type_check +def _accumulate_sharded_grad( + state: _FSDPState, + handle: FlatParamHandle, + sharded_grad: torch.Tensor, +) -> torch.Tensor: + """ + Accumulates the reduce-scattered sharded gradient with any existing sharded + gradient if needed, returning the gradient to offload (if CPU offloading is + enabled). + """ + flat_param = handle.flat_param + _cast_grad_to_param_dtype(state, sharded_grad, flat_param) + # Save the sharded gradient in `_saved_grad_shard` to support gradient + # accumulation -- for multiple backwards, the gradient reductions may + # happen in arbitrary order + accumulate_grad = hasattr(flat_param, "_saved_grad_shard") + if accumulate_grad: + _check_grad_to_accumulate(sharded_grad, flat_param._saved_grad_shard) + flat_param._saved_grad_shard += sharded_grad + else: + flat_param._saved_grad_shard = sharded_grad + grad_to_offload = flat_param._saved_grad_shard + return grad_to_offload + + +@no_type_check +def _reduce_grad_no_shard(state: _FSDPState, handle: FlatParamHandle) -> None: + """ + For no-shard, this runs gradient reduction (which directly covers any + gradient accumulation implicitly) and the post-reduction callback. 
+ """ + flat_param = handle.flat_param + if state._comm_hook is None: # default path + _div_if_needed(flat_param.grad, state._gradient_predivide_factor) + dist.all_reduce(flat_param.grad, group=state.process_group) + _div_if_needed(flat_param.grad, state._gradient_postdivide_factor) + else: + state._comm_hook(state._comm_hook_state, flat_param.grad) + # For `NO_SHARD`, we can keep the low precision gradients by simply + # omitting the cast altogether + if not handle._keep_low_precision_grads: + _cast_grad_to_param_dtype(state, flat_param.grad, flat_param) + grad_to_offload = flat_param.grad.data + _post_reduce_grad_callback(state, handle, grad_to_offload) + + +@no_type_check +def _post_reduce_grad_callback( + state: _FSDPState, + handle: FlatParamHandle, + # Additional arguments needed for the callback logic + grad_to_offload: torch.Tensor, +): + """ + This callback captures any logic to run after the gradient reduction + finishes. Currently, this offloads the gradient to CPU if CPU offloading is + enabled and uses sharded gradient views if ``use_orig_params=True``. + """ + _offload_grad(state, handle, grad_to_offload) + _post_backward_use_sharded_grad_views(handle) + + +@no_type_check +def _offload_grad( + state: _FSDPState, + handle: FlatParamHandle, + grad_to_offload: torch.Tensor, +): + if not handle._offload_params: + return + # Offload the gradient to CPU to ensure parameters and gradients are on the + # same device as required by the optimizer + # TODO: Investigate why `NO_SHARD` breaks correctness when using + # `non_blocking=True` here. + # TODO (rohan-varma): When CPU offload and optimizer overlap, + # non_blocking=True won't work since the copy may have not finished before + # the optimizer step executes on CPU. If we want to use non-blocking=True + # here, we'll have to synchronize before using result on CPU. + non_blocking = handle.uses_sharded_strategy and not handle._has_optim_in_backward + handle.flat_param._cpu_grad.copy_( + grad_to_offload.detach(), non_blocking=non_blocking + ) # synchronized in the post-backward callback + # Since the gradient being offloaded may have been produced in the + # computation stream and is being consumed here in the post-backward + # stream, inform the caching allocator + _no_dispatch_record_stream(grad_to_offload.data, state._post_backward_stream) + + +@no_type_check +def _post_backward_use_sharded_grad_views(handle: FlatParamHandle): + if not handle._use_orig_params: + return + # Since the handle's `FlatParameter` completed its gradient computation, we + # should reset the gradient noneness mask + handle._reset_is_grad_none() + # Delay using sharded gradient views until after the reduce-scatter instead + # of immediately after resharding + handle._use_sharded_grad_views() + if handle._has_optim_in_backward: + handle.prepare_gradient_for_optim() + for orig_param in handle.flat_param._params: + # Check for `None` gradient to filter parameters not in the rank + if orig_param.grad is not None and hasattr( + orig_param, "_in_backward_optimizers" + ): + # TODO (rohan-varma): For CPU offload, this unfortunately + # operates on CPU because the parameters and gradients have + # already been offloaded. We should run this on GPU after + # refactoring. 
+ for optim in orig_param._in_backward_optimizers: + optim.step() + + optim.zero_grad(set_to_none=True) + handle._reset_flat_param_grad_info_if_needed() + if handle._offload_params: + handle.flat_param._cpu_grad = None + + +def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None: + if div_factor > 1: + tensor.div_(div_factor) + + +@no_type_check +def _cast_grad_to_param_dtype( + state: _FSDPState, + sharded_grad: torch.Tensor, + param: FlatParameter, +): + """ + Casts ``sharded_grad`` back to the full parameter dtype so that the + optimizer step runs with that dtype. This performs an actual cast if + 1. parameters were in reduced precision during the forward since then + gradients would be in that reduced precision, or + 2. parameters were not in reduced precision but gradients were in + reduced precision for communication. + However, if a low precision communication hook is registered, then this + dtype cast happens in the hook instead. + """ + _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD]) + if not _low_precision_hook_enabled(state) and sharded_grad.dtype != param.dtype: + low_prec_grad_data = sharded_grad.data + sharded_grad.data = sharded_grad.data.to(dtype=param.dtype) + # Since for `NO_SHARD`, the gradient is produced in the computation + # stream and consumed here in the post-backward stream, inform the + # caching allocator; for the sharded strategies, the gradient is + # produced in the post-backward stream, so this `record_stream()` + # should be a no-op + _no_dispatch_record_stream( + low_prec_grad_data, state._device_handle.current_stream() + ) + + +def _check_grad_to_accumulate( + new_sharded_grad: torch.Tensor, + accumulated_grad: torch.Tensor, +) -> None: + _p_assert( + accumulated_grad.shape == new_sharded_grad.shape, + "Shape mismatch when accumulating gradients: " + f"existing gradient shape={accumulated_grad.shape} " + f"new gradient shape={new_sharded_grad.shape}", + ) + _p_assert( + accumulated_grad.device == new_sharded_grad.device, + "Device mismatch when accumulating gradients: " + f"existing gradient device={accumulated_grad.device} " + f"new gradient device={new_sharded_grad.device}", + ) + + +@no_type_check +def _low_precision_hook_enabled(state: _FSDPState) -> bool: + return state._comm_hook in LOW_PRECISION_HOOKS + + +@no_type_check +@torch.no_grad() +def _post_backward_final_callback( + state: _FSDPState, + module: nn.Module, +): + """ + This waits for the post-backward to finish and performs some final cleanup. + This runs at the end of the entire backward pass and should only be called + on the root FSDP instance. + """ + _p_assert( + state._is_root, + "The post-backward callback should only be called on the root FSDP instance", + ) + root_state = state + + if root_state._sync_gradients: + current_stream = state._device_handle.current_stream() + # TODO (rohan-varma): this also waits for the overlapped optimizer step to finish + # since it currently runs in the post-backward stream. 
That can be
+        # pushed to the next forward if run in a different stream
+        current_stream.wait_stream(root_state._post_backward_stream)
+        if root_state._all_reduce_stream is not current_stream:  # uses HSDP
+            current_stream.wait_stream(root_state._all_reduce_stream)
+        if root_state.cpu_offload.offload_params:
+            # Wait for non-blocking GPU -> CPU sharded gradient copies from the
+            # post-backward hooks to finish explicitly since CPU gradients do
+            # not automatically synchronize with the GPU
+            state._device_handle.current_stream().synchronize()
+    root_state._exec_order_data.next_iter()
+
+    for fsdp_state in state._all_fsdp_states:
+        _catch_all_reshard(fsdp_state)
+        _finalize_params(fsdp_state)
+        fsdp_state.training_state = TrainingState.IDLE
+        handle = fsdp_state._handle
+        if handle:
+            handle._ran_pre_backward_hook = False
+            handle._needs_pre_backward_unshard = False
+            handle._post_forward_index = None
+            handle._training_state = HandleTrainingState.IDLE
+            handle._prefetched = False
+    # Reset for cases like one forward and multiple backwards
+    root_state._post_backward_callback_queued = False
+
+
+@no_type_check
+def _catch_all_reshard(
+    state: _FSDPState,
+) -> None:
+    """
+    Reshards the parameters that may not have been resharded in the
+    post-backward hook. This can happen when a module's output is used in the
+    forward pass, meaning that its pre-backward hook runs (unsharding the
+    parameter), but the post-backward hook does not run because the output was
+    not used in the loss computation corresponding to this backward pass.
+    """
+    # Wrap with a try-except to provide a more informative traceback if an
+    # error is raised
+    try:
+        if state._handle:
+            # TODO: This already-resharded check is brittle:
+            # https://github.com/pytorch/pytorch/issues/83956
+            already_resharded = (
+                state._handle.flat_param.data_ptr()
+                == state._handle.flat_param._local_shard.data_ptr()
+                # If FSDP skipped using sharded views, then the flat parameter
+                # still points to the sharded data, so we need to reshard to
+                # use sharded views
+                and not state._handle._skipped_use_sharded_views
+            )
+            if already_resharded:
+                return
+            free_unsharded_flat_param = _should_free_in_backward(state, state._handle)
+            _reshard(state, state._handle, free_unsharded_flat_param)
+    except Exception as e:
+        _p_assert(
+            False,
+            f"Got exception in the catch-all reshard for {state}: {str(e)}",
+            raise_assertion_error=False,
+        )
+        raise e
+
+
+@no_type_check
+def _finalize_params(
+    state: _FSDPState,
+) -> None:
+    """Finalizes the parameters before the next iteration."""
+    handle = state._handle
+    if not handle:
+        return
+    flat_param = handle.flat_param
+    if torch.distributed._functional_collectives.is_torchdynamo_compiling():
+        if hasattr(flat_param, "_post_backward_hook_handle"):
+            pbhs_handle = flat_param._post_backward_hook_handle
+            pbhs_handle.remove()
+            del flat_param._post_backward_hook_handle
+    else:
+        if hasattr(flat_param, "_post_backward_hook_state"):
+            post_backward_hook_state_len = len(flat_param._post_backward_hook_state)
+            expected_post_backward_hook_state_len = int(flat_param.requires_grad) + 1
+            _p_assert(
+                post_backward_hook_state_len == expected_post_backward_hook_state_len,
+                f"Invalid: ``_post_backward_hook_state``: {flat_param._post_backward_hook_state}",
+            )
+            flat_param._post_backward_hook_state[-1].remove()
+            delattr(flat_param, "_post_backward_hook_state")
+    if flat_param.requires_grad:
+        if not state._sync_gradients:
+            # Preserve the gradient accumulation state if not synchronizing
+            # gradients: `.grad`
remains the unsharded gradient from prior + # `no_sync()` iterations, and `_saved_grad_shard` remains the + # sharded gradient from the last synchronized iteration + return + if not handle._has_optim_in_backward: + handle.prepare_gradient_for_optim() + _p_assert( + hasattr(flat_param, "_post_backward_called"), + "Expects `_post_backward_called` to be set on the `FlatParameter`", + ) + flat_param._post_backward_called = False + + +@no_type_check +def _prefetch_handle( + state: _FSDPState, + current_handle: Optional[FlatParamHandle], + prefetch_mode: _PrefetchMode, +) -> None: + """ + Prefetches the next handles if needed (without synchronization). An empty + handles key cannot prefetch. + """ + if not current_handle: + return + handle = _get_handle_to_prefetch(state, current_handle) + if not handle: + return + # Temporarily emulate the training state while calling `_unshard` to + # ensure the correct `as_params` for `_use_unsharded_views()` + prev_training_state = handle._training_state + if prefetch_mode == _PrefetchMode.BACKWARD: + handle._training_state = HandleTrainingState.BACKWARD_PRE + elif prefetch_mode == _PrefetchMode.FORWARD: + handle._training_state = HandleTrainingState.FORWARD + else: + raise ValueError(f"Invalid prefetch mode on rank {state.rank}: {prefetch_mode}") + # Prefetch the next set of handles without synchronizing to allow + # the sync to happen as late as possible to maximize overlap + _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream) + handle._training_state = prev_training_state + handle._prefetched = True + + +@no_type_check +def _get_handle_to_prefetch( + state: _FSDPState, + current_handle: FlatParamHandle, +) -> FlatParamHandle: + """ + Returns a :class:`list` of the handles keys to prefetch for the next + module(s), where ``current_handle`` represents the current module. + + "Prefetching" refers to running the unshard logic early (without + synchronization), and the "next" modules depend on the recorded execution + order and the current training state. 
+ """ + training_state = _get_training_state(current_handle) + valid_training_states = ( + HandleTrainingState.BACKWARD_PRE, + HandleTrainingState.BACKWARD_POST, + HandleTrainingState.FORWARD, + ) + _p_assert( + training_state in valid_training_states, + f"Prefetching is only supported in {valid_training_states} but " + f"currently in {training_state}", + ) + eod = state._exec_order_data + target_handle: Optional[FlatParamHandle] = None + if ( + training_state == HandleTrainingState.BACKWARD_PRE + and state.backward_prefetch == BackwardPrefetch.BACKWARD_PRE + ) or ( + training_state == HandleTrainingState.BACKWARD_POST + and state.backward_prefetch == BackwardPrefetch.BACKWARD_POST + ): + target_handle_candidate = eod.get_handle_to_backward_prefetch(current_handle) + if ( + target_handle_candidate + and target_handle_candidate._needs_pre_backward_unshard + and not target_handle_candidate._prefetched + ): + target_handle = target_handle_candidate + else: + target_handle = None + elif training_state == HandleTrainingState.FORWARD and state.forward_prefetch: + target_handle_candidate = eod.get_handle_to_forward_prefetch(current_handle) + if ( + target_handle_candidate + and target_handle_candidate._needs_pre_forward_unshard + and not target_handle_candidate._prefetched + ): + target_handle = target_handle_candidate + else: + target_handle = None + + return target_handle + + +def _get_training_state( + handle: FlatParamHandle, +) -> HandleTrainingState: + """Returns the training state of the handles in ``handle``.""" + _p_assert(handle, "Expects a non-empty handle") + return handle._training_state + + +@no_type_check +def _register_pre_forward_hook( + state: _FSDPState, + module: nn.Module, +) -> None: + """ + Registers a pre-forward hook on ``module``. + """ + for forward_handle in state._pre_forward_handles: + forward_handle.remove() + state._pre_forward_handles.clear() + module_param_handle = state._fully_sharded_module_to_handle.get(module, None) + hook = functools.partial( + _pre_forward, state, module_param_handle, _pre_forward_unshard + ) + state._pre_forward_handles.append( + module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True) + ) + + +@no_type_check +def _register_post_forward_hook( + state: _FSDPState, + module: nn.Module, +) -> None: + """ + Registers a post-forward hook on ``module``. Even if the module has no + handles, we should register the hook since it will register the module's + pre-backward hook. + """ + for forward_handle in state._post_forward_handles: + forward_handle.remove() + state._post_forward_handles.clear() + module_param_handle = state._fully_sharded_module_to_handle.get(module, None) + hook = functools.partial( + _post_forward, + state, + module_param_handle, + _post_forward_reshard, + ) + state._post_forward_handles.append(module.register_forward_hook(hook)) + + +@no_type_check +def _register_root_pre_forward_hook( + state: _FSDPState, + module: nn.Module, +): + """ + Registers root pre-forward hook on ``module``, which should be the local + FSDP root. + + NOTE: For the current composable FSDP design, we have each application of + ``fully_shard()`` to a module to indicate that that module is the local + FSDP root. We may remove this assumption in the future, in which case we + will need to register this root pre-forward hook on any candidate module + that may be the local FSDP root. 
+ """ + for forward_handle in state._root_pre_forward_handles: + forward_handle.remove() + state._root_pre_forward_handles.clear() + hook = functools.partial(_root_pre_forward, state) + state._root_pre_forward_handles.append( + module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True) + ) + + +@no_type_check +def _register_pre_backward_hooks( + state: _FSDPState, + module: nn.Module, + outputs: Any, + handle: FlatParamHandle, +) -> None: + """ + Registers pre-backward hooks on the tensors that require gradients in the + forward pass outputs ``outputs``, which were computed using the + ``FlatParameter`` s of ``handles``. + + Args: + module (nn.Module): Fully sharded module (see [Note: Fully Sharded + Module]). + + Returns: + Forward pass outputs with pre-backward hooks registered to tensors that + require gradients. + """ + # If there is no gradient computation, then there is no need for + # pre-backward logic + if not torch.is_grad_enabled(): + return outputs + if state._is_root: + state._post_backward_callback_queued = False # only defined on the root + + if handle: + handle._needs_pre_backward_unshard = False + # Since these handles' `FlatParameter`s participated in a forward, we + # conservatively assume that they will be used in the backward + handle._ran_pre_backward_hook = False + + def _register_hook(t: torch.Tensor) -> torch.Tensor: + if t.requires_grad: + t.register_hook( + functools.partial(_pre_backward_hook, state, module, handle) + ) + if handle: + handle._needs_pre_backward_unshard = True + return t + + return _apply_to_tensors(_register_hook, outputs) + + +def _register_post_backward_hook( + state: _FSDPState, + handle: Optional[FlatParamHandle], +) -> None: + """ + Registers post-backward hooks on the ``FlatParameter`` s' + ``AccumulateGrad`` objects to reshard and to reduce-scatter gradients. + + The ``AccumulateGrad`` object represents the last function that finalizes + the ``FlatParameter`` 's gradient, so it only runs after its entire + gradient computation has finished. + + We register the post-backward hook only once in the *first* forward that a + ``FlatParameter`` participates in. This relies on the ``AccumulateGrad`` + object being preserved through multiple forwards. + + NOTE: We follow this heuristic to prefer the *first* forward to target the + parameter mixed precision case, where there are *separate* + ``AccumulateGrad`` objects across the different forwards. (Without + parameter mixed precision, the ``AccumulateGrad`` objects are the same.) If + we instead prefer the *last* forward, then the hook runs early. 
+ """ + # If there is no gradient computation, then there is no need for + # post-backward logic + if not torch.is_grad_enabled(): + return + if not handle: + return + flat_param = handle.flat_param + + if torch.distributed._functional_collectives.is_torchdynamo_compiling(): + already_registered = hasattr(flat_param, "_post_backward_hook_handle") + if already_registered or not flat_param.requires_grad: + return + hook = functools.partial(_post_backward_hook, state, handle) + hook_handle = flat_param.register_post_accumulate_grad_hook(hook) + flat_param._post_backward_hook_handle = hook_handle # type: ignore[attr-defined] + else: + already_registered = hasattr(flat_param, "_post_backward_hook_state") + if already_registered or not flat_param.requires_grad: + return + # Get the `AccumulateGrad` object + temp_flat_param = flat_param.expand_as(flat_param) + _p_assert( + temp_flat_param.grad_fn is not None, + "The `grad_fn` is needed to access the `AccumulateGrad` and " + "register the post-backward hook", + ) + acc_grad = temp_flat_param.grad_fn.next_functions[0][0] # type: ignore[union-attr] + assert acc_grad is not None + hook_handle = acc_grad.register_hook( + functools.partial(_post_backward_hook, state, handle) + ) + flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined] + + +def _register_post_backward_reshard_only_hook( + state: _FSDPState, + handle: Optional[FlatParamHandle], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], +) -> None: + """ + Registers post-backward hooks to reshard flat parameters that do not + require gradient. We register these using multi-post-grad hooks on the + input activations to ensure that all gradients that may depend on the + parameters have been computed before resharding. + """ + # If there is no gradient computation, then there is no need for + # post-backward logic + if not torch.is_grad_enabled(): + return + # Construct `inp_tensors` lazily to avoid CPU overhead in typical case + # where each flat parameter requires gradient + inp_tensors: Optional[List[torch.Tensor]] = None + if not handle: + return + flat_param = handle.flat_param + + if torch.distributed._functional_collectives.is_torchdynamo_compiling(): + already_registered = hasattr(flat_param, "_post_backward_hook_handle") + else: + already_registered = hasattr(flat_param, "_post_backward_hook_state") + + if already_registered or flat_param.requires_grad: + return + if inp_tensors is None: + args_flat = pytree.arg_tree_leaves(*args, **kwargs) + inp_tensors = [ + obj for obj in args_flat if torch.is_tensor(obj) and obj.requires_grad + ] + assert inp_tensors is not None # mypy + hook_handle = register_multi_grad_hook( + inp_tensors, functools.partial(_post_backward_reshard_only_hook, state, handle) + ) + if torch.distributed._functional_collectives.is_torchdynamo_compiling(): + flat_param._post_backward_hook_handle = hook_handle # type: ignore[attr-defined, assignment] + else: + flat_param._post_backward_hook_state = (hook_handle,) # type: ignore[attr-defined, assignment] + + +@no_type_check +def _register_post_backward_final_callback( + state: _FSDPState, module: nn.Module +) -> None: + """ + Registers the post-backward final callback that runs at the end of the + backward pass. This should be called from the root FSDP instance at the + beginning of the pre-backward. 
+ """ + _p_assert( + state._is_root, + "Only the root FSDP instance should register the post-backward callback", + ) + if state._post_backward_callback_queued: + return + _assert_in_training_states(state, [TrainingState.IDLE]) + # Trace does not need this callback + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + state._post_backward_callback_queued = True + Variable._execution_engine.queue_callback( + functools.partial(_post_backward_final_callback, state, module) + ) + + +def _wait_for_computation_stream( + computation_stream: torch.Stream, + unshard_stream: torch.Stream, + pre_unshard_stream: torch.Stream, +): + """ + Has the unshard and pre-unshard streams wait for the computation stream. + For example, this should be called in the FSDP root's pre-forward to + respect optimizer step computation. + """ + # Tracing does not need to wait + if torch.distributed._functional_collectives.is_torchdynamo_compiling(): + return + unshard_stream.wait_stream(computation_stream) # type: ignore[attr-defined] + # Having the pre-all-gather stream wait for the current stream even if we + # do not leverage the pre-all-gather stream is tolerable since this only + # runs once per iteration + pre_unshard_stream.wait_stream(computation_stream) # type: ignore[attr-defined] + + +def _reset_flat_param_grad_info_if_needed( + handles: List[FlatParamHandle], +): + """ + Clears the original parameters' gradients if needed. This method's CPU + overhead is minimal, so we may call it throughout FSDP methods, which serve + as callsites to free the gradient memory earlier. + """ + if not isinstance(handles, list): + handles = [handles] + for handle in handles: + if handle._use_orig_params: + handle._reset_flat_param_grad_info_if_needed() + + +@no_type_check +def _get_buffers_and_dtypes_for_computation( + state: _FSDPState, + root_module: nn.Module, +) -> Tuple[List[torch.Tensor], List[Optional[torch.dtype]]]: + """ + Returns all buffers in the module tree rooted at ``root_module`` and a + corresponding list of the buffer dtypes for computation. Each buffer dtype + is either ``None`` if buffer mixed precision is not enabled or the buffer + low precision dtype otherwise. + """ + _p_assert(state._is_root, "Expects the root to cast buffers") + buffers: List[torch.Tensor] = [] + buffer_dtypes: List[Optional[torch.dtype]] = [] + visited_buffers: Set[torch.Tensor] = set() + # Traverse the FSDP states bottom-up so that we prefer the owning FSDP + # instance's mixed precision setting for each buffer + fsdp_states, fsdp_modules = traversal_utils._get_fsdp_states_with_modules( + root_module + ) + for fsdp_state, fsdp_module in zip(reversed(fsdp_states), reversed(fsdp_modules)): + for buffer_name, buffer in fsdp_module.named_buffers(): + if buffer in visited_buffers: + continue + visited_buffers.add(buffer) + if clean_tensor_name(buffer_name) in fsdp_state._ignored_buffer_names: + continue + buffers.append(buffer) + buffer_dtypes.append(fsdp_state.mixed_precision.buffer_dtype) + assert len(buffers) == len(buffer_dtypes), f"{len(buffers)} {len(buffer_dtypes)}" + return buffers, buffer_dtypes + + +@no_type_check +def _get_orig_buffer_dtypes( + state: _FSDPState, + buffer_names: List[str], +) -> List[torch.dtype]: + """ + Returns the original buffer types of the given buffer names. 
+ """ + buffer_dtypes: List[torch.dtype] = [] + for buffer_name in buffer_names: + _p_assert( + buffer_name in state._buffer_name_to_orig_dtype, + f"{buffer_name} is missing from pre-computed dict on rank " + f"{state.rank}, which only has keys " + f"{state._buffer_name_to_orig_dtype.keys()}", + ) + buffer_dtypes.append(state._buffer_name_to_orig_dtype[buffer_name]) + return buffer_dtypes + + +def _cast_buffers_to_dtype_and_device( + buffers: List[torch.Tensor], + buffer_dtypes: List[Optional[torch.dtype]], + device: torch.device, +) -> None: + """ + Casts ``buffers`` to the dtypes given by ``buffer_dtypes`` and moves them + to ``device``. If an element in ``buffer_dtypes`` is ``None``, then the + corresponding buffer is only moved to ``device``. + """ + _p_assert( + buffer_dtypes is None or len(buffers) == len(buffer_dtypes), + f"Expects `buffers` and `buffer_dtypes` to have the same length if " + f"`buffer_dtypes` is specified but got {len(buffers)} and " + f"{len(buffer_dtypes)}", + ) + for buffer, buffer_dtype in zip(buffers, buffer_dtypes): + if not torch.is_floating_point(buffer) or buffer_dtype is None: + buffer.data = buffer.to(device=device) + else: + buffer.data = buffer.to(device=device, dtype=buffer_dtype) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..38db8198663704534f6da30dea313e28c0b4ef75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py @@ -0,0 +1,127 @@ +import copy +import itertools +import math +from typing import Optional + +import torch +import torch.distributed as dist +from torch.distributed import distributed_c10d +from torch.distributed._shard.sharded_tensor import ( + Shard, + ShardedTensor, + ShardedTensorMetadata, + TensorProperties, +) +from torch.distributed._shard.sharding_spec import ShardMetadata +from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard as DShard + + +def _get_remote_device_str(rank, device_type, num_devices_per_node): + if device_type.lower() == "cpu": + return f"rank:{rank}/{device_type}" + else: + return f"rank:{rank}/{device_type}:{rank % num_devices_per_node}" + + +def _create_chunk_sharded_tensor( + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, + device: Optional[torch.device] = None, +) -> ShardedTensor: + """ + Shard a tensor to chunks along the first dimension. The local rank will gets its + corresponding chunk as the local shard to create a ShardedTensor. + """ + chunks = tensor.chunk(world_size, dim=0) + if len(chunks) > rank: + local_shard = chunks[rank].clone() + offsets = [0 for _ in tensor.size()] + offsets[0] = math.ceil(tensor.size()[0] / world_size) * rank + local_shards = [Shard.from_tensor_and_offsets(local_shard, offsets, rank)] + else: + local_shards = [] + + # Create a ShardedTensor without invoking communication. 
+ chunk_sizes = [list(chunk.size()) for chunk in chunks] + dim0_offsets = [0] + list( + itertools.accumulate([chunk_size[0] for chunk_size in chunk_sizes]) + )[:-1] + offsets = [0] * (len(chunk_sizes[0]) - 1) + chunk_offsets = [[d0] + offsets for d0 in dim0_offsets] + device_type = ( + distributed_c10d._get_pg_default_device(pg).type + if device is None + else device.type + ) + placements = [ + _get_remote_device_str(r, device_type, num_devices_per_node) + for r in range(len(chunk_sizes)) + ] + assert len(chunk_sizes) == len(chunk_offsets) == len(placements) + shard_metadata = [ + ShardMetadata(offset, size, placement) + for offset, size, placement in zip(chunk_offsets, chunk_sizes, placements) + ] + sharded_tensor_metadata = ShardedTensorMetadata( + shards_metadata=shard_metadata, + size=tensor.size(), + tensor_properties=TensorProperties( + dtype=tensor.dtype, + layout=tensor.layout, + requires_grad=False, + memory_format=torch.contiguous_format, + pin_memory=tensor.is_pinned(), + ), + ) + return ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards, sharded_tensor_metadata=sharded_tensor_metadata, process_group=pg + ) + + +def _create_chunk_dtensor( + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, +) -> DTensor: + """ + Shard a tensor to chunks along the first dimension. The local rank will gets its + corresponding chunk as the local tensor to create a DTensor. + """ + # We need to explicitly call .detach() to return a new tensor detached from the current graph. + tensor = tensor.clone().detach() + + # FSDP placements: [Shard(0)] + # HSDP placements: [Replicate(), Shard(0)] + replicate_placements = [Replicate() for _ in range(device_mesh.ndim)] + shard_placements = [Replicate() for _ in range(device_mesh.ndim)] + shard_placements[-1] = DShard(0) # type: ignore[call-overload] + + return DTensor.from_local( + tensor, device_mesh, replicate_placements, run_check=False + ).redistribute( + placements=shard_placements, + ) + + +def _all_gather_dtensor( + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], +) -> torch.Tensor: + """ + All gather a DTensor in its sharded dimension and return the local tensor. 
+ """ + assert parent_mesh is None + + placements = list(copy.deepcopy(tensor.placements)) + # FSDP placements: [Shard(0)] -> [Replicate()] + # HSDP placements: [Replicate(), Shard(0)] -> [Replicate(), Replicate()] + placements[-1] = Replicate() + tensor = tensor.redistribute( + device_mesh=tensor.device_mesh, + placements=placements, + ) + + return tensor.to_local() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9489994a3bb4bfae733b2224574c7ebfbe295a6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py @@ -0,0 +1,928 @@ +import contextlib +import logging +import math +import warnings +from typing import ( + Any, + Callable, + cast, + Dict, + Generator, + Iterator, + List, + no_type_check, + Tuple, +) + +import torch +import torch.distributed as dist + +import torch.distributed.algorithms._checkpoint.checkpoint_wrapper as checkpoint_wrapper + +import torch.nn as nn +import torch.nn.functional as F +from torch.distributed._shard.sharded_tensor import ( + init_from_local_shards, + Shard, + ShardedTensor, +) +from torch.distributed._tensor import DTensor +from torch.distributed.device_mesh import _mesh_resources + +from torch.distributed.fsdp._common_utils import ( + _FSDPState, + _get_module_fsdp_state_if_fully_sharded_module, + _has_fsdp_params, + _is_composable, + _module_handle, + clean_tensor_name, + FSDP_PREFIX, + FSDP_WRAPPED_MODULE, +) +from torch.distributed.fsdp._debug_utils import SimpleProfiler +from torch.distributed.fsdp._runtime_utils import ( + _cast_buffers_to_dtype_and_device, + _get_orig_buffer_dtypes, + _lazy_init, + _reset_flat_param_grad_info_if_needed, +) +from torch.distributed.fsdp.api import ( + FullStateDictConfig, + ShardingStrategy, + StateDictType, +) +from torch.distributed.utils import _replace_by_prefix + +from ._fsdp_extensions import ( + _ext_all_gather_dtensor, + _ext_chunk_dtensor, + _ext_chunk_tensor, + _ext_post_unflatten_transform, + _ext_pre_load_state_dict_transform, +) +from ._unshard_param_utils import _unshard_fsdp_state_params, FLAT_PARAM + + +logger = logging.getLogger(__name__) + + +def _should_unshard_params(fsdp_state: _FSDPState) -> bool: + if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD and ( + _is_composable(fsdp_state) or fsdp_state._use_orig_params + ): + return False + else: + return True + + +def _convert_to_wrapped_module_name(module_name: str) -> str: + module_name = module_name.replace(f"{FSDP_PREFIX}", "") + module_name = module_name.replace(f"{FSDP_WRAPPED_MODULE}", "") + if module_name: + module_name = f"{module_name}." + # `CheckpointWrapper` adds a prefix that has to be removed as well. 
+ module_name = module_name.replace(checkpoint_wrapper._CHECKPOINT_PREFIX, "") + return module_name + + +def _param_name_infos( + module: nn.Module, fsdp_state: _FSDPState +) -> Iterator[Tuple[str, str, str]]: + if not _has_fsdp_params(fsdp_state, module): + return + for param_name, module_name in _module_handle( + fsdp_state, module + ).param_module_names(): + module_name = _convert_to_wrapped_module_name(module_name) + fqn = f"{module_name}{param_name}" + yield fqn, param_name, module_name + + +def _shared_param_name_infos( + module: nn.Module, fsdp_state +) -> Iterator[Tuple[str, str, str]]: + for param_name, module_name in _module_handle( + fsdp_state, module + ).shared_param_module_names(): + module_name = _convert_to_wrapped_module_name(module_name) + fqn = f"{module_name}{param_name}" + yield fqn, param_name, module_name + + +@no_type_check +def _enter_unshard_params_ctx( + module: nn.Module, + fsdp_state: _FSDPState, + writeback: bool = False, + rank0_only: bool = False, + offload_to_cpu: bool = False, + with_grads: bool = False, +) -> None: + """ + state_dict hooks cannot use the pure context call as the checkpoint flow + requires to enter the context in the pre-hook but leave the context in the + post-hook. This API enters the context of ``_unshard_fsdp_state_params``. + """ + assert module not in fsdp_state._unshard_params_ctx, ( + "Entering the ``_unshard_fsdp_state_params`` context but _unshard_params_ctx[module] " + "is not None." + ) + fsdp_state._unshard_params_ctx[module] = _unshard_fsdp_state_params( + module, + fsdp_state, + writeback=writeback, + rank0_only=rank0_only, + offload_to_cpu=offload_to_cpu, + with_grads=with_grads, + ) + fsdp_state._unshard_params_ctx[module].__enter__() + + +@no_type_check +def _exit_unshard_params_ctx(module: nn.Module, fsdp_state: _FSDPState) -> None: + """A helper function to exit ``_unshard_fsdp_state_params`` context.""" + fsdp_state._unshard_params_ctx[module].__exit__(None, None, None) + fsdp_state._unshard_params_ctx.pop(module) + + +def _common_pre_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, +) -> None: + """Performs the pre-state_dict tasks shared by all state_dict types.""" + if fsdp_state._device_handle.is_available(): + fsdp_state._device_handle.synchronize() + # TODO: need to check if this is always correct for composable FSDP. + _lazy_init(fsdp_state, module) + if fsdp_state._is_root: + _reset_flat_param_grad_info_if_needed(fsdp_state._all_handles) + + +def _common_unshard_pre_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, + offload_to_cpu: bool, + rank0_only: bool, +) -> None: + """ + Performs the pre-state_dict tasks shared by all state_dict types that require + ``_unshard_fsdp_state_params()``. FULL_STATE_DICT and SHARDED_STATE_DICT use this hook. + """ + # For composable `fully_shard`, it does not need to unshard parameters for `NO_SHARD` cases. + if not _should_unshard_params(fsdp_state): + return + _enter_unshard_params_ctx( + module, + fsdp_state, + writeback=False, + offload_to_cpu=offload_to_cpu, + rank0_only=rank0_only, + ) + + +@no_type_check +def _common_unshard_post_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, + state_dict: Dict[str, Any], + prefix: str, + param_hook: Callable, +) -> Dict[str, Any]: + """ + The post-state_dict flow that shared by all state_dict types that require + ``_unshard_fsdp_state_params()``. FULL_STATE_DICT and SHARDED_STATE_DICT use this + hook. 
+ """ + _replace_by_prefix(state_dict, prefix + f"{FSDP_PREFIX}", prefix) + # Return early for trivial cases + if not state_dict or not _has_fsdp_params(fsdp_state, module): + if _should_unshard_params(fsdp_state): + _exit_unshard_params_ctx(module, fsdp_state) + return state_dict + + # If a rank does not have unsharded parameters(when `rank0_only=True` + # and `rank != 0`), then the rank only needed to participate in the + # all-gather and does not need to save the # state dict. We simply check + # rank0_only to ensure this issue. + rank0_only = ( + fsdp_state._state_dict_type == StateDictType.FULL_STATE_DICT + and cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only + ) + # no_fsdp_return means the state_dict returned by this rank should contain + # only non-FSDP controlled parameters and buffers. + no_fsdp_return = rank0_only and fsdp_state.rank != 0 + if no_fsdp_return and not fsdp_state._use_orig_params: + for clean_key in fsdp_state._buffer_names: + # This is a hack to support activation checkpoint. + clean_key = clean_key.replace( + f"{checkpoint_wrapper._CHECKPOINT_PREFIX}.", "" + ) + state_dict.pop(f"{prefix}{clean_key}", None) + # Non-zero ranks have flat_param key when rank0_only=True, because rank0_only=True is + # passed in to unshard context, but nonzero ranks reshard early, causing this flat_param + # to appear in state_dict. + state_dict.pop(f"{prefix}{FLAT_PARAM}") + _exit_unshard_params_ctx(module, fsdp_state) + return state_dict + + # Loop only the parameters saved in this instance's wrapped module to + # avoid processing buffers. + for fqn, param_name, module_name in _param_name_infos(module, fsdp_state): + fqn = f"{prefix}{fqn}" + if no_fsdp_return: + state_dict.pop(fqn) + continue + assert fqn in state_dict, ( + f"FSDP assumes {fqn} is in the state_dict but the state_dict only " + f"has {state_dict.keys()}. " + f"prefix={prefix}, module_name={module_name}, " + f"param_name={param_name} rank={fsdp_state.rank}." + ) + + param_hook(state_dict, prefix, fqn) + + if _should_unshard_params(fsdp_state): + _exit_unshard_params_ctx(module, fsdp_state) + + cpu_device = torch.device("cpu") + buffer_clean_fqns = [] + buffers = [] + for clean_key in fsdp_state._buffer_names: + # This is a hack to support activation checkpoint. + clean_key = clean_tensor_name(clean_key) + fqn = f"{prefix}{clean_key}" + if fqn not in state_dict: + # A buffer can be registered as non-persistent. 
+ continue + if no_fsdp_return: + state_dict.pop(fqn) + else: + buffer = state_dict[fqn] + if ( + fsdp_state._state_dict_config.offload_to_cpu + and buffer.device != cpu_device + ): + state_dict[fqn] = buffer.to(cpu_device) + # skip upcasting for ignored buffers + if clean_key not in fsdp_state._ignored_buffer_names: + buffer_clean_fqns.append(clean_key) + buffers.append(state_dict[fqn]) + + if buffers: + mixed_precision_enabled_for_buffers = ( + fsdp_state._mixed_precision_enabled_for_buffers() + if not _is_composable(fsdp_state) + else (fsdp_state.mixed_precision.buffer_dtype is not None) + ) + if mixed_precision_enabled_for_buffers: + buffer_dtypes = _get_orig_buffer_dtypes(fsdp_state, buffer_clean_fqns) + _cast_buffers_to_dtype_and_device( + buffers, buffer_dtypes, fsdp_state.compute_device + ) + for buffer, clean_fqn in zip(buffers, buffer_clean_fqns): + fqn = f"{prefix}{clean_fqn}" + logger.info("FSDP is casting the dtype of %s to %s", fqn, buffer.dtype) + state_dict[fqn] = buffer.clone() + return state_dict + + +@no_type_check +def _full_pre_state_dict_hook( + fsdp_state: _FSDPState, + module: nn.Module, + *args, + **kwargs, +) -> None: + """ + Hook that runs before model.state_dict() is called. pre-state_dict hook is + not actually supported by ``nn.Module``. As a result, this API is called + from ``_full_post_state_dict_hook()`` to simulate the case. Once pre-state_dict + is supported in ``nn.Module``, this hook will be registered as a hook in + ``nn.Module``. + """ + if getattr(fsdp_state, "_device_mesh", False): + parent_mesh = _mesh_resources.get_parent_mesh(fsdp_state._device_mesh) + + _common_pre_state_dict_hook(module, fsdp_state) + _common_unshard_pre_state_dict_hook( + module, + fsdp_state, + offload_to_cpu=fsdp_state._state_dict_config.offload_to_cpu, + rank0_only=cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only, + ) + + +@no_type_check +def _full_post_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, + state_dict: Dict[str, Any], + prefix: str, +) -> Dict[str, Any]: + """ + Hook that runs after model.state_dict() is called before returning result to + user. For FSDP, we may have to clone the tensors in state_dict as params go + back to sharded version after _unshard_fsdp_state_params ends, and also remove + the ``FSDP_WRAPPED_MODULE`` prefix. + """ + + def param_hook( + state_dict: Dict[str, Any], + prefix: str, + fqn: str, + ) -> None: + clean_key = fqn + clean_prefix = clean_tensor_name(prefix) + # Strip prefix out of key if needed as buffer names and param names + # do not have prefix considered as they are not computed in `state_dict` + # call. + if clean_key.startswith(clean_prefix): + clean_key = clean_key[len(clean_prefix) :] + + # Clone parameters before exiting the `_unshard_fsdp_state_params()` context. + if not getattr(state_dict[fqn], "_has_been_cloned", False): + try: + state_dict[fqn] = state_dict[fqn].clone().detach() + state_dict[fqn]._has_been_cloned = True # type: ignore[attr-defined] + except BaseException as e: + warnings.warn( + f"Failed to clone() tensor with name {fqn} on rank {fsdp_state.rank}. " + "This may mean that this state_dict entry could point to invalid " + "memory regions after returning from state_dict() call if this " + "parameter is managed by FSDP. Please check clone " + f"implementation of {fqn}. 
Error: {str(e)}" + ) + + return _common_unshard_post_state_dict_hook( + module, fsdp_state, state_dict, prefix, param_hook + ) + + +def _full_pre_load_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, + state_dict: Dict[str, Any], + prefix: str, +) -> None: + _lazy_init(fsdp_state, module) + if _should_unshard_params(fsdp_state): + with SimpleProfiler.profile("_enter_unshard_params_ctx"): + _enter_unshard_params_ctx(module, fsdp_state, writeback=True) + # Add FSDP_PREFIX only for wrapper-based FSDP. + if not _is_composable(fsdp_state): + _replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_PREFIX}") + + +def _full_post_load_state_dict_hook( + module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs +) -> None: + if _should_unshard_params(fsdp_state): + with SimpleProfiler.profile("_exit_unshard_params_ctx"): + _exit_unshard_params_ctx(module, fsdp_state) + + +def _local_pre_state_dict_hook( + fsdp_state: _FSDPState, + module: nn.Module, + *args, + **kwargs, +) -> None: + """ + Hook that runs before model.state_dict() is called. Right now, pre-state_dict + hook is not supported by the PyTorch core. So this API is called from + `_local_post_state_dict_hook()` to simulate the case. + """ + if ( + _has_fsdp_params(fsdp_state, module) + and not _module_handle(fsdp_state, module).uses_sharded_strategy + ): + raise RuntimeError( + "``local_state_dict`` can only be used when parameters are flatten " + "and sharded." + ) + _common_pre_state_dict_hook(module, fsdp_state) + + +@no_type_check +def _local_post_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, + state_dict: Dict[str, Any], + prefix: str, +) -> Dict[str, Any]: + """ + This hook create a ShardedTensor from the local flat_param and replace + the state_dict[f"{prefix}{FLAT_PARAM}] with the ShardedTensor. No copy + will happen. The underlying storage is the same. + """ + + _replace_by_prefix(state_dict, f"{prefix}{FSDP_PREFIX}", prefix) + if not _has_fsdp_params(fsdp_state, module): + return state_dict + + # state_dict[f"{prefix}{FLAT_PARAM}"] exists and has the same tensor + # value as the flat_param but it is a pure Tensor because + # nn.Module.state_dict() will detach the parameter. Therefore, we need + # to get flat_param to get the metadata. + assert _module_handle(fsdp_state, module), "Should have returned early" + flat_param = _module_handle(fsdp_state, module).flat_param + # Constructs a ShardedTensor from the flat_param "without" padding. + # Removing the padding allows users to change the number of ranks + # when loading the local_state_dict. + full_numel = flat_param._unpadded_unsharded_size.numel() # type: ignore[attr-defined] + shard_offset = flat_param.numel() * fsdp_state.rank + valid_data_size = flat_param.numel() - flat_param._shard_numel_padded + if valid_data_size > 0: + # If FlatParameter is returned, FlatParameter._local_shard cause a + # pickling issue (can be torch.save but not torch.load). Since there + # is no benefit for state_dict to return the actual FlatParameter class, + # a view (which is a tensor) of the FlatParameter will be returned. + flat_param = flat_param[:valid_data_size].view(valid_data_size) + local_shards = [ + Shard.from_tensor_and_offsets(flat_param, [shard_offset], fsdp_state.rank) + ] + else: + local_shards = [] + sharded_tensor = init_from_local_shards( + local_shards, full_numel, process_group=fsdp_state.process_group + ) # type: ignore[assignment] + # TODO: Add DTensor state_dict support for LOCAL_STATE_DICT. 
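+    # Illustrative note (added commentary, not part of the original source):
+    # with world_size=2 and an unpadded flattened parameter of 11 elements,
+    # each rank's padded shard holds 6 elements; rank 0 contributes elements
+    # [0, 6) and rank 1 contributes [6, 11) after dropping its single padding
+    # element, so the ShardedTensor above is built with full_numel=11.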
+    if fsdp_state._state_dict_config.offload_to_cpu:
+        sharded_tensor = sharded_tensor.cpu()
+    state_dict[f"{prefix}{FLAT_PARAM}"] = sharded_tensor
+    return state_dict
+
+
+def _local_post_load_state_dict_hook(
+    module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
+) -> None:
+    pass
+
+
+def _local_pre_load_state_dict_hook(
+    module: nn.Module,
+    fsdp_state: _FSDPState,
+    state_dict: Dict[str, Any],
+    prefix: str,
+) -> None:
+    """
+    This hook finds the local flat_param for this FSDP module from the
+    state_dict. The flat_param should be a ShardedTensor. This hook converts
+    the ShardedTensor to a tensor. No copy happens unless padding is required.
+    """
+    _lazy_init(fsdp_state, module)
+    _replace_by_prefix(state_dict, prefix, f"{prefix}{FSDP_PREFIX}")
+    fqn = f"{prefix}{FSDP_PREFIX}{FLAT_PARAM}"
+    if fqn not in state_dict:
+        assert not _has_fsdp_params(fsdp_state, module), (
+            "No `FlatParameter` in `state_dict` for this FSDP instance "
+            "but it has parameters"
+        )
+        return
+    load_tensor = state_dict[fqn]
+    assert isinstance(
+        load_tensor, ShardedTensor
+    ), "Tensors in local_state_dict should be ShardedTensor."
+
+    # Convert the ShardedTensor to a Tensor.
+    flat_param = _module_handle(fsdp_state, module).flat_param
+    assert flat_param is not None
+    valid_data_size = flat_param.numel() - flat_param._shard_numel_padded
+    shards = load_tensor.local_shards()
+    if valid_data_size > 0:
+        assert len(shards), "load_local_state_dict assumes one shard per ShardedTensor."
+        load_tensor = shards[0].tensor
+
+        # Get the metadata of the flat_param to decide whether to pad the loaded
+        # tensor.
+        if flat_param._shard_numel_padded > 0:
+            assert load_tensor.numel() < flat_param.numel(), (
+                f"Local shard size = {flat_param.numel()} and the tensor in "
+                f"the state_dict is {load_tensor.numel()}."
+            )
+            load_tensor = F.pad(load_tensor, [0, flat_param._shard_numel_padded])
+    else:
+        load_tensor = flat_param
+    # TODO: Add DTensor state_dict support for LOCAL_STATE_DICT.
+    state_dict[fqn] = load_tensor
+
+
+def _sharded_pre_state_dict_hook(
+    fsdp_state: _FSDPState,
+    module: nn.Module,
+    *args,
+    **kwargs,
+) -> None:
+    """
+    Hook that runs before model.state_dict() is called. See
+    ``_full_pre_state_dict_hook`` for the details.
+    """
+    if (
+        _has_fsdp_params(fsdp_state, module)
+        and not _module_handle(fsdp_state, module).uses_sharded_strategy
+    ):
+        raise RuntimeError(
+            "``sharded_state_dict`` can only be used when parameters are flattened "
+            "and sharded."
+        )
+    _common_pre_state_dict_hook(module, fsdp_state)
+    # Setting offload_to_cpu here does not work even if offload_to_cpu is True.
+    # We have to create the ShardedTensor first and then move it to CPU.
+    _common_unshard_pre_state_dict_hook(
+        module,
+        fsdp_state,
+        offload_to_cpu=False,
+        rank0_only=False,
+    )
+
+
+@no_type_check
+def _sharded_post_state_dict_hook(
+    module: nn.Module,
+    fsdp_state: _FSDPState,
+    state_dict: Dict[str, Any],
+    prefix: str,
+) -> Dict[str, Any]:
+    """
+    The hook replaces the unflattened, unsharded parameter in the state_dict
+    with an unflattened, sharded parameter (a ShardedTensor).
+ """ + + def param_hook(state_dict: Dict[str, Any], prefix: str, fqn: str): + param = state_dict[fqn] + if not fsdp_state._state_dict_config._use_dtensor: + sharded_tensor = _ext_chunk_tensor( + tensor=param, + rank=fsdp_state.rank, + world_size=fsdp_state.world_size, + num_devices_per_node=fsdp_state._device_handle.device_count(), + pg=fsdp_state.process_group, + fsdp_extension=fsdp_state._fsdp_extension, + ) + else: + sharded_tensor = _ext_chunk_dtensor( + tensor=param, + rank=fsdp_state.rank, + device_mesh=fsdp_state._device_mesh, + fsdp_extension=fsdp_state._fsdp_extension, + ) + if fsdp_state._state_dict_config.offload_to_cpu: + sharded_tensor = sharded_tensor.cpu() + state_dict[fqn] = sharded_tensor + + return _common_unshard_post_state_dict_hook( + module, fsdp_state, state_dict, prefix, param_hook + ) + + +@no_type_check +def _sharded_post_load_state_dict_hook( + module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs +) -> None: + if _has_fsdp_params(fsdp_state, module): + with SimpleProfiler.profile("_exit_unshard_params_ctx"): + _exit_unshard_params_ctx(module, fsdp_state) + + +@no_type_check +def _sharded_pre_load_state_dict_hook( + module: nn.Module, + fsdp_state: _FSDPState, + state_dict: Dict[str, Any], + prefix: str, +) -> None: + """ + The hook combines the unflattened, sharded parameters (ShardedTensor) to + a new FlatParameter and shards the new FlatParameter to the local chunk. + """ + _lazy_init(fsdp_state, module) + if not _is_composable(fsdp_state): + _replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_PREFIX}") + if not _has_fsdp_params(fsdp_state, module): + return + + handle = _module_handle(fsdp_state, module) + if not handle.uses_sharded_strategy: + raise RuntimeError( + "load_sharded_state_dict can only be called when parameters " + "are flattened and sharded." + ) + fqn_to_param_ext = dict( + zip(handle.flat_param._fqns, handle.flat_param._param_extensions) + ) + + for fqn, _, _ in _param_name_infos(module, fsdp_state): + if not _is_composable(fsdp_state): + fqn_from_global_root = f"{prefix}{FSDP_PREFIX}{fqn}" + else: + fqn_from_global_root = f"{prefix}{fqn}" + try: + param = state_dict.pop(fqn_from_global_root) + except KeyError: + logger.warning( + f"Did not find param with FQN {fqn_from_global_root}, skipping it. " # noqa: G004 + "The weight will not be filled if you expect it to be." + ) + continue # TODO: Improve unittesting for state_dict finetuning + # cases: https://github.com/pytorch/pytorch/issues/109134 + + if not fsdp_state._state_dict_config._use_dtensor: + # All-gather the param (ShardedTensor) + param, shards = _ext_pre_load_state_dict_transform( + param, fsdp_state._fsdp_extension + ) + + assert len(shards) < 2, ( + "Expects 0 or 1 shard per rank " + f"but got {len(shards)} shards on rank {fsdp_state.rank}." 
+ ) + param_numel = param.size().numel() + dim_0_size = param.size()[0] + chunk_size = ( + math.ceil(dim_0_size / fsdp_state.world_size) + * param_numel + // dim_0_size + ) + if len(shards) == 1: + local_tensor = shards[0].tensor.flatten() + with SimpleProfiler.profile(SimpleProfiler.Type.H2D): + local_tensor = local_tensor.to(fsdp_state.compute_device) + num_padding = chunk_size - local_tensor.numel() + if num_padding > 0: + local_tensor = F.pad(local_tensor, [0, num_padding]) + else: + local_tensor = torch.zeros( + chunk_size, dtype=param.dtype, device=fsdp_state.compute_device + ) + tensor = torch.empty( + chunk_size * fsdp_state.world_size, + dtype=local_tensor.dtype, + device=fsdp_state.compute_device, + ) + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER): + dist.all_gather_into_tensor( + tensor, local_tensor, group=fsdp_state.process_group + ) + tensor = tensor.narrow(0, 0, param_numel).reshape(param.size()) + state_dict[fqn_from_global_root] = tensor + else: + if param.device != fsdp_state._device_mesh.device_type: + param = param.to(fsdp_state._device_mesh.device_type) + + parent_mesh = _mesh_resources.get_parent_mesh(fsdp_state._device_mesh) + local_tensor = _ext_all_gather_dtensor( + param, parent_mesh, fsdp_state._fsdp_extension + ) + + if fqn_to_param_ext.get(fqn) is not None: + ext = fqn_to_param_ext[fqn] + local_tensor = _ext_post_unflatten_transform( + local_tensor, ext, fsdp_state._fsdp_extension + ) + state_dict[fqn_from_global_root] = local_tensor + + with SimpleProfiler.profile("_enter_unshard_params_ctx"): + _enter_unshard_params_ctx(module, fsdp_state, writeback=True) + + +@contextlib.contextmanager +def _replace_with_full_state_dict_type(fsdp_state: _FSDPState) -> Generator: + old_state_dict_config = fsdp_state._state_dict_config + old_state_dict_type = fsdp_state._state_dict_type + fsdp_state._state_dict_config = FullStateDictConfig() + fsdp_state._state_dict_type = StateDictType.FULL_STATE_DICT + yield + fsdp_state._state_dict_config = old_state_dict_config + fsdp_state._state_dict_type = old_state_dict_type + + +@no_type_check +@torch.no_grad() +def _post_state_dict_hook( + module: nn.Module, + state_dict: Dict[str, Any], + prefix: str, + *args: Any, +) -> Dict[str, Any]: + """ + _post_state_dict_hook() is called after the state_dict() of this + FSDP module is executed. ``fsdp_state._state_dict_type`` is used to decide + what postprocessing will be done. + """ + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD: + context = _replace_with_full_state_dict_type(fsdp_state) + warnings.warn( + "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will" + "be returned." 
+ ) + else: + context = contextlib.nullcontext() + + with context: + _post_state_dict_hook_fn = { + StateDictType.FULL_STATE_DICT: _full_post_state_dict_hook, + StateDictType.LOCAL_STATE_DICT: _local_post_state_dict_hook, + StateDictType.SHARDED_STATE_DICT: _sharded_post_state_dict_hook, + } + processed_state_dict = _post_state_dict_hook_fn[fsdp_state._state_dict_type]( + module, fsdp_state, state_dict, prefix + ) + + if fsdp_state._is_root: + logger.info("FSDP finished processing state_dict(), prefix=%s", prefix) + for key, tensor in sorted(processed_state_dict.items()): + if key.startswith(prefix) and isinstance(tensor, torch.Tensor): + local_shape = tensor.shape + if isinstance(tensor, ShardedTensor): + local_shape = None + shards = tensor.local_shards() + if shards: + local_shape = shards[0].tensor.shape + elif isinstance(tensor, DTensor): + local_shape = tensor.to_local().shape + logger.info( + "FQN=%s: type=%s, shape=%s, local_shape=%s, dtype=%s, device=%s", + key, + type(tensor), + tensor.shape, + local_shape, + tensor.dtype, + tensor.device, + ) + + return processed_state_dict + + +@no_type_check +@torch.no_grad() +def _pre_state_dict_hook( + module: nn.Module, + *args, + **kwargs, +) -> None: + """ + This is called before the core state dict saving logic of ``module``. + ``fsdp_state._state_dict_type`` is used to decide what postprocessing will + be done. + """ + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD: + context = _replace_with_full_state_dict_type(fsdp_state) + warnings.warn( + "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will" + "be returned." + ) + else: + _set_use_dtensor(fsdp_state) + context = contextlib.nullcontext() + + with context: + _pre_state_dict_hook_fn = { + StateDictType.FULL_STATE_DICT: _full_pre_state_dict_hook, + StateDictType.LOCAL_STATE_DICT: _local_pre_state_dict_hook, + StateDictType.SHARDED_STATE_DICT: _sharded_pre_state_dict_hook, + } + _pre_state_dict_hook_fn[fsdp_state._state_dict_type]( + fsdp_state, + module, + *args, + **kwargs, + ) + + +@no_type_check +def _set_use_dtensor(fsdp_state: _FSDPState) -> None: + # If device_mesh is passed in when initalizing FSDP, we automatically turn the + # _use_dtensor flag to be true for ShardedStateDictConfig(). + if getattr(fsdp_state, "_device_mesh", None): + state_dict_type = fsdp_state._state_dict_type + if state_dict_type == StateDictType.LOCAL_STATE_DICT: + raise RuntimeError( + "Found state_dict_type LOCAL_STATE_DICT", + "DeviceMesh is not compatible with LOCAL_STATE_DICT.", + "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.", + ) + else: + fsdp_state._state_dict_config._use_dtensor = True + + +@no_type_check +@torch.no_grad() +def _pre_load_state_dict_hook( + module: nn.Module, + state_dict: Dict[str, Any], + prefix: str, + *args: Any, +) -> None: + """ + This is called before ``module._load_from_state_dict()``. + ``fsdp_state._state_dict_type`` is used to decide what preprocessing will + be done. + """ + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD: + context = _replace_with_full_state_dict_type(fsdp_state) + warnings.warn( + "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will" + "be returned." 
+ ) + else: + _set_use_dtensor(fsdp_state) + context = contextlib.nullcontext() + + _lazy_init(fsdp_state, module) + if fsdp_state._is_root: + SimpleProfiler.reset() + + with context: + _pre_load_state_dict_hook_fn = { + StateDictType.FULL_STATE_DICT: _full_pre_load_state_dict_hook, + StateDictType.LOCAL_STATE_DICT: _local_pre_load_state_dict_hook, + StateDictType.SHARDED_STATE_DICT: _sharded_pre_load_state_dict_hook, + } + # Code that is common for all state_dict impls + if fsdp_state._device_handle.is_available(): + fsdp_state._device_handle.synchronize() + # Dispatch into state_dict specific implementation of pre-hook. + _pre_load_state_dict_hook_fn[fsdp_state._state_dict_type]( + module, fsdp_state, state_dict, prefix + ) + + +@no_type_check +@torch.no_grad() +def _post_load_state_dict_hook( + module: nn.Module, + incompatible_keys: Tuple[List[str], List[str]], + *args: Any, +) -> None: + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD: + context = _replace_with_full_state_dict_type(fsdp_state) + warnings.warn( + "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will" + "be returned." + ) + else: + context = contextlib.nullcontext() + + with context: + _post_load_state_dict_hook_fn = { + StateDictType.FULL_STATE_DICT: _full_post_load_state_dict_hook, + StateDictType.LOCAL_STATE_DICT: _local_post_load_state_dict_hook, + StateDictType.SHARDED_STATE_DICT: _sharded_post_load_state_dict_hook, + } + # Code that is common for all state_dict impls + # Dispatch into state_dict type specific implementation of post-hook for + # loading state_dict. + _post_load_state_dict_hook_fn[fsdp_state._state_dict_type](module, fsdp_state) + + # When reporting incompatible keys, trim FSDP prefixes. + missing_keys = incompatible_keys[0] + unexpected_keys = incompatible_keys[1] + for i in range(len(missing_keys)): + missing_keys[i] = clean_tensor_name(missing_keys[i]) + + for i in range(len(unexpected_keys)): + unexpected_keys[i] = clean_tensor_name(unexpected_keys[i]) + + if fsdp_state._is_root: + SimpleProfiler.dump_and_reset("FSDP model load_state_dict profiling: ") + + +def _register_all_state_dict_hooks(state: _FSDPState): + """ + Registers pre-save, post-save, pre-load, and post-load state dict hooks. 
+ """ + for hook_registration_fn_str, hook, hook_registration_fn_kwargs in ( + ("register_state_dict_pre_hook", _pre_state_dict_hook, {}), + ("_register_state_dict_hook", _post_state_dict_hook, {}), + ( + "_register_load_state_dict_pre_hook", + _pre_load_state_dict_hook, + {"with_module": True}, + ), + ("register_load_state_dict_post_hook", _post_load_state_dict_hook, {}), + ): + _register_state_dict_hooks_base( + state, hook_registration_fn_str, hook, hook_registration_fn_kwargs + ) + + +@no_type_check +def _register_state_dict_hooks_base( + state: _FSDPState, + hook_registration_fn_name: str, + hook: Callable, + hook_registration_fn_kwargs: Dict[str, Any], +) -> None: + """Registers ``hook`` using ``hook_registration_fn``.""" + if not _is_composable(state): + getattr(state, hook_registration_fn_name)(hook, **hook_registration_fn_kwargs) + else: + handle = state._handle + if handle: + getattr(handle._fully_sharded_module, hook_registration_fn_name)( + hook, **hook_registration_fn_kwargs + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c768b73b8f95c4465c30ec08929afb82b37923b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py @@ -0,0 +1,237 @@ +import functools +from contextlib import contextmanager +from dataclasses import dataclass, field +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple + +import torch +import torch.nn as nn + + +@dataclass +class TracingConfig: + """ + This represents a symbolic tracing configuration. + + Args: + tracer (torch.fx.Tracer): An instance of :class:`torch.fx.Tracer` to + use for symbolic tracing. The default value is the native + :class:`torch.fx.Tracer` constructed with default arguments. + However, the user may want to pass a different value such as the + ``HFTracer`` for models in the HuggingFace Transformers_ library. + .. _Transformers: https://huggingface.co/docs/transformers/index + concrete_args (Optional[Dict[str, Any]]): Concrete arguments that + should not be treated as ``torch.fx.Proxy`` when tracing the + module ``forward()``. Passing ``concrete_args`` allows partially + specializing the forward, e.g. to remove control flow or data + structures. This ``concrete_args`` here is the same argument used + in :meth:`~torch.fx.Tracer.trace`. + """ + + tracer: torch.fx.Tracer = field(default_factory=torch.fx.Tracer) + concrete_args: Optional[Dict[str, Any]] = None + + +class _ParamUsageInfo(NamedTuple): + """ + This is used for ``_ExecutionInfo.module_to_param_usage_infos`` to record + execution information. The ``dict`` maps modules to a list of these + ``_ParamUsageInfo`` instances, where each instance represents a group of + parameters used together. + + Specifically, for each module key in the ``dict``, each instance of this + class represents either: + (1) the module and some sublist of its ``named_parameters()`` used + together in execution (see ``_patched_create_proxy()``), or + (2) a submodule and all of ``submodule.named_parameters()`` (see + ``_patched_call_module()``). + + Type (1) corresponds to directly using parameters in ops without calling + ``forward()``, and type (2) corresponds to calling ``forward()``. The + mapped-to lists in the ``dict`` follow the execution order. 
+ """ + + module: nn.Module + named_params: List[Tuple[str, nn.Parameter]] + + +class _ExecutionInfo: + """ + This represents the execution order information from the forward pass. + + Attributes: + curr_module (nn.Module): Current module being traced. + module_forward_order (List[nn.Module]): The modules in (pre-)forward + order, i.e. the order in which their ``forward()`` methods are + called. Each call to a module's ``forward()`` corresponds to one + element in the list. + module_to_param_usage_infos (Dict[nn.Module, List[_ParamUsageInfo]]): + Maps a module to a list of module execution infos. See + :class:`_ParamUsageInfo` for details. + param_forward_order (List[nn.Parameter]): The parameters in forward + execution order, where only a parameter's first participation is + included. + visited_params (Set[nn.Parameter]): The parameters visited so far + during the trace. This is only used during tracing for fast + membership check. Invariant: The parameters in + ``param_forward_order`` are exactly those in ``visited_params``. + """ + + def __init__(self, root_module: nn.Module) -> None: + self.curr_module: nn.Module = root_module + self.module_forward_order: List[nn.Module] = [root_module] + self.module_to_param_usage_infos: Dict[nn.Module, List[_ParamUsageInfo]] = { + root_module: [] + } + self.param_forward_order: List[nn.Parameter] = [] + self.visited_params: Set[nn.Parameter] = set() + + +class _ExecOrderTracer: + def __init__(self) -> None: + self.exec_info: Optional[_ExecutionInfo] = None + + @contextmanager + def patch_tracer(self, tracer: torch.fx.Tracer, root_module: nn.Module): + self.exec_info = _ExecutionInfo(root_module) + orig_call_module = tracer.call_module + orig_create_proxy = tracer.create_proxy + tracer.call_module = functools.partial( + self._patched_call_module, orig_call_module, self.exec_info + ) + fqn_to_param = dict(root_module.named_parameters()) + tracer.create_proxy = functools.partial( + self._patched_create_proxy, + orig_create_proxy, + self.exec_info, + fqn_to_param, + ) + try: + yield + finally: + tracer.call_module = orig_call_module + tracer.create_proxy = orig_create_proxy + + def _patched_call_module( + self, + call_module: Callable, + exec_info: _ExecutionInfo, + # Below are the expected arguments to `call_module()` + module: nn.Module, + forward: Callable, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> Any: + """ + Overrides ``call_module`` to save execution information to + ``exec_info``. Note that ``call_module`` is called during symbolic + tracing for each non-root module. + + Args: + call_module (Callable): Original ``call_module`` to override. + exec_info (_ExecutionInfo): Used to record execution information. + module (nn.Module): Module corresponding to this ``call_module``. + forward (Callable): ``forward()`` method of ``module`` to be called + for this ``call_module``. + args (Tuple[Any, ...]): Positional arguments for ``forward``. + kwargs (Dict[str, Any]): Keyword arguments for ``forward``. + + Returns: + Same return value as ``call_module``. 
+ """ + exec_info.module_forward_order.append(module) + named_params = list(module.named_parameters()) + curr_module = exec_info.curr_module + if named_params: + assert ( + curr_module in exec_info.module_to_param_usage_infos + ), "The current module should have already been processed by a patched `call_module`" + exec_info.module_to_param_usage_infos[exec_info.curr_module].append( + _ParamUsageInfo(module, named_params) + ) + prev_curr_module = curr_module + exec_info.curr_module = module + exec_info.module_to_param_usage_infos[module] = [] + output = call_module(module, forward, args, kwargs) + exec_info.curr_module = prev_curr_module + return output + + def _patched_create_proxy( + self, + create_proxy: Callable, + exec_info: _ExecutionInfo, + fqn_to_param: Dict[str, nn.Parameter], + # Below are the expected arguments to `create_proxy()` + kind: str, + target: torch.fx.node.Target, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + name: Optional[str] = None, + type_expr: Optional[Any] = None, + proxy_factory_fn: Optional[Callable[[torch.fx.Node], torch.fx.Proxy]] = None, + ) -> torch.fx.Proxy: + """ + Overrides ``create_proxy`` to save execution information to + ``exec_info``. Note that ``create_proxy`` is called during symbolic + tracing for each leaf function/method/module. + + Args: + create_proxy (Callable): Original ``create_proxy`` to override. + exec_info (_ExecutionInfo): Used to record execution information. + fqn_to_param (Dict[str, nn.Parameter]): ``dict`` version of the + root module's ``named_parameters()`` with FQN as key and + parameter as value. + kind (str): Kind of the target method ('call_function', + 'call_method', 'get_attr', 'call_module', 'placeholder', or + 'output'). See :class:`torch.fx.Graph` for details. This is + passed to ``create_proxy``. + target (torch.fx.node.Target): Contains the string name of the + function/method/module. This is passed to ``create_proxy``. + args (Tuple[Any, ...]): Positional arguments for the function/ + method/module. This is passed to ``create_proxy``. + kwargs (Dict[str, Any]): Keyword arguments for the function/method/ + module. This is passed to ``create_proxy`` + name (Optional[str]): An optional string name for the ``Node`` + created in ``create_proxy``. This is passed to + ``create_proxy``. + type_expr (Optional[Any]): An optional type annotation representing + the Python type that the output of the node has. This is passed + to ``create_proxy``. + proxy_factory_fn (Callable[[torch.fx.Node], torch.fx.Proxy]): + An alternative proxy constructor used in ``create_proxy``. This + is passed to ``create_proxy``. + + Returns: + torch.fx.Proxy: Created ``Node`` wrapped in a ``Proxy`` object. 
+ """ + proxy = create_proxy( + kind, target, args, kwargs, name, type_expr, proxy_factory_fn + ) + curr_module = exec_info.curr_module + if kind in ("call_function", "call_method"): + if args is not None: + named_params: List[Tuple[str, nn.Parameter]] = [] + for arg in args: + if ( + isinstance(arg, torch.fx.Proxy) + and arg.node.target in fqn_to_param + ): + param = fqn_to_param[arg.node.target] + named_params.append((arg.node.target, param)) + if param not in exec_info.visited_params: + exec_info.visited_params.add(param) + exec_info.param_forward_order.append(param) + if named_params: + exec_info.module_to_param_usage_infos[curr_module].append( + _ParamUsageInfo(curr_module, named_params) + ) + elif kind == "call_module": + named_params = list(curr_module.named_parameters()) + if named_params: + exec_info.module_to_param_usage_infos[curr_module].append( + _ParamUsageInfo(curr_module, named_params) + ) + for _, param in named_params: + if param not in exec_info.visited_params: + exec_info.visited_params.add(param) + exec_info.param_forward_order.append(param) + return proxy diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..68c38d15daf1f97b54fa3366c2b668c934c7a17d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py @@ -0,0 +1,113 @@ +""" +NOTE: This file must be imported like +``import torch.distributed.fsdp._traversal_utils`` and not like +``from torch.distirbuted.fsdp._traversal_utils import ...`` to avoid circular +imports. For brevity, we may import the file as ``traversal_utils``. +""" + +import collections +from typing import Deque, List, Set, Tuple + +import torch.nn as nn +from torch.distributed._composable.contract import _get_registry +from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state + + +""" +[Note: FSDP State Traversal] +For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel`` +module wrapping a fully sharded module, and for the non-wrapper code path, +``_FSDPState`` is an object that gets embedded on a fully sharded module. +See [Note: Fully Sharded Module] for the definition. + +There are three common traversal idioms: Given a root module, +- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree. +- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the +tree (i.e. those with ``_is_root == True``). +- ``_get_fsdp_handles()``returns all ``FlatParamHandle`` s in the tree. + +All of these methods must take in the root module (i.e. an ``nn.Module``) and +not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph +traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal. +""" + + +def _composable(module: nn.Module) -> bool: + """ + Returns if ``module`` can compose with ``fully_shard``. + """ + # TODO: Add any other composable APIs that are mutually exclusive. + registry = _get_registry(module) + if registry is None: + return True + return "replicate" not in registry + + +# TODO (awgu): We may be able to remove this function if we retired the +# `use_orig_params=False` code path since so far we only need the module for +# `FlatParameter` registration, which is not needed for `use_orig_params=True`. 
+def _get_fsdp_states_with_modules( + module: nn.Module, +) -> Tuple[List[_FSDPState], List[nn.Module]]: + """ + Returns a tuple containing: + 1. A list of the ``_FSDPState`` instances in the module tree rooted at + ``module`` without any duplicates and following the ``module.modules()`` + traversal order (which is assumed to be depth-first). + 2. A corresponding list of the modules owning the states in the first list. + + For the wrapper code path, both returned lists are the same, each + containing all ``FullyShardedDataParallel`` instances. For the composable + code path, this returns a list of all composable state instances and a list + of the corresponding fully sharded modules. See [Note: Fully Sharded + Module]. + + NOTE: The traversal does not proceed into any module annotated by an + incompatible API (e.g. ``replicate``). + """ + fsdp_states: List[_FSDPState] = [] + fsdp_modules: List[nn.Module] = [] + # Track the visited FSDP states since multiple modules may share the same + # one and we want to return a de-duplicated list + visited_fsdp_states: Set[_FSDPState] = set() + # Track the visited modules in case of shared modules, which implies the + # module graph is no longer a tree + visited_modules: Set[nn.Module] = set() + + # Perform depth-first search from `module` to ensure that we do not + # traverse into an incompatible API's subtree (use DFS instead of BFS to + # match `.modules()` order) + deque: Deque[nn.Module] = collections.deque([module]) + while deque: + submodule = deque.popleft() + visited_modules.add(submodule) + if not _composable(submodule): + continue + for child_module in reversed(list(submodule.children())): + if child_module not in visited_modules: + deque.appendleft(child_module) + optional_state = _get_module_fsdp_state(submodule) + if optional_state is not None and optional_state not in visited_fsdp_states: + visited_fsdp_states.add(optional_state) + fsdp_states.append(optional_state) + fsdp_modules.append(submodule) + return fsdp_states, fsdp_modules + + +def _get_fsdp_states(module: nn.Module) -> List[_FSDPState]: + """See :func:`_get_fsdp_states_with_modules`.""" + fsdp_states, _ = _get_fsdp_states_with_modules(module) + return fsdp_states + + +def _get_fsdp_handles(module: nn.Module) -> List: + """ + Returns all ``FlatParamHandle`` s in the module tree rooted at ``module`` + following the rules in :func:`_get_fsdp_state`. 
+ """ + handles = [ + fsdp_state._handle + for fsdp_state in _get_fsdp_states(module) + if fsdp_state._handle is not None + ] + return handles diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..afa6cebe4abe8d9174896c88959409cb64941111 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py @@ -0,0 +1,357 @@ +import contextlib +import warnings +from typing import cast, Generator + +import torch +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed.fsdp._common_utils import ( + _FSDPState, + _has_fsdp_params, + _module_handle, + HandleTrainingState, + TrainingState, +) +from torch.distributed.fsdp._runtime_utils import ( + _get_fsdp_root_states_with_modules, + _lazy_init, + _reset_flat_param_grad_info_if_needed, + _reshard, + _reshard_grads, + _unshard, + _unshard_grads, +) +from torch.distributed.utils import _p_assert + +from ._flat_param import FlatParamHandle + +FLAT_PARAM = "_flat_param" + + +@torch.no_grad() +def _writeback_to_local_shard( + handle: FlatParamHandle, + writeback_grad: bool, +): + """ + For the handle, writes back the this rank's shard of the unsharded + flattened parameter to the sharded flattened parameter. If + ``writeback_grad=True``, then writes back to the sharded gradient as + well. + + Precondition: The handle's ``FlatParameter`` 's data points to the + padded unsharded flattened parameter. + """ + + def _get_shard(flat_param_or_grad: torch.Tensor) -> torch.Tensor: + if handle.uses_sharded_strategy: + # For sharded strategies, get the *unpadded* shard instead of + # the *padded* shard to persist user changes to the padding + # (though FSDP does not explicitly support this) + shard, _ = FlatParamHandle._get_unpadded_shard( + flat_param_or_grad, + handle.rank, + handle.world_size, + ) + return shard + # For `NO_SHARD`, the `flat_param` or its gradient may be modified, + # so we write it back directly + return flat_param_or_grad + + param_shard = _get_shard(handle.flat_param) + handle.flat_param._local_shard[: param_shard.numel()].copy_(param_shard) # type: ignore[attr-defined] + if writeback_grad: + existing_grad = handle.sharded_grad + if existing_grad is not None: + assert handle.flat_param.grad is not None + grad_shard = _get_shard(handle.flat_param.grad) + existing_grad[: grad_shard.numel()].copy_(grad_shard) + + +def _deregister_flat_param(state: _FSDPState, module: nn.Module) -> None: + """ + De-registers the flattened parameter from the wrapped module, hiding it + from ``nn.Module`` methods. + + We do not use ``del`` because we want ``FLAT_PARAM`` to always be an + attribute but dynamically change whether it is visible to ``nn.Module`` + methods. + """ + if _has_fsdp_params(state, module): + # TODO: figure out the case for the composable APIs. + cast(nn.Module, module.module)._parameters.pop(FLAT_PARAM, None) + + +def _register_flat_param(state: _FSDPState, module: nn.Module) -> None: + """ + Registers the flattened parameter to the wrapped module, making it + visible to ``nn.Module`` methods. + + We do not use :meth:`nn.Module.register_parameter` because we want + ``FLAT_PARAM`` to always be an attribute but dynamically change whether + it is visible to ``nn.Module`` methods. 
+ """ + handle = _module_handle(state, module) + if _has_fsdp_params(state, module): + # TODO: figure out the case for the composable APIs. + cast(nn.Module, module.module)._parameters[FLAT_PARAM] = handle.flat_param + + +@contextlib.contextmanager +def _unflatten_as_params(state: _FSDPState, module: nn.Module) -> Generator: + """ + Assumes that the flattened parameter is unsharded. When in the context, + de-registers the flattened parameter and unflattens the original + parameters as ``nn.Parameter`` views into the flattened parameter. + After the context, re-registers the flattened parameter and restores + the original parameters as ``Tensor`` views into the flattened + parameter. + """ + handle = _module_handle(state, module) + if not handle: + yield + else: + _deregister_flat_param(state, module) + try: + with handle.unflatten_as_params(): + yield + finally: + if not handle._use_orig_params: + _register_flat_param(state, module) + + +def _validate_unshard_params_args( + state: _FSDPState, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +) -> None: + if with_grads and (offload_to_cpu or not state._use_orig_params): + raise NotImplementedError( + f"with_grads={with_grads}, " + f"use_orig_params={state._use_orig_params}, " + f"offload_to_cpu={offload_to_cpu} " + f"is not supported yet" + ) + if offload_to_cpu and state._handle and (not state._handle.uses_sharded_strategy): + raise NotImplementedError( + "offload_to_cpu=True and NO_SHARD is not supported yet" + ) + if writeback and rank0_only: + # TODO: Rank 0 can broadcast the `FlatParameter` to allow all ranks to + # persist the changes. + raise NotImplementedError( + "writeback=True and rank0_only=True is not supported yet" + ) + if offload_to_cpu and not rank0_only: + warnings.warn( + "offload_to_cpu=True and rank0_only=False may result in the" + "unsharded parameters being redundantly copied to CPU memory for " + "GPUs sharing the same CPU memory, which risks CPU OOM. We " + "recommend using offload_to_cpu=True with rank0_only=True." + ) + + +@contextlib.contextmanager +def _unshard_fsdp_state_params( + module: nn.Module, + state: _FSDPState, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +): + """ + This unshards the parameters for a single FSDP state ``state`` that + corresponds to ``module``. + """ + _validate_unshard_params_args( + state, writeback, rank0_only, offload_to_cpu, with_grads + ) + state._device_handle.synchronize() + # If handles are shared by other module(s), the handle may be already unsharded. 
+ maybe_handle = _module_handle(state, module) + handle = None + if ( + maybe_handle + and maybe_handle._training_state != HandleTrainingState.SUMMON_FULL_PARAMS + ): + handle = maybe_handle + if not handle: + yield + return + + assert ( + handle._training_state == HandleTrainingState.IDLE + ), f"Expects the handle training to be IDLE but got {handle._training_state}" + + handle._training_state = HandleTrainingState.SUMMON_FULL_PARAMS + + _reset_flat_param_grad_info_if_needed(handle) + free_unsharded_flat_param = handle.needs_unshard() + # No need to call `wait_stream()` since we unshard in the computation + # stream directly + computation_stream = state._device_handle.current_stream() + _unshard(state, handle, computation_stream, computation_stream) + if with_grads: + _unshard_grads(handle) + + if rank0_only and state.rank != 0: + # Free the unsharded flattened parameter early + _reshard(state, handle, free_unsharded_flat_param) + if with_grads: + _reshard_grads(handle) + try: + yield + finally: + handle._training_state = HandleTrainingState.IDLE + else: + # Unflatten the unsharded flattened parameters + with contextlib.ExitStack() as stack: + # Invariant: rank == 0 or !rank0_only + if offload_to_cpu and handle.uses_sharded_strategy: + stack.enter_context(handle.to_cpu()) + # NOTE: Since PyTorch enforces that a parameter and its + # gradients need to match metadata (e.g. device), we must + # move gradients to CPU *after* we move parameters. + # NOTE: This assumes 1 `FlatParameter` + if not state._use_orig_params: + stack.enter_context(_unflatten_as_params(state, module)) + try: + yield + finally: + stack.close() + if writeback: + _writeback_to_local_shard(handle, with_grads) + _reshard(state, handle, free_unsharded_flat_param) + if with_grads: + _reshard_grads(handle) + handle._training_state = HandleTrainingState.IDLE + + +@contextlib.contextmanager +def _unshard_params_recurse( + module: nn.Module, + state: _FSDPState, + recurse: bool, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +): + """ + This is a helper for :func:`_unshard_params` that recursively calls + :func:`_unshard_fsdp_state_params` on FSDP states if ``recurse=True``. + NOTE: This runs lazy initialization. + """ + _validate_unshard_params_args( + state, writeback, rank0_only, offload_to_cpu, with_grads + ) + if recurse: + with contextlib.ExitStack() as stack: + # TODO (awgu): The traversal function does not traverse through + # incompatible composable APIs. Verify if this is the desired + # behavior for this function. 
+ for state, fsdp_module in zip( + *traversal_utils._get_fsdp_states_with_modules(module) + ): + stack.enter_context( + _unshard_params_recurse( + module=fsdp_module, + state=state, + recurse=False, + writeback=writeback, + rank0_only=rank0_only, + offload_to_cpu=offload_to_cpu, + with_grads=with_grads, + ) + ) + yield + return + _lazy_init(state, module) + if state.training_state == TrainingState.FORWARD_BACKWARD: + raise AssertionError( + "Cannot manually unshard parameters during forward/backward" + ) + elif state.training_state == TrainingState.SUMMON_FULL_PARAMS: + raise AssertionError( + "Cannot manually unshard parameters when already unsharding parameters" + ) + with _unshard_fsdp_state_params( + module=module, + state=state, + writeback=writeback, + rank0_only=rank0_only, + offload_to_cpu=offload_to_cpu, + with_grads=with_grads, + ): + try: + state.training_state = TrainingState.SUMMON_FULL_PARAMS + yield + finally: + state.training_state = TrainingState.IDLE + + +@contextlib.contextmanager +def _unshard_params( + module: nn.Module, + recurse: bool, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +): + """ + This unshards FSDP-managed parameters for all modules with FSDP applied in + the module tree rooted at ``module``. + """ + root_fsdp_states, root_fsdp_modules = _get_fsdp_root_states_with_modules(module) + with contextlib.ExitStack() as stack: + for root_fsdp_state, root_fsdp_module in zip( + root_fsdp_states, root_fsdp_modules + ): + stack.enter_context( + _unshard_params_recurse( + module=root_fsdp_module, + state=root_fsdp_state, + recurse=recurse, + writeback=writeback, + rank0_only=rank0_only, + offload_to_cpu=offload_to_cpu, + with_grads=with_grads, + ) + ) + yield + return + + +def _deregister_orig_params(state: _FSDPState, module: nn.Module) -> None: + """ + Deregisters the original parameters; registers the ``FlatParameter``. + """ + handle = _module_handle(state, module) + if not handle: + return + _p_assert( + handle._use_orig_params, + f"Inconsistent `_use_orig_params` -- FSDP: {state._use_orig_params} " + f"handle: {handle._use_orig_params}", + ) + handle._deregister_orig_params() + _register_flat_param(state, module) + + +def _register_orig_params(state: _FSDPState, module: nn.Module) -> None: + """ + Deregisters the ``FlatParameter``; registers the original parameters. 
+ """ + handle = _module_handle(state, module) + if not handle: + return + _deregister_flat_param(state, module) + if handle.is_sharded(handle.flat_param): + handle._use_sharded_views() + handle._use_sharded_grad_views() + else: + handle._use_unsharded_views(as_params=True) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..16f521f65b8d95ccbb719ebdfa916ea1b6efbedc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py @@ -0,0 +1,262 @@ +import collections +import functools +import inspect +import warnings +from functools import partial +from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union + +import torch.nn as nn +from torch.distributed.fsdp._common_utils import ( + _get_module_fsdp_state, + _override_module_mixed_precision, +) + +from torch.distributed.fsdp.wrap import ( + _construct_wrap_fn, + _or_policy, + _Policy, + _post_order_apply, + _recursive_wrap, + _run_mixed_precision_override_policy, + _wrap_module_cls_individually, +) + + +def _auto_wrap( + root_module: nn.Module, + policy: Union[Callable, _Policy], + ignored_modules: Set[nn.Module], + ignored_params: Set[nn.Parameter], + root_kwargs: Dict[str, Any], + fsdp_fn: Callable, # e.g. `FullyShardedDataParallel` or `fully_shard` +): + """ + Auto wraps modules in ``root_module`` 's tree according to ``policy`` + following a post-order traversal. + + Precondition: ``root_kwargs`` should contain all arguments except + ``module``. This function accepts the kwargs dict directly since it gets + forwarded into the post-order traversal function. + """ + mixed_precision = root_kwargs["mixed_precision"] + is_wrapper = inspect.isclass(fsdp_fn) + # TODO: We may relax this no-nested-wrapping constraint to support manual + # wrapping followed by auto wrapping. 
+ _check_nested_wrapping(root_module) + + if isinstance(policy, _Policy): + root_kwargs["auto_wrap_policy" if is_wrapper else "policy"] = None + target_module_to_kwargs = policy._run_policy( + root_module, ignored_modules, root_kwargs + ) + if mixed_precision is not None: + target_module_to_kwargs = _run_mixed_precision_override_policy( + root_module, + mixed_precision._module_classes_to_ignore, + ignored_modules, + root_kwargs, + target_module_to_kwargs, + ) + overridden_module_classes = _override_module_mixed_precision( + root_module, mixed_precision._module_classes_to_ignore + ) + _warn_on_overridden_mixed_precision(overridden_module_classes) + use_orig_params = root_kwargs.get("use_orig_params", False) + _validate_frozen_params( + root_module, + set(target_module_to_kwargs.keys()), + ignored_params, + use_orig_params, + ) + wrap_fn = _construct_wrap_fn(root_module, target_module_to_kwargs, fsdp_fn) + _post_order_apply(root_module, wrap_fn) + return + + recursive_wrap_kwargs = { + "module": root_module, + "auto_wrap_policy": policy, + "wrapper_cls": fsdp_fn, + "ignored_modules": ignored_modules, + "ignored_params": ignored_params, + "only_wrap_children": True, + } + if mixed_precision is not None: + # Wrap modules of the ignored types separately and register forward + # hooks to cast to fp32 and back to the original dtype, respectively + overridden_module_classes = _override_module_mixed_precision( + root_module, mixed_precision._module_classes_to_ignore + ) + policy = functools.partial( + _or_policy, + policies=[ + policy, + partial( + _wrap_module_cls_individually, + module_classes=mixed_precision._module_classes_to_ignore, + ), + ], + ) + recursive_wrap_kwargs["auto_wrap_policy"] = policy + _warn_on_overridden_mixed_precision(overridden_module_classes) + _recursive_wrap(**recursive_wrap_kwargs, **root_kwargs) # type: ignore[arg-type] + + +def _check_nested_wrapping(root_module: nn.Module): + for module_name, module in root_module.named_modules(): + if _get_module_fsdp_state(module) is not None: + raise ValueError( + "FSDP auto wrapping requires modules to not already have " + f"FSDP applied but found {module_name} in\n{root_module}" + ) + + +def _warn_on_overridden_mixed_precision( + overridden_module_classes: Set[Type[nn.Module]], +): + if len(overridden_module_classes) == 0: + return + warnings.warn( + "Both mixed precision and an auto_wrap_policy were specified to FSDP, " + f"where the wrapped module has submodules of type:\n{overridden_module_classes}\n" + "These modules will be wrapped as separate FSDP instacnes with mixed " + "precision disabled." + ) + + +def _validate_frozen_params( + root_module: nn.Module, + modules_to_wrap: Set[nn.Module], + ignored_params: Set[nn.Parameter], + use_orig_params: bool, +): + """ + This checks that, given ``modules_to_wrap``, each module would manage + parameters that are uniformly frozen or non-frozen. This uniformity + requirement is strict for ``use_orig_params=False`` (hard error) and highly + recommended for ``use_orig_params=True`` (user warning). 
+ """ + post_order_named_modules = _get_post_order_named_modules(root_module) + visited_modules: Set[nn.Module] = set() + for module_name, module in post_order_named_modules: + if module in modules_to_wrap: + param_to_fqn = _get_managed_param_to_fqn( + module, ignored_params, visited_modules, module_name + ) + frozen_param_fqns: List[str] = [] + frozen_param_numel = 0 + nonfrozen_param_fqns: List[str] = [] + nonfrozen_param_numel = 0 + for param, fqn in param_to_fqn.items(): + if param.requires_grad: + nonfrozen_param_fqns.append(fqn) + nonfrozen_param_numel += param.numel() + else: + frozen_param_fqns.append(fqn) + frozen_param_numel += param.numel() + if len(frozen_param_fqns) > 0 and len(nonfrozen_param_fqns) > 0: + msg = f"{module_name} has both parameters with requires_grad=True and False." + if use_orig_params: + total_param_numel = frozen_param_numel + nonfrozen_param_numel + msg += ( + " We do not recommend wrapping such modules since " + "the gradient memory usage will be higher than expected " + f"({total_param_numel} numel instead of {nonfrozen_param_numel} numel " + "before sharding via reduce-scatter). " + ) + else: + msg += " FSDP does not support wrapping such modules when use_orig_params=False. " + msg += "If possible, wrap the frozen parameters with FSDP separately.\n" + msg += ( + f"The following parameters have requires_grad=True:\n{nonfrozen_param_fqns}\n" + f"The following parameters have requires_grad=False:\n{frozen_param_fqns}" + ) + if use_orig_params: + warnings.warn(msg) + else: + raise ValueError(msg) + + +def _get_post_order_named_modules( + root_module: nn.Module, +) -> List[Tuple[str, nn.Module]]: + """ + This returns the named modules following a post-order traversal, which is a + valid reverse topological sort. We achieve this using the reverse of a + stack-based DFS order instead of reversing ``root_module.named_modules()`` + since the former gives the modules in registration order at each level in + the module tree (as opposed to the reverse), which allows us to error/warn + on the first registered module that violates the condition. + + For example, consider the following module structure: + M( + S1(), + S2( + SS1(), + SS2(), + ), + S3(), + ) + The reverse DFS order is [S1, SS1, SS2, S2, S3, M], while the reverse + ``named_modules()`` order is [S3, SS2, SS1, S2, S1, M]. + """ + visited_modules = {root_module} + stack = [("", root_module)] + # Append and reverse at the end for linear-time algorithm + reverse_post_order_named_modules: List[Tuple[str, nn.Module]] = [] + while stack: + module_name, module = stack.pop() + reverse_post_order_named_modules.append((module_name, module)) + for child_module_name, child_module in module.named_children(): + if child_module is None: # only for overrides of `named_children()` + continue + if child_module not in visited_modules: + visited_modules.add(child_module) + if module_name != "": + child_module_name = module_name + "." + child_module_name + stack.append((child_module_name, child_module)) + post_order_named_modules = list(reversed(reverse_post_order_named_modules)) + return post_order_named_modules + + +def _get_managed_param_to_fqn( + module_to_wrap: nn.Module, + ignored_params: Set[nn.Parameter], + visited_modules: Set[nn.Module], + root_prefix: str, +) -> Dict[nn.Parameter, str]: + """ + This returns a dict that maps managed parameter to its FQN for the given + ``module_to_wrap``. 
The dict's keys are exactly the parameters that would + be managed by the module, where this is achieved by calling this function + on the modules to wrap in reverse topological order, destructively updating + ``visited_modules``, and not traversing into those modules. The FQNs are + prefixed from the root (via ``root_prefix``) to be more informative. + + NOTE: This function is meant to be called pre-wrapping and iteratively in + reverse topological order to cover the full module tree. This differs from + the ``_get_param_to_fqn()`` function meant to be called post-wrapping and + on the full module tree in one shot. Given those differences, we do not try + to unify the two. + """ + param_to_fqn: Dict[nn.Parameter, str] = {} + # Run BFS (or any tree traversal works) + queue = collections.deque([(module_to_wrap, root_prefix)]) + visited_modules.add(module_to_wrap) + while queue: + module, prefix = queue.popleft() + for param_name, param in module.named_parameters(recurse=False): + if param not in ignored_params: + fqn = param_name if prefix == "" else prefix + "." + param_name + param_to_fqn[param] = fqn + for child_module_name, child_module in module.named_children(): + if child_module is None: # only for overrides of `named_children()` + continue + if child_module not in visited_modules: + visited_modules.add(child_module) + child_prefix = ( + child_module_name + if prefix == "" + else prefix + "." + child_module_name + ) + queue.append((child_module, child_prefix)) + return param_to_fqn diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/api.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/api.py new file mode 100644 index 0000000000000000000000000000000000000000..0272ee0c57c9fce3b182738d47763b4585883c0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/api.py @@ -0,0 +1,410 @@ +""" +This file includes public APIs for FSDP such as the classes used for the +constructor arguments. +""" + +from dataclasses import dataclass +from enum import auto, Enum + +from typing import Optional, Sequence, Type + +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +__all__ = [ + "ShardingStrategy", + "BackwardPrefetch", + "MixedPrecision", + "CPUOffload", + "StateDictType", + "StateDictConfig", + "FullStateDictConfig", + "LocalStateDictConfig", + "ShardedStateDictConfig", + "OptimStateDictConfig", + "FullOptimStateDictConfig", + "LocalOptimStateDictConfig", + "ShardedOptimStateDictConfig", + "StateDictSettings", +] + + +class ShardingStrategy(Enum): + """ + This specifies the sharding strategy to be used for distributed training by + :class:`FullyShardedDataParallel`. + + - ``FULL_SHARD``: Parameters, gradients, and optimizer states are sharded. + For the parameters, this strategy unshards (via all-gather) before the + forward, reshards after the forward, unshards before the backward + computation, and reshards after the backward computation. For gradients, + it synchronizes and shards them (via reduce-scatter) after the backward + computation. The sharded optimizer states are updated locally per rank. + - ``SHARD_GRAD_OP``: Gradients and optimizer states are sharded during + computation, and additionally, parameters are sharded outside + computation. For the parameters, this strategy unshards before the + forward, does not reshard them after the forward, and only reshards them + after the backward computation. The sharded optimizer states are updated + locally per rank. 
Inside ``no_sync()``, the parameters are not resharded + after the backward computation. + - ``NO_SHARD``: Parameters, gradients, and optimizer states are not sharded + but instead replicated across ranks similar to PyTorch's + :class:`DistributedDataParallel` API. For gradients, this strategy + synchronizes them (via all-reduce) after the backward computation. The + unsharded optimizer states are updated locally per rank. + - ``HYBRID_SHARD``: Apply ``FULL_SHARD`` within a node, and replicate parameters across + nodes. This results in reduced communication volume as expensive all-gathers and + reduce-scatters are only done within a node, which can be more performant for medium + -sized models. + - ``_HYBRID_SHARD_ZERO2``: Apply ``SHARD_GRAD_OP`` within a node, and replicate parameters across + nodes. This is like ``HYBRID_SHARD``, except this may provide even higher throughput + since the unsharded parameters are not freed after the forward pass, saving the + all-gathers in the pre-backward. + """ + + FULL_SHARD = auto() + SHARD_GRAD_OP = auto() + NO_SHARD = auto() + HYBRID_SHARD = auto() + _HYBRID_SHARD_ZERO2 = auto() + + +class BackwardPrefetch(Enum): + """ + This configures explicit backward prefetching, which improves throughput by + enabling communication and computation overlap in the backward pass at the + cost of slightly increased memory usage. + + - ``BACKWARD_PRE``: This enables the most overlap but increases memory + usage the most. This prefetches the next set of parameters *before* the + current set of parameters' gradient computation. This overlaps the *next + all-gather* and the *current gradient computation*, and at the peak, it + holds the current set of parameters, next set of parameters, and current + set of gradients in memory. + - ``BACKWARD_POST``: This enables less overlap but requires less memory + usage. This prefetches the next set of parameters *after* the current + set of parameters' gradient computation. This overlaps the *current + reduce-scatter* and the *next gradient computation*, and it frees the + current set of parameters before allocating memory for the next set of + parameters, only holding the next set of parameters and current set of + gradients in memory at the peak. + - FSDP's ``backward_prefetch`` argument accepts ``None``, which disables + the backward prefetching altogether. This has no overlap and does not + increase memory usage. In general, we do not recommend this setting since + it may degrade throughput significantly. + + For more technical context: For a single process group using NCCL backend, + any collectives, even if issued from different streams, contend for the + same per-device NCCL stream, which implies that the relative order in which + the collectives are issued matters for overlapping. The two backward + prefetching values correspond to different issue orders. + """ + + # NOTE: For both modes, the ordering that defines "current" and "next" is + # not always exact in the current implementation. A mistargeted prefetch + # simply means that the parameter memory is allocated earlier than needed, + # possibly increasing peak memory usage, but does not affect correctness. + BACKWARD_PRE = auto() + BACKWARD_POST = auto() + + +@dataclass +class MixedPrecision: + """ + This configures FSDP-native mixed precision training. + + Attributes: + param_dtype (Optional[torch.dtype]): This specifies the dtype for model + parameters during forward and backward and thus the dtype for + forward and backward computation. 
Outside forward and backward, the + *sharded* parameters are kept in full precision (e.g. for the + optimizer step), and for model checkpointing, the parameters are + always saved in full precision. (Default: ``None``) + reduce_dtype (Optional[torch.dtype]): This specifies the dtype for + gradient reduction (i.e. reduce-scatter or all-reduce). If this is + ``None`` but ``param_dtype`` is not ``None``, then this takes on + the ``param_dtype`` value, still running gradient reduction in low + precision. This is permitted to differ from ``param_dtype``, e.g. + to force gradient reduction to run in full precision. (Default: + ``None``) + buffer_dtype (Optional[torch.dtype]): This specifies the dtype for + buffers. FSDP does not shard buffers. Rather, FSDP casts them to + ``buffer_dtype`` in the first forward pass and keeps them in that + dtype thereafter. For model checkpointing, the buffers are saved + in full precision except for ``LOCAL_STATE_DICT``. (Default: + ``None``) + keep_low_precision_grads (bool): If ``False``, then FSDP upcasts + gradients to full precision after the backward pass in preparation + for the optimizer step. If ``True``, then FSDP keeps the gradients + in the dtype used for gradient reduction, which can save memory if + using a custom optimizer that supports running in low precision. + (Default: ``False``) + cast_forward_inputs (bool): If ``True``, then this FSDP module casts + its forward args and kwargs to ``param_dtype``. This is to ensure + that parameter and input dtypes match for forward computation, as + required by many ops. This may need to be set to ``True`` when only + applying mixed precision to some but not all FSDP modules, in which + case a mixed-precision FSDP submodule needs to recast its inputs. + (Default: ``False``) + cast_root_forward_inputs (bool): If ``True``, then the root FSDP module + casts its forward args and kwargs to ``param_dtype``, overriding + the value of ``cast_forward_inputs``. For non-root FSDP modules, + this does not do anything. (Default: ``True``) + _module_classes_to_ignore: (Sequence[Type[nn.Module]]): This specifies + module classes to ignore for mixed precision when using an + ``auto_wrap_policy``: Modules of these classes will have FSDP + applied to them separately with mixed precision disabled (meaning + that the final FSDP construction would deviate from the specified + policy). If ``auto_wrap_policy`` is not specified, then this does + not do anything. This API is experimental and subject to change. + (Default: ``(_BatchNorm,)``) + + .. note:: This API is experimental and subject to change. + + .. note:: Only floating point tensors are cast to their specified dtypes. + + .. note:: In ``summon_full_params``, parameters are forced to full + precision, but buffers are not. + + .. note:: Layer norm and batch norm accumulate in ``float32`` even when + their inputs are in a low precision like ``float16`` or ``bfloat16``. + Disabling FSDP's mixed precision for those norm modules only means that + the affine parameters are kept in ``float32``. However, this incurs + separate all-gathers and reduce-scatters for those norm modules, which + may be inefficient, so if the workload permits, the user should prefer + to still apply mixed precision to those modules. + + .. note:: By default, if the user passes a model with any ``_BatchNorm`` + modules and specifies an ``auto_wrap_policy``, then the batch norm + modules will have FSDP applied to them separately with mixed precision + disabled. 
See the ``_module_classes_to_ignore`` argument.
+
+    .. note:: ``MixedPrecision`` has ``cast_root_forward_inputs=True`` and
+        ``cast_forward_inputs=False`` by default. For the root FSDP instance,
+        its ``cast_root_forward_inputs`` takes precedence over its
+        ``cast_forward_inputs``. For non-root FSDP instances, their
+        ``cast_root_forward_inputs`` values are ignored. The default setting is
+        sufficient for the typical case where each FSDP instance has the same
+        ``MixedPrecision`` configuration and only needs to cast inputs to the
+        ``param_dtype`` at the beginning of the model's forward pass.
+
+    .. note:: For nested FSDP instances with different ``MixedPrecision``
+        configurations, we recommend setting individual ``cast_forward_inputs``
+        values to configure casting inputs or not before each instance's
+        forward. In such a case, since the casts happen before each FSDP
+        instance's forward, a parent FSDP instance should have its non-FSDP
+        submodules run before its FSDP submodules to avoid the activation dtype
+        being changed due to a different ``MixedPrecision`` configuration.
+
+    Example::
+
+        >>> # xdoctest: +SKIP("undefined variables")
+        >>> model = nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3))
+        >>> model[1] = FSDP(
+        >>>     model[1],
+        >>>     mixed_precision=MixedPrecision(param_dtype=torch.float16, cast_forward_inputs=True),
+        >>> )
+        >>> model = FSDP(
+        >>>     model,
+        >>>     mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, cast_forward_inputs=True),
+        >>> )
+
+        The above shows a working example. On the other hand, if ``model[1]``
+        were replaced with ``model[0]``, meaning that the submodule using
+        different ``MixedPrecision`` ran its forward first, then ``model[1]``
+        would incorrectly see ``float16`` activations instead of ``bfloat16``
+        ones.
+
+    """
+
+    param_dtype: Optional[torch.dtype] = None
+    reduce_dtype: Optional[torch.dtype] = None
+    buffer_dtype: Optional[torch.dtype] = None
+    keep_low_precision_grads: bool = False
+    cast_forward_inputs: bool = False
+    cast_root_forward_inputs: bool = True
+    _module_classes_to_ignore: Sequence[Type[torch.nn.Module]] = (_BatchNorm,)
+
+
+@dataclass
+class CPUOffload:
+    """
+    This configures CPU offloading.
+
+    Attributes:
+        offload_params (bool): This specifies whether to offload parameters to
+            CPU when not involved in computation. If ``True``, then this
+            offloads gradients to CPU as well, meaning that the optimizer step
+            runs on CPU.
+    """
+
+    offload_params: bool = False
+
+
+class StateDictType(Enum):
+    """
+    This enum indicates which type of ``state_dict`` the FSDP module is
+    currently processing (returning or loading).
+    The default value is FULL_STATE_DICT to comply with the PyTorch convention.
+    .. note::
+        FSDP currently supports three types of ``state_dict``:
+            1. ``state_dict/load_state_dict``: this pair of APIs return and load
+               the non-sharded, unflattened parameters. The semantics are the
+               same as using DDP.
+            2. ``_local_state_dict/_load_local_state_dict``: this pair of APIs return
+               and load local sharded, flattened parameters. The values returned
+               by ``_local_state_dict`` can be directly used by FSDP and are only
+               meaningful to FSDP (because parameters are flattened). Note that
+               these APIs are meant for use via the :func:`state_dict_type`
+               context manager as follows:
+                   >>> # xdoctest: +SKIP("undefined variables")
+                   >>> with fsdp.state_dict_type(StateDictType.LOCAL_STATE_DICT):
+                   ...     state = fsdp.state_dict()  # loads local state dict
+            3.
``_sharded_state_dict/_load_sharded_state_dict``: this pair of APIs + return and load sharded, unflattened parameters. The ``state_dict`` + return by ``sharded_state_dict`` can be used by all other parallel + schemes (resharding may be required). + """ + + FULL_STATE_DICT = auto() + LOCAL_STATE_DICT = auto() + SHARDED_STATE_DICT = auto() + + +@dataclass +class StateDictConfig: + """ + ``StateDictConfig`` is the base class for all ``state_dict`` configuration + classes. Users should instantiate a child class (e.g. + ``FullStateDictConfig``) in order to configure settings for the + corresponding ``state_dict`` type supported by FSDP. + + Attributes: + offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict + values to CPU, and if ``False``, then FSDP keeps them on GPU. + (Default: ``False``) + """ + + offload_to_cpu: bool = False + + +@dataclass +class FullStateDictConfig(StateDictConfig): + """ + ``FullStateDictConfig`` is a config class meant to be used with + ``StateDictType.FULL_STATE_DICT``. We recommend enabling both + ``offload_to_cpu=True`` and ``rank0_only=True`` when saving full state + dicts to save GPU memory and CPU memory, respectively. This config class + is meant to be used via the :func:`state_dict_type` context manager as + follows: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> fsdp = FSDP(model, auto_wrap_policy=...) + >>> cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) + >>> with FSDP.state_dict_type(fsdp, StateDictType.FULL_STATE_DICT, cfg): + >>> state = fsdp.state_dict() + >>> # `state` will be empty on non rank 0 and contain CPU tensors on rank 0. + >>> # To reload checkpoint for inference, finetuning, transfer learning, etc: + >>> model = model_fn() # Initialize model in preparation for wrapping with FSDP + >>> if dist.get_rank() == 0: + >>> # Load checkpoint only on rank 0 to avoid memory redundancy + >>> state_dict = torch.load("my_checkpoint.pt") + >>> model.load_state_dict(state_dict) + >>> # All ranks initialize FSDP module as usual. `sync_module_states` argument + >>> # communicates loaded checkpoint states from rank 0 to rest of the world. + >>> fsdp = FSDP(model, device_id=torch.cuda.current_device(), auto_wrap_policy=..., sync_module_states=True) + >>> # After this point, all ranks have FSDP model with loaded checkpoint. + + Attributes: + rank0_only (bool): If ``True``, then only rank 0 saves the full state + dict, and nonzero ranks save an empty dict. If ``False``, then all + ranks save the full state dict. (Default: ``False``) + """ + + rank0_only: bool = False + + +@dataclass +class LocalStateDictConfig(StateDictConfig): + pass + + +@dataclass +class ShardedStateDictConfig(StateDictConfig): + """ + ``ShardedStateDictConfig`` is a config class meant to be used with + ``StateDictType.SHARDED_STATE_DICT``. + + Attributes: + _use_dtensor (bool): If ``True``, then FSDP saves the state dict values + as ``DTensor``, and if ``False``, then FSDP saves them as + ``ShardedTensor``. (Default: ``False``) + + .. warning:: ``_use_dtensor`` is a private field of :class:`ShardedStateDictConfig` + and it is used by FSDP to determine the type of state dict values. Users should not + manually modify ``_use_dtensor``. + """ + + _use_dtensor: bool = False + + +@dataclass +class OptimStateDictConfig: + """ + ``OptimStateDictConfig`` is the base class for all ``optim_state_dict`` + configuration classes. Users should instantiate a child class (e.g. 
+ ``FullOptimStateDictConfig``) in order to configure settings for the + corresponding ``optim_state_dict`` type supported by FSDP. + + Attributes: + offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict's + tensor values to CPU, and if ``False``, then FSDP keeps them on the + original device (which is GPU unless parameter CPU offloading is + enabled). (Default: ``True``) + """ + + offload_to_cpu: bool = True + + +@dataclass +class FullOptimStateDictConfig(OptimStateDictConfig): + """ + Attributes: + rank0_only (bool): If ``True``, then only rank 0 saves the full state + dict, and nonzero ranks save an empty dict. If ``False``, then all + ranks save the full state dict. (Default: ``False``) + """ + + rank0_only: bool = False + + +@dataclass +class LocalOptimStateDictConfig(OptimStateDictConfig): + offload_to_cpu: bool = False + + +@dataclass +class ShardedOptimStateDictConfig(OptimStateDictConfig): + """ + ``ShardedOptimStateDictConfig`` is a config class meant to be used with + ``StateDictType.SHARDED_STATE_DICT``. + + Attributes: + _use_dtensor (bool): If ``True``, then FSDP saves the state dict values + as ``DTensor``, and if ``False``, then FSDP saves them as + ``ShardedTensor``. (Default: ``False``) + + .. warning:: ``_use_dtensor`` is a private field of :class:`ShardedOptimStateDictConfig` + and it is used by FSDP to determine the type of state dict values. Users should not + manually modify ``_use_dtensor``. + """ + + _use_dtensor: bool = False + + +@dataclass +class StateDictSettings: + state_dict_type: StateDictType + state_dict_config: StateDictConfig + optim_state_dict_config: OptimStateDictConfig diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..8152438a9c77cd9c389c9ee5f71803f30beb06e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py @@ -0,0 +1,2075 @@ +# mypy: ignore-errors + +import contextlib +import copy +import functools +import math +import traceback +import warnings +from contextlib import contextmanager +from enum import auto, Enum +from typing import ( + Any, + Callable, + Dict, + Generator, + Iterable, + Iterator, + List, + Optional, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed._tensor import DeviceMesh +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + _CHECKPOINT_WRAPPED_MODULE, + ActivationWrapper, +) +from torch.distributed.algorithms._comm_hooks import LOW_PRECISION_HOOKS +from torch.distributed.fsdp._common_utils import ( + _FSDPState, + _get_param_to_fqns, + FSDP_PREFIX, + FSDP_WRAPPED_MODULE, + TrainingState, +) +from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo +from torch.distributed.fsdp._init_utils import ( + _check_orig_params_flattened, + _init_buffer_state, + _init_core_state, + _init_device_handle, + _init_extension, + _init_ignored_module_states, + _init_param_handle_from_module, + _init_prefetching_state, + _init_process_group_state, + _init_runtime_state, + _init_state_dict_state, + HYBRID_SHARDING_STRATEGIES, + ProcessGroupType, +) +from torch.distributed.fsdp._runtime_utils import ( + _get_fsdp_root_states, + _is_fsdp_root, + _lazy_init, + _post_forward, + 
_post_forward_reshard, + _pre_forward, + _pre_forward_unshard, + _root_pre_forward, +) +from torch.distributed.fsdp._wrap_utils import _auto_wrap +from torch.distributed.fsdp.api import ( + BackwardPrefetch, + CPUOffload, + FullOptimStateDictConfig, + FullStateDictConfig, + LocalOptimStateDictConfig, + LocalStateDictConfig, + MixedPrecision, + OptimStateDictConfig, + ShardedOptimStateDictConfig, + ShardedStateDictConfig, + ShardingStrategy, + StateDictConfig, + StateDictSettings, + StateDictType, +) +from torch.distributed.utils import _p_assert +from ._flat_param import FlatParameter + +from ._optim_utils import ( + _flatten_optim_state_dict, + _get_param_id_to_param_from_optim_input, + _get_param_key_to_param, + _get_param_to_param_id_from_optim_input, + _get_param_to_param_key, + _optim_state_dict, + _rekey_sharded_optim_state_dict, + _set_optim_use_dtensor, +) +from ._state_dict_utils import _register_all_state_dict_hooks +from ._unshard_param_utils import ( + _deregister_orig_params, + _register_flat_param, + _register_orig_params, + _unshard_params, + _unshard_params_recurse, +) +from .wrap import CustomPolicy, ModuleWrapPolicy + + +__all__ = [ + "FullyShardedDataParallel", + "OptimStateKeyType", +] + + +FLAT_PARAM = "_flat_param" + + +class OptimStateKeyType(Enum): + """Represents the type of key in an optimizer state-dict.""" + + PARAM_NAME = auto() + PARAM_ID = auto() + + +class FullyShardedDataParallel(nn.Module, _FSDPState): + """A wrapper for sharding module parameters across data parallel workers. + + This is inspired by `Xu et al.`_ as well as the ZeRO Stage 3 from DeepSpeed_. + FullyShardedDataParallel is commonly shortened to FSDP. + + .. _`Xu et al.`: https://arxiv.org/abs/2004.13336 + .. _DeepSpeed: https://www.deepspeed.ai/ + + For advanced notes please refer to :ref:`fsdp_notes`. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> torch.cuda.set_device(device_id) + >>> sharded_module = FSDP(my_module) + >>> optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001) + >>> x = sharded_module(x, y=3, z=torch.Tensor([1])) + >>> loss = x.sum() + >>> loss.backward() + >>> optim.step() + + .. warning:: + The optimizer must be initialized *after* the module has been wrapped + with FSDP since FSDP will shard and transform the module's parameters + in a way that may not preserve the original parameter variables. Thus, + the previously initialized optimizer may have stale references to the + parameters. + + .. warning:: + If the destination CUDA device has ID ``dev_id``, either (1) + ``module`` should already be placed on that device, (2) the device + should be set using ``torch.cuda.set_device(dev_id)``, or (3) + ``dev_id`` should be passed into the ``device_id`` constructor + argument. This FSDP instance's compute device will be that destination + device. For (1) and (3), the FSDP initialization always occurs on GPU. + For (2), the FSDP initialization happens on ``module`` 's current + device, which may be CPU. + + .. warning:: + FSDP currently does not support gradient accumulation outside + ``no_sync()`` when using CPU offloading. Trying to do so yields + incorrect results since FSDP will use the newly-reduced gradient + instead of accumulating with any existing gradient. + + .. warning:: + Changing the original parameter variable names after construction will + lead to undefined behavior. + + .. 
warning:: + Passing in the ``sync_module_states=True`` flag requires ``module`` to + be on GPU or to use the ``device_id`` argument to specify a CUDA device + that FSDP will move ``module`` to in the FSDP constructor. This is + because ``sync_module_states=True`` requires GPU communication. + + .. warning:: + As of PyTorch 1.12, FSDP only offers limited support for shared parameters + (for example, setting one ``Linear`` layer's weight to another's). In + particular, modules that share parameters must be wrapped as part of the + same FSDP unit. If enhanced shared parameter support is needed for your + use case, please ping https://github.com/pytorch/pytorch/issues/77724 + + .. warning:: + FSDP has some constraints on freezing parameters (i.e. setting + ``param.requires_grad=False``). For ``use_orig_params=False``, each + FSDP instance must manage parameters that are all frozen or all + non-frozen. For ``use_orig_params=True``, FSDP supports mixing frozen + and non-frozen, but we recommend not doing so since then the gradient + memory usage will be higher than expected (namely, equivalent to not + freezing those parameters). This means that ideally, frozen parameters + should be isolated into their own ``nn.Module`` s and wrapped + separately with FSDP. + + .. note:: + Attempting to run the forward pass of a submodule that is contained in an + FSDP instance is not supported and will result in errors. This is because the + submodule's parameters will be sharded, but it itself is not an FSDP instance, + so its forward pass will not all-gather the full parameters appropriately. + This could potentially happen when attempting to run only the encoder of a + encoder-decoder model, and the encoder is not wrapped in its own FSDP instance. To + resolve this, please wrap the submodule in its own FSDP unit. + + .. note:: + FSDP moves input tensors to the ``forward`` method to the GPU compute + device, so the user does not need to manually move them from CPU. + + .. warning:: + The user should not modify the parameters between forward and backward + without using the :meth:`summon_full_params` context since the + modifications may not persist. Moreover, for ``use_orig_params=False``, + accessing the original parameters between forward and backward may + raise an illegal memory access. + + .. warning:: + For ``use_orig_params=True``, ``ShardingStrategy.SHARD_GRAD_OP`` + exposes the unsharded parameters, not the sharded parameters, after + forward since it does not free the unsharded ones, unlike + ``ShardingStrategy.FULL_SHARD``. One caveat is that, since gradients + are always sharded or ``None``, ``ShardingStrategy.SHARD_GRAD_OP`` will + not expose the sharded gradients with the unsharded parameters after + forward. If you want to inspect the gradients, try + :meth:`summon_full_params` with ``with_grads=True``. + + .. warning:: + FSDP replaces managed modules' parameters with ``torch.Tensor`` views + during forward and backward computation for autograd-related reasons. + If your module's forward relies on saved references to the parameters + instead of reacquiring the references each iteration, then it will not + see FSDP's newly created views, and autograd will not work correctly. + + .. note:: + With ``limit_all_gathers=True``, you may see a gap in the FSDP + pre-forward where the CPU thread is not issuing any kernels. This is + intentional and shows the rate limiter in effect. 
Synchronizing the CPU + thread in that way prevents over-allocating memory for subsequent + all-gathers, and it should not actually delay GPU kernel execution. + + .. note:: + When using ``sharding_strategy=ShardingStrategy.HYBRID_SHARD`` with the + sharding process group being intra-node and the replication process + group being inter-node, setting ``NCCL_CROSS_NIC=1`` can help improve + the all-reduce times over the replication process group for some + cluster setups. + + .. warning:: + FSDP does not work with double backwards due to how it registers + backward hooks. + + Args: + module (nn.Module): + This is the module to be wrapped with FSDP. + process_group (Optional[Union[ProcessGroup, Tuple[ProcessGroup, ProcessGroup]]]): + This is the process group over which the model is sharded and thus + the one used for FSDP's all-gather and reduce-scatter collective + communications. If ``None``, then FSDP uses the default process + group. For hybrid sharding strategies such as + ``ShardingStrategy.HYBRID_SHARD``, users can pass in a tuple of + process groups, representing the groups over which to shard and + replicate, respectively. If ``None``, then FSDP constructs process + groups for the user to shard intra-node and replicate inter-node. + (Default: ``None``) + sharding_strategy (Optional[ShardingStrategy]): + This configures the sharding strategy, which may trade off memory + saving and communication overhead. See :class:`ShardingStrategy` + for details. (Default: ``FULL_SHARD``) + cpu_offload (Optional[CPUOffload]): + This configures CPU offloading. If this is set to ``None``, then + no CPU offloading happens. See :class:`CPUOffload` for details. + (Default: ``None``) + auto_wrap_policy (Optional[Union[Callable[[nn.Module, bool, int], bool], ModuleWrapPolicy, CustomPolicy]]): + This specifies a policy to apply FSDP to submodules of ``module``, + which is needed for communication and computation overlap and thus + affects performance. If ``None``, then FSDP only applies to + ``module``, and users should manually apply FSDP to parent modules + themselves (proceeding bottom-up). For convenience, this accepts + ``ModuleWrapPolicy`` directly, which allows users to specify the + module classes to wrap (e.g. the transformer block). Otherwise, + this should be a callable that takes in three arguments + ``module: nn.Module``, ``recurse: bool``, and + ``nonwrapped_numel: int`` and should return a ``bool`` specifying + whether the passed-in ``module`` should have FSDP applied if + ``recurse=False`` or if the traversal should continue into the + module's subtree if ``recurse=True``. Users may add additional + arguments to the callable. The ``size_based_auto_wrap_policy`` in + ``torch.distributed.fsdp.wrap.py`` gives an example callable that + applies FSDP to a module if the parameters in its subtree exceed + 100M numel. We recommend printing the model after applying FSDP + and adjusting as needed. + + Example:: + + >>> def custom_auto_wrap_policy( + >>> module: nn.Module, + >>> recurse: bool, + >>> nonwrapped_numel: int, + >>> # Additional custom arguments + >>> min_num_params: int = int(1e8), + >>> ) -> bool: + >>> return nonwrapped_numel >= min_num_params + >>> # Configure a custom `min_num_params` + >>> my_auto_wrap_policy = functools.partial(custom_auto_wrap_policy, min_num_params=int(1e5)) + + backward_prefetch (Optional[BackwardPrefetch]): + This configures explicit backward prefetching of all-gathers. 
If + ``None``, then FSDP does not backward prefetch, and there is no + communication and computation overlap in the backward pass. See + :class:`BackwardPrefetch` for details. (Default: ``BACKWARD_PRE``) + mixed_precision (Optional[MixedPrecision]): + This configures native mixed precision for FSDP. If this is set to + ``None``, then no mixed precision is used. Otherwise, parameter, + buffer, and gradient reduction dtypes can be set. See + :class:`MixedPrecision` for details. (Default: ``None``) + ignored_modules (Optional[Iterable[torch.nn.Module]]): Modules whose + own parameters and child modules' parameters and buffers are + ignored by this instance. None of the modules directly in + ``ignored_modules`` should be :class:`FullyShardedDataParallel` + instances, and any child modules that are already-constructed + :class:`FullyShardedDataParallel` instances will not be ignored if + they are nested under this instance. This argument may be used to + avoid sharding specific parameters at module granularity when using an + ``auto_wrap_policy`` or if parameters' sharding is not managed by + FSDP. (Default: ``None``) + param_init_fn (Optional[Callable[[nn.Module], None]]): + A ``Callable[torch.nn.Module] -> None`` that + specifies how modules that are currently on the meta device should + be initialized onto an actual device. As of v1.12, FSDP detects + modules with parameters or buffers on meta device via ``is_meta`` + and either applies ``param_init_fn`` if specified or calls + ``nn.Module.reset_parameters()`` otherwise. For both cases, the + implementation should *only* initialize the parameters/buffers of + the module, not those of its submodules. This is to avoid + re-initialization. In addition, FSDP also supports deferred + initialization via torchdistX's (https://github.com/pytorch/torchdistX) + ``deferred_init()`` API, where the deferred modules are initialized + by calling ``param_init_fn`` if specified or torchdistX's default + ``materialize_module()`` otherwise. If ``param_init_fn`` is + specified, then it is applied to all meta-device modules, meaning + that it should probably case on the module type. FSDP calls the + initialization function before parameter flattening and sharding. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> module = MyModule(device="meta") + >>> def my_init_fn(module: nn.Module): + >>> # E.g. initialize depending on the module type + >>> ... + >>> fsdp_model = FSDP(module, param_init_fn=my_init_fn, auto_wrap_policy=size_based_auto_wrap_policy) + >>> print(next(fsdp_model.parameters()).device) # current CUDA device + >>> # With torchdistX + >>> module = deferred_init.deferred_init(MyModule, device="cuda") + >>> # Will initialize via deferred_init.materialize_module(). + >>> fsdp_model = FSDP(module, auto_wrap_policy=size_based_auto_wrap_policy) + + device_id (Optional[Union[int, torch.device]]): An ``int`` or + ``torch.device`` giving the CUDA device on which FSDP + initialization takes place, including the module initialization + if needed and the parameter sharding. This should be specified to + improve initialization speed if ``module`` is on CPU. If the + default CUDA device was set (e.g. via ``torch.cuda.set_device``), + then the user may pass ``torch.cuda.current_device`` to this. + (Default: ``None``) + sync_module_states (bool): If ``True``, then each FSDP module will + broadcast module parameters and buffers from rank 0 to ensure that + they are replicated across ranks (adding communication overhead to + this constructor). 
This can help load ``state_dict`` checkpoints + via ``load_state_dict`` in a memory efficient way. See + :class:`FullStateDictConfig` for an example of this. (Default: + ``False``) + forward_prefetch (bool): If ``True``, then FSDP *explicitly* prefetches + the next forward-pass all-gather before the current forward + computation. This is only useful for CPU-bound workloads, in which + case issuing the next all-gather earlier may improve overlap. This + should only be used for static-graph models since the prefetching + follows the first iteration's execution order. (Default: ``False``) + limit_all_gathers (bool): If ``True``, then FSDP explicitly + synchronizes the CPU thread to ensure GPU memory usage from only + *two* consecutive FSDP instances (the current instance running + computation and the next instance whose all-gather is prefetched). + If ``False``, then FSDP allows the CPU thread to issue all-gathers + without any extra synchronization. (Default: ``True``) We often + refer to this feature as the "rate limiter". This flag should only + be set to ``False`` for specific CPU-bound workloads with low + memory pressure in which case the CPU thread can aggressively issue + all kernels without concern for the GPU memory usage. + use_orig_params (bool): Setting this to ``True`` has FSDP use + ``module`` 's original parameters. FSDP exposes those original + parameters to the user via :meth:`nn.Module.named_parameters` + instead of FSDP's internal :class:`FlatParameter` s. This means + that the optimizer step runs on the original parameters, enabling + per-original-parameter hyperparameters. FSDP preserves the original + parameter variables and manipulates their data between unsharded + and sharded forms, where they are always views into the underlying + unsharded or sharded :class:`FlatParameter`, respectively. With the + current algorithm, the sharded form is always 1D, losing the + original tensor structure. An original parameter may have all, + some, or none of its data present for a given rank. In the none + case, its data will be like a size-0 empty tensor. Users should not + author programs relying on what data is present for a given + original parameter in its sharded form. ``True`` is required to + use ``torch.compile()``. Setting this to ``False`` exposes FSDP's + internal :class:`FlatParameter` s to the user via + :meth:`nn.Module.named_parameters`. (Default: ``False``) + ignored_states (Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]): + Ignored parameters or modules that will not be managed by this FSDP + instance, meaning that the parameters are not sharded and their + gradients are not reduced across ranks. This argument unifies with + the existing ``ignored_modules`` argument, and we may deprecate + ``ignored_modules`` soon. For backward compatibility, we keep both + ``ignored_states`` and `ignored_modules``, but FSDP only allows one + of them to be specified as not ``None``. 
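+
+            A minimal sketch of passing ``ignored_states`` (the module and
+            attribute names below are illustrative only)::
+
+                >>> # xdoctest: +SKIP("undefined variables")
+                >>> frozen_params = list(my_module.embedding.parameters())
+                >>> fsdp_model = FSDP(my_module, ignored_states=frozen_params)
+                >>> # These parameters are neither sharded nor gradient-reduced by FSDP.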
+ """ + + def __init__( + self, + module: nn.Module, + process_group: ProcessGroupType = None, + sharding_strategy: Optional[ShardingStrategy] = None, + cpu_offload: Optional[CPUOffload] = None, + auto_wrap_policy: Optional[ + Union[Callable, ModuleWrapPolicy, CustomPolicy] + ] = None, + backward_prefetch: Optional[BackwardPrefetch] = BackwardPrefetch.BACKWARD_PRE, + mixed_precision: Optional[MixedPrecision] = None, + ignored_modules: Optional[Iterable[torch.nn.Module]] = None, + param_init_fn: Optional[Callable[[nn.Module], None]] = None, + device_id: Optional[Union[int, torch.device]] = None, + sync_module_states: bool = False, + forward_prefetch: bool = False, + limit_all_gathers: bool = True, + use_orig_params: bool = False, + ignored_states: Union[ + Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]] + ] = None, + device_mesh: Optional[DeviceMesh] = None, + ): + torch._C._log_api_usage_once("torch.distributed.fsdp") + super().__init__() + _init_ignored_module_states(self, module, ignored_modules, ignored_states) + _init_device_handle(self, module, self._ignored_params, device_id) + + # Add module annotations for Dynamo support (see function for details) + _annotate_modules_for_dynamo(module, self._ignored_modules, use_orig_params) + + # Initializes self.process_group, along with rank and world size. This will + # also set another attribute, _inter_node_pg, to control the process group + # over which sharding occurs, if sharding_strategy is {HYBRID_SHARD, _HYBRID_SHARD_ZERO2}. + # Note that this is done before auto_wrapping, so that child FSDP modules simply pick up + # the same process group state as the root FSDP module. + self._device_mesh = device_mesh + _init_process_group_state( + self, + process_group, + sharding_strategy, + auto_wrap_policy, + device_mesh, + ) + if auto_wrap_policy is not None: + root_kwargs = { + "process_group": process_group, + "sharding_strategy": sharding_strategy, + "cpu_offload": cpu_offload, + "backward_prefetch": backward_prefetch, + "mixed_precision": mixed_precision, + "param_init_fn": param_init_fn, + "device_id": device_id, + "sync_module_states": sync_module_states, + "forward_prefetch": forward_prefetch, + "limit_all_gathers": limit_all_gathers, + "use_orig_params": use_orig_params, + "ignored_states": self._ignored_params, + "device_mesh": device_mesh, + } + if sharding_strategy in HYBRID_SHARDING_STRATEGIES and device_mesh is None: + # Share root process groups with children to maintain + # the invariant that all FSDP modules will have the same + # process groups. 
+ root_kwargs["process_group"] = (self.process_group, self._inter_node_pg) + + _auto_wrap( + module, + auto_wrap_policy, + self._ignored_modules, + self._ignored_params, + root_kwargs, + FullyShardedDataParallel, + ) + + backward_prefetch_limit = 1 + forward_prefetch_limit = 1 + _init_core_state( + self, + sharding_strategy, + mixed_precision, + cpu_offload, + limit_all_gathers, + use_orig_params, + backward_prefetch_limit, + forward_prefetch_limit, + ) + _init_runtime_state(self) + _init_prefetching_state(self, backward_prefetch, forward_prefetch) + _init_buffer_state(self, module) + # extension needs to be set before `_init_param_handle_from_module()` + _init_extension(self, device_mesh) + _init_param_handle_from_module( + self, + module, + device_id, + param_init_fn, + sync_module_states, + ) + self._fsdp_wrapped_module = module + if not use_orig_params: + _check_orig_params_flattened(self, self._ignored_params) + _register_flat_param(self, self) + + # `_state_dict_type` controls the `state_dict()` behavior, which is + # implemented using post-save and pre-load hooks + _init_state_dict_state(self) + _register_all_state_dict_hooks(self) + + @property + def module(self) -> nn.Module: + """Return the wrapped module.""" + # FSDP's `.module` must refer to the innermost wrapped module when + # composing with other module wrappers in order for state dict to work + if isinstance(self._fsdp_wrapped_module, ActivationWrapper): + return getattr(self._fsdp_wrapped_module, _CHECKPOINT_WRAPPED_MODULE) + return self._fsdp_wrapped_module + + @property + def _has_params(self) -> bool: + """Returns whether this FSDP instance manages any parameters.""" + return hasattr(self, "_handle") and self._handle is not None + + @property + def _flat_param(self) -> Optional[FlatParameter]: + return self._handle.flat_param if self._handle else None + + def __getattr__(self, name: str) -> Any: + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self._fsdp_wrapped_module, name) + + def __getitem__(self, key: int) -> Any: + """Forward indexing calls in case the module is an ``nn.Sequential``.""" + if hasattr(self, FSDP_WRAPPED_MODULE): + return self._fsdp_wrapped_module.__getitem__(key) # type: ignore[operator] + return super().__getitem__(key) + + def check_is_root(self) -> bool: + """Check if this instance is a root FSDP module.""" + return _is_fsdp_root(self, self) + + @staticmethod + def fsdp_modules( + module: nn.Module, + root_only: bool = False, + ) -> List["FullyShardedDataParallel"]: + """Return all nested FSDP instances. + + This possibly includes ``module`` itself and only includes FSDP root modules if ``root_only=True``. + + Args: + module (torch.nn.Module): Root module, which may or may not be an + ``FSDP`` module. + root_only (bool): Whether to return only FSDP root modules. + (Default: ``False``) + + Returns: + List[FullyShardedDataParallel]: FSDP modules that are nested in + the input ``module``. + """ + if root_only: + return _get_fsdp_root_states(module) + return traversal_utils._get_fsdp_states(module) + + def apply(self, fn: Callable[[nn.Module], None]) -> "FullyShardedDataParallel": + r"""Apply ``fn`` recursively to every submodule (as returned by ``.children()``) as well as self. + + Typical use includes initializing the parameters of a model (see also :ref:`nn-init-doc`). 
+ + Compared to ``torch.nn.Module.apply``, this version additionally gathers + the full parameters before applying ``fn``. It should not be called from + within another ``summon_full_params`` context. + + Args: + fn (:class:`Module` -> None): function to be applied to each submodule + + Returns: + Module: self + """ + uninitialized = self._is_root is None + self._assert_state(TrainingState.IDLE) + # Use `_unshard_params_recurse()` with `recurse=False` instead of + # `_unshard_fsdp_state_params()` directly to perform lazy + # initialization, which is needed to initialize `FlatParameter` + # parameter attributes as required by the unshard logic + with _unshard_params_recurse( + self, + self, + recurse=False, + writeback=True, + rank0_only=False, + offload_to_cpu=False, + with_grads=False, + ): + ret = super().apply(fn) + + # Reset lazy init called in `_unshard_params_recurse()` since `apply()` + # may have been called on FSDP instance that is not truly a root, in + # which case it will be incorrectly marked as one. + if uninitialized and self._is_root: + for module in traversal_utils._get_fsdp_states(self): + module._reset_lazy_init() + + return ret + + def _mixed_precision_enabled_for_buffers(self) -> bool: + """Return whether the user explicitly enabled buffer mixed precision. + + NOTE: Unlike parameters and gradient reduction, buffer mixed precision + is applied at the FSDP instance level, not the ``FlatParameter`` level, + which may be different for the composable code path. + """ + return self.mixed_precision.buffer_dtype is not None + + def _low_precision_hook_enabled(self) -> bool: + """Whether a low precision hook is registered or not.""" + return self._comm_hook is not None and self._comm_hook in LOW_PRECISION_HOOKS + + def _reset_lazy_init(self) -> None: + """Reset instance so :func:`_lazy_init` will run on the next forward.""" + self._is_root: Optional[bool] = None + + @staticmethod + def set_state_dict_type( + module: nn.Module, + state_dict_type: StateDictType, + state_dict_config: Optional[StateDictConfig] = None, + optim_state_dict_config: Optional[OptimStateDictConfig] = None, + ) -> StateDictSettings: + """Set the ``state_dict_type`` of all the descendant FSDP modules of the target module. + + Also takes (optional) configuration for the model's and optimizer's state dict. + The target module does not have to be a FSDP module. If the target + module is a FSDP module, its ``state_dict_type`` will also be changed. + + .. note:: This API should be called for only the top-level (root) + module. + + .. note:: This API enables users to transparently use the conventional + ``state_dict`` API to take model checkpoints in cases where the + root FSDP module is wrapped by another ``nn.Module``. For example, + the following will ensure ``state_dict`` is called on all non-FSDP + instances, while dispatching into `sharded_state_dict` implementation + for FSDP: + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> model = DDP(FSDP(...)) + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.SHARDED_STATE_DICT, + >>> state_dict_config = ShardedStateDictConfig(offload_to_cpu=True), + >>> optim_state_dict_config = OptimStateDictConfig(offload_to_cpu=True), + >>> ) + >>> param_state_dict = model.state_dict() + >>> optim_state_dict = FSDP.optim_state_dict(model, optim) + + Args: + module (torch.nn.Module): Root module. + state_dict_type (StateDictType): the desired ``state_dict_type`` to set. 
+ state_dict_config (Optional[StateDictConfig]): the configuration for the + target ``state_dict_type``. + optim_state_dict_config (Optional[OptimStateDictConfig]): the configuration + for the optimizer state dict. + + Returns: + A StateDictSettings that include the previous state_dict type and + configuration for the module. + """ + _state_dict_type_to_config = { + StateDictType.FULL_STATE_DICT: FullStateDictConfig, + StateDictType.LOCAL_STATE_DICT: LocalStateDictConfig, + StateDictType.SHARDED_STATE_DICT: ShardedStateDictConfig, + } + _optim_state_dict_type_to_config = { + StateDictType.FULL_STATE_DICT: FullOptimStateDictConfig, + StateDictType.LOCAL_STATE_DICT: LocalOptimStateDictConfig, + StateDictType.SHARDED_STATE_DICT: ShardedOptimStateDictConfig, + } + + # Use the default config if a state_dict config is not set. + state_dict_config_type = _state_dict_type_to_config[state_dict_type] + optim_state_dict_config_type = _optim_state_dict_type_to_config[state_dict_type] + if state_dict_config is None: + state_dict_config = state_dict_config_type() + if optim_state_dict_config is None: + optim_state_dict_config = optim_state_dict_config_type() + if state_dict_config_type != type(state_dict_config): + raise RuntimeError( + f"Expected state_dict_config of type {state_dict_config_type} " + f"but got {type(state_dict_config)}" + ) + if optim_state_dict_config_type != type(optim_state_dict_config): + raise RuntimeError( + f"Expected optim_state_dict_config of type {optim_state_dict_config_type} " + f"but got {type(optim_state_dict_config)}" + ) + + # Set the state_dict type and configurations. + prev_state_dict_type = None + prev_state_dict_config = None + prev_optim_state_dict_config = None + for submodule in traversal_utils._get_fsdp_states(module): + if prev_state_dict_type is None: + prev_state_dict_type = submodule._state_dict_type + else: + assert ( + prev_state_dict_type == submodule._state_dict_type + ), "All FSDP modules should have the same state_dict_type." + if prev_state_dict_config is None: + prev_state_dict_config = submodule._state_dict_config + else: + assert isinstance( + submodule._state_dict_config, type(prev_state_dict_config) + ), "All FSDP modules must have the same type of state_dict_config." + if prev_optim_state_dict_config is None: + prev_optim_state_dict_config = submodule._optim_state_dict_config + else: + assert isinstance( + submodule._optim_state_dict_config, + type(prev_optim_state_dict_config), + ), "All FSDP modules must have the same type of optim_state_dict_config." + + submodule._state_dict_type = state_dict_type + submodule._state_dict_config = state_dict_config + submodule._optim_state_dict_config = optim_state_dict_config + + return StateDictSettings( + prev_state_dict_type, prev_state_dict_config, prev_optim_state_dict_config + ) + + @staticmethod + def get_state_dict_type(module: nn.Module) -> StateDictSettings: + """Get the state_dict_type and the corresponding configurations for the FSDP modules rooted at ``module``. + + The target module does not have to be an FSDP module. + + Returns: + A ``StateDictSettings`` containing the state_dict_type and + state_dict / optim_state_dict configs that are currently set. + + Raises: + ``AssertionError`` if the ``StateDictSettings`` for different + FSDP submodules differ. 
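+
+        Example (a minimal sketch assuming ``model`` is already wrapped with
+        FSDP)::
+
+            >>> # xdoctest: +SKIP("undefined variables")
+            >>> FSDP.set_state_dict_type(model, StateDictType.SHARDED_STATE_DICT)
+            >>> settings = FSDP.get_state_dict_type(model)
+            >>> assert settings.state_dict_type == StateDictType.SHARDED_STATE_DICT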
+ """ + state_dict_settings: Optional[StateDictSettings] = None + for submodule in FullyShardedDataParallel.fsdp_modules(module): + if state_dict_settings is None: + state_dict_settings = StateDictSettings( + state_dict_type=submodule._state_dict_type, + state_dict_config=submodule._state_dict_config, + optim_state_dict_config=submodule._optim_state_dict_config, + ) + _set_optim_use_dtensor(submodule, state_dict_settings) + else: + submodule_settings = StateDictSettings( + submodule._state_dict_type, + submodule._state_dict_config, + submodule._optim_state_dict_config, + ) + assert state_dict_settings == submodule_settings, ( + "All FSDP modules must have the same state dict settings." + f"Got {submodule_settings} and {state_dict_settings}." + ) + _set_optim_use_dtensor(submodule, submodule_settings) + return state_dict_settings + + @staticmethod + @contextlib.contextmanager + def state_dict_type( + module: nn.Module, + state_dict_type: StateDictType, + state_dict_config: Optional[StateDictConfig] = None, + optim_state_dict_config: Optional[OptimStateDictConfig] = None, + ) -> Generator: + """Set the ``state_dict_type`` of all the descendant FSDP modules of the target module. + + This context manager has the same functions as :meth:`set_state_dict_type`. Read the document of + :meth:`set_state_dict_type` for the detail. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> model = DDP(FSDP(...)) + >>> with FSDP.state_dict_type( + >>> model, + >>> StateDictType.SHARDED_STATE_DICT, + >>> ): + >>> checkpoint = model.state_dict() + + Args: + module (torch.nn.Module): Root module. + state_dict_type (StateDictType): the desired ``state_dict_type`` to set. + state_dict_config (Optional[StateDictConfig]): the model ``state_dict`` + configuration for the target ``state_dict_type``. + optim_state_dict_config (Optional[OptimStateDictConfig]): the optimizer + ``state_dict`` configuration for the target ``state_dict_type``. + """ + prev_state_dict_settings = FullyShardedDataParallel.set_state_dict_type( + module, + state_dict_type, + state_dict_config, + optim_state_dict_config, + ) + yield + FullyShardedDataParallel.set_state_dict_type( + module, + prev_state_dict_settings.state_dict_type, + prev_state_dict_settings.state_dict_config, + prev_state_dict_settings.optim_state_dict_config, + ) + + def forward(self, *args: Any, **kwargs: Any) -> Any: + """Run the forward pass for the wrapped module, inserting FSDP-specific pre- and post-forward sharding logic.""" + handle = self._handle + with torch.autograd.profiler.record_function( + "FullyShardedDataParallel.forward" + ): + args, kwargs = _root_pre_forward(self, self, args, kwargs) + unused = None + args, kwargs = _pre_forward( + self, + handle, + _pre_forward_unshard, + self._fsdp_wrapped_module, + args, + kwargs, + ) + if handle: + _p_assert( + handle.flat_param.device == self.compute_device, + "Expected `FlatParameter` to be on the compute device " + f"{self.compute_device} but got {handle.flat_param.device}", + ) + output = self._fsdp_wrapped_module(*args, **kwargs) + return _post_forward( + self, handle, _post_forward_reshard, self, unused, output + ) + + @staticmethod + @contextlib.contextmanager + def summon_full_params( + module: nn.Module, + recurse: bool = True, + writeback: bool = True, + rank0_only: bool = False, + offload_to_cpu: bool = False, + with_grads: bool = False, + ) -> Generator: + r"""Expose full params for FSDP instances with this context manager. 
+ + Can be useful *after* forward/backward for a model to get + the params for additional processing or checking. It can take a non-FSDP + module and will summon full params for all contained FSDP modules as + well as their children, depending on the ``recurse`` argument. + + .. note:: This can be used on inner FSDPs. + .. note:: This can *not* be used within a forward or backward pass. Nor + can forward and backward be started from within this context. + .. note:: Parameters will revert to their local shards after the context + manager exits, storage behavior is the same as forward. + .. note:: The full parameters can be modified, but only the portion + corresponding to the local param shard will persist after the + context manager exits (unless ``writeback=False``, in which case + changes will be discarded). In the case where FSDP does not shard + the parameters, currently only when ``world_size == 1``, or ``NO_SHARD`` + config, the modification is persisted regardless of ``writeback``. + .. note:: This method works on modules which are not FSDP themselves but + may contain multiple independent FSDP units. In that case, the given + arguments will apply to all contained FSDP units. + + .. warning:: Note that ``rank0_only=True`` in conjunction with + ``writeback=True`` is not currently supported and will raise an + error. This is because model parameter shapes would be different + across ranks within the context, and writing to them can lead to + inconsistency across ranks when the context is exited. + + .. warning:: Note that ``offload_to_cpu`` and ``rank0_only=False`` will + result in full parameters being redundantly copied to CPU memory for + GPUs that reside on the same machine, which may incur the risk of + CPU OOM. It is recommended to use ``offload_to_cpu`` with + ``rank0_only=True``. + + Args: + recurse (bool, Optional): recursively summon all params for nested + FSDP instances (default: True). + writeback (bool, Optional): if ``False``, modifications to params are + discarded after the context manager exits; + disabling this can be slightly more efficient (default: True) + rank0_only (bool, Optional): if ``True``, full parameters are + materialized on only global rank 0. This means that within the + context, only rank 0 will have full parameters and the other + ranks will have sharded parameters. Note that setting + ``rank0_only=True`` with ``writeback=True`` is not supported, + as model parameter shapes will be different across ranks + within the context, and writing to them can lead to + inconsistency across ranks when the context is exited. + offload_to_cpu (bool, Optional): If ``True``, full parameters are + offloaded to CPU. Note that this offloading currently only + occurs if the parameter is sharded (which is only not the case + for world_size = 1 or ``NO_SHARD`` config). It is recommended + to use ``offload_to_cpu`` with ``rank0_only=True`` to avoid + redundant copies of model parameters being offloaded to the same CPU memory. + with_grads (bool, Optional): If ``True``, gradients are also + unsharded with the parameters. Currently, this is only + supported when passing ``use_orig_params=True`` to the FSDP + constructor and ``offload_to_cpu=False`` to this method. + (Default: ``False``) + """ + with _unshard_params( + module, recurse, writeback, rank0_only, offload_to_cpu, with_grads + ): + yield + + @contextlib.contextmanager + def _deregister_orig_params_ctx(self): + """Deregister the original parameters and expose the :class:`FlatParameter`. 
+ + If a :class:`FlatParameter` is sharded, then + this refreshes the sharded views before exiting. This method should + only be called when using the original parameters. + """ + _p_assert( + self._use_orig_params, + "`_deregister_orig_params_ctx()` should only be called when " + "`_use_orig_params=True`", + ) + for fsdp_module in traversal_utils._get_fsdp_states(self): + _deregister_orig_params(fsdp_module, fsdp_module) + try: + yield + finally: + for fsdp_module in traversal_utils._get_fsdp_states(self): + _register_orig_params(fsdp_module, fsdp_module) + + def _apply(self, *args, **kwargs): + """Deregister the original parameters and expose the :class:`FlatParameter` s before calling ``_apply()``.""" + # When using the original parameters: Since (1) the `FlatParameter`s + # own the storage and (2) `_apply()` is the subroutine underlying the + # most common storage-changing ops like `to()` and `cuda()`, we + # override `_apply()` to have the storage change directly performed on + # the `FlatParameter`s instead of applying to the original parameters + # and then writing back to the `FlatParameter`s. + context = ( + self._deregister_orig_params_ctx() + if self._use_orig_params + else contextlib.nullcontext() + ) + with context: + return super()._apply(*args, **kwargs) + + def named_buffers( + self, + *args, + **kwargs, + ) -> Iterator[Tuple[str, torch.Tensor]]: + """Return an iterator over module buffers, yielding both the name of the buffer and the buffer itself. + + Intercepts buffer names and removes all occurrences of the FSDP-specific flattened buffer prefix + when inside the :meth:`summon_full_params` context manager. + """ + should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS + for buffer_name, buffer in super().named_buffers(*args, **kwargs): + if should_clean_name: + # Remove any instances of the FSDP-specific prefix; there can + # be multiple in the case of nested FSDP modules + buffer_name = buffer_name.replace(FSDP_PREFIX, "") + yield (buffer_name, buffer) + + def named_parameters( + self, + *args, + **kwargs, + ) -> Iterator[Tuple[str, torch.nn.Parameter]]: + """Return an iterator over module parameters, yielding both the name of the parameter and the parameter itself. + + Intercepts parameter names and removes all occurrences of the FSDP-specific flattened parameter prefix + when inside the :meth:`summon_full_params` context manager. + """ + should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS + for param_name, param in super().named_parameters(*args, **kwargs): + if should_clean_name: + # Remove any instances of the FSDP-specific prefix; there can + # be multiple in the case of nested FSDP modules + param_name = param_name.replace(FSDP_PREFIX, "") + yield (param_name, param) + + def _assert_state(self, state: Union[TrainingState, List[TrainingState]]) -> None: + """Assert we are in the given state.""" + # Since assert can be turned off and this error checking + # is really important, we use explicit error checking + # and raise a ValueError if needed. + if isinstance(state, TrainingState): + state = [state] + if self.training_state not in state: + msg = ( + f"expected to be in states {state} but current state " + f"is {self.training_state}" + ) + # In case we are failing in the context of autograd hook, asserting + # may not generate useful msg. So, let's print it to be sure. 
+ if self.rank == 0: + print(f"Asserting FSDP instance is: {self}") + print(f"ERROR: {msg}") + traceback.print_stack() + raise ValueError(msg) + + @contextmanager + def no_sync(self) -> Generator: + """Disable gradient synchronizations across FSDP instances. + + Within this context, gradients will be accumulated in module + variables, which will later be synchronized in the first + forward-backward pass after exiting the context. This should only be + used on the root FSDP instance and will recursively apply to all + children FSDP instances. + + .. note:: This likely results in higher memory usage because FSDP will + accumulate the full model gradients (instead of gradient shards) + until the eventual sync. + + .. note:: When used with CPU offloading, the gradients will not be + offloaded to CPU when inside the context manager. Instead, they + will only be offloaded right after the eventual sync. + """ + _lazy_init(self, self) + if not self._is_root: + raise RuntimeError( + "`no_sync()` on inner FSDP instances is not supported. Please call `no_sync()` on root FSDP module." + ) + self._assert_state(TrainingState.IDLE) + old_flags = [] + for m in self.modules(): + if isinstance(m, FullyShardedDataParallel): + old_flags.append((m, m._sync_gradients)) + m._sync_gradients = False + try: + yield + finally: + for m, old_flag in old_flags: + assert not m._sync_gradients, ( + "`_sync_gradients` was incorrectly set to " + "`True` while in the `no_sync()` context manager" + ) + m._sync_gradients = old_flag + + @torch.no_grad() + def clip_grad_norm_( + self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0 + ) -> torch.Tensor: + """Clip the gradient norm of all parameters. + + The norm is computed over all parameters' gradients as viewed as a single vector, and the + gradients are modified in-place. + + Args: + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` + for infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + + .. note:: If every FSDP instance uses ``NO_SHARD``, meaning that no + gradients are sharded across ranks, then you may directly use + :func:`torch.nn.utils.clip_grad_norm_`. + + .. note:: If at least some FSDP instance uses a sharded strategy (i.e. + one other than ``NO_SHARD``), then you should use this method + instead of :func:`torch.nn.utils.clip_grad_norm_` since this method + handles the fact that gradients are sharded across ranks. + + .. note:: The total norm returned will have the "largest" dtype across + all parameters/gradients as defined by PyTorch's type promotion + semantics. For example, if *all* parameters/gradients use a low + precision dtype, then the returned norm's dtype will be that low + precision dtype, but if there exists at least one parameter/ + gradient using FP32, then the returned norm's dtype will be FP32. + + .. warning:: This needs to be called on all ranks since it uses + collective communications. 
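+
+        Example (an illustrative sketch; ``model`` is assumed to be the root
+        FSDP instance and ``optim`` its optimizer)::
+
+            >>> # xdoctest: +SKIP("undefined variables")
+            >>> loss = model(input).sum()
+            >>> loss.backward()
+            >>> total_norm = model.clip_grad_norm_(max_norm=1.0)
+            >>> optim.step()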
+ """ + _lazy_init(self, self) + if not self._is_root: + raise RuntimeError( + "`clip_grad_norm_()` should only be called on the root FSDP instance" + ) + self._assert_state(TrainingState.IDLE) + # If every FSDP instance uses `NO_SHARD`, then we can directly use + # the normal `nn.utils` one targeting local gradients + all_no_shard = all( + not handle.uses_sharded_strategy for handle in self._all_handles + ) + if all_no_shard: + return torch.nn.utils.clip_grad_norm_( + self.parameters(), max_norm, norm_type + ) + # Otherwise, there exists some FSDP instance using a sharded strategy, + # where sharded and non-sharded parameters must be handled separately + max_norm = float(max_norm) + norm_type = float(norm_type) + sharded_params = set() + nonsharded_params = set() # `NO_SHARD` or not FSDP-managed + grads: List[torch.Tensor] = [] + for handle in self._all_handles: + target_set = ( + sharded_params if handle.uses_sharded_strategy else nonsharded_params + ) + if handle._use_orig_params: + for param in handle.flat_param._params: + target_set.add(param) + if param.grad is not None: + grads.append(param.grad) + else: + target_set.add(handle.flat_param) + if handle.flat_param.grad is not None: + grads.append(handle.flat_param.grad) + for param in self.parameters(): + not_fsdp_managed = ( + param not in sharded_params and param not in nonsharded_params + ) + if not_fsdp_managed: + nonsharded_params.add(param) + if param.grad is not None: + grads.append(param.grad) + # Compute local norms (forced to be in FP32) + local_sharded_norm = _get_grad_norm(sharded_params, norm_type).to( + self.compute_device + ) + local_nonsharded_norm = _get_grad_norm(nonsharded_params, norm_type).to( + self.compute_device + ) + # Reconstruct the total gradient norm depending on the norm type + if norm_type == math.inf: + total_norm = torch.maximum(local_sharded_norm, local_nonsharded_norm) + dist.all_reduce( + total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group + ) + else: + total_norm = local_sharded_norm**norm_type + dist.all_reduce(total_norm, group=self.process_group) + # All-reducing the local non-sharded norm would count it an extra + # world-size-many times + total_norm += local_nonsharded_norm**norm_type + total_norm = total_norm ** (1.0 / norm_type) + if self.cpu_offload.offload_params: + total_norm = total_norm.cpu() + + clip_coef = max_norm / (total_norm + 1e-6) + # Multiplying by the clamped coefficient is meaningless when it is + # equal to 1, but it avoids the host-device sync that would result from + # `if clip_coef < 1` + clip_coef_clamped = torch.clamp(clip_coef, max=1.0) + for grad in grads: + grad.mul_(clip_coef_clamped.to(grad.device, grad.dtype)) + # Use the "largest" dtype by type promotion semantics to use the same + # dtype as if we did not force local norm computation to be in FP32 + if len(grads) == 0: + # If this rank has no gradients, then we must default to FP32 + # unless we use additional communication, which we prefer to avoid + # since `clip_grad_norm_()` is called in the training loop + warnings.warn( + f"Called FSDP.clip_grad_norm_() on rank {self.rank} with no " + "gradients -- returning the total norm in the default dtype " + f"{total_norm.dtype}" + ) # warn since this is generally unexpected + return total_norm + total_norm_dtype = functools.reduce( + torch.promote_types, + [grad.dtype for grad in grads], + ) + return total_norm.to(total_norm_dtype) + + @staticmethod + def _warn_optim_input(optim_input): + if optim_input is not None: + warnings.warn( + "The 
`optim_input` argument is deprecated and will be removed after PyTorch 1.13. You may remove it " + "from your code without changing its functionality." + ) + + @staticmethod + def _is_using_optim_input(optim_input, optim) -> bool: + if optim_input is None and optim is None: + # Use the default behavior of `optim_input`` + return True + if optim_input is not None: + # Use the `optim_input` code path + return True + # Use the `optim` code path + return False + + @staticmethod + def _warn_legacy_optim_state_dict(curr: str, new: str): + warnings.warn( + f"``FullyShardedDataParallel.{curr}``is being deprecated and is " + f"replaced by ``FullyShardedDataParallel.{new}``. " + f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2." + ) + + @staticmethod + def _optim_state_dict_impl( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + rank0_only: bool = True, + full_state_dict: bool = True, + group: Optional[dist.ProcessGroup] = None, + cpu_offload: bool = True, + ) -> Dict[str, Any]: + """Transform the state-dict of an optimizer corresponding to a sharded model. + + This is the internal API that is used by all the optim_state_dict implementations. + Given model, optim, the original optim_state_dict, this API removes the + FSDP internal information and internal sharding from the optim_state_dict. + """ + if full_state_dict: + FullyShardedDataParallel._warn_optim_input(optim_input) + using_optim_input = FullyShardedDataParallel._is_using_optim_input( + optim_input, + optim, + ) + else: + using_optim_input = False + assert optim_input is None and not rank0_only + + use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[ + 0 + ]._use_orig_params + assert all( + use_orig_params == m._use_orig_params + for m in FullyShardedDataParallel.fsdp_modules(model) + ), "Not all FSDP modules have the same _use_orig_params value" + + return _optim_state_dict( + model=model, + optim=optim, + optim_state_dict=optim_state_dict, + optim_input=optim_input, + rank0_only=rank0_only, + shard_state=not full_state_dict, + group=group, + using_optim_input=using_optim_input, + use_orig_params=use_orig_params, + cpu_offload=cpu_offload, + ) + + @staticmethod + def _optim_state_dict_to_load_impl( + optim_state_dict: Dict[str, Any], + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + full_state_dict: bool = True, + rank0_only: bool = False, + is_named_optimizer: bool = False, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """ + Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model. + + This is the internal API that is used by all the load optim_state_dict implementations. + Given model, optim, and the saved optim_state_dict, this API adds the FSDP + internal information and internal sharding to the optim_state_dict. 
+ """ + if full_state_dict: + FullyShardedDataParallel._warn_optim_input(optim_input) + using_optim_input = FullyShardedDataParallel._is_using_optim_input( + optim_input, + optim, + ) + else: + using_optim_input = False + assert optim_input is None and not rank0_only + + use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[ + 0 + ]._use_orig_params + assert all( + use_orig_params == m._use_orig_params + for m in FullyShardedDataParallel.fsdp_modules(model) + ), "Not all FSDP modules have the same _use_orig_params value" + + if rank0_only and dist.get_rank(group) > 0: + optim_state_dict = {} + sharded_osd = _flatten_optim_state_dict( + optim_state_dict, + model=model, + use_orig_params=use_orig_params, + optim=(optim if is_named_optimizer else None), + rank0_only=rank0_only, + group=group, + ) + return _rekey_sharded_optim_state_dict( + sharded_osd, + model=model, + optim=optim, + optim_input=optim_input, + using_optim_input=using_optim_input, + is_named_optimizer=is_named_optimizer, + ) + + @staticmethod + def full_optim_state_dict( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + rank0_only: bool = True, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """Return the full optimizer state-dict. + + Consolidates the full optimizer state on rank 0 and returns it + as a :class:`dict` following the convention of + :meth:`torch.optim.Optimizer.state_dict`, i.e. with keys ``"state"`` + and ``"param_groups"``. The flattened parameters in ``FSDP`` modules + contained in ``model`` are mapped back to their unflattened parameters. + + .. warning:: This needs to be called on all ranks since it uses + collective communications. However, if ``rank0_only=True``, then + the state dict is only populated on rank 0, and all other ranks + return an empty :class:`dict`. + + .. warning:: Unlike ``torch.optim.Optimizer.state_dict()``, this method + uses full parameter names as keys instead of parameter IDs. + + .. note:: Like in :meth:`torch.optim.Optimizer.state_dict`, the tensors + contained in the optimizer state dict are not cloned, so there may + be aliasing surprises. For best practices, consider saving the + returned optimizer state dict immediately, e.g. using + ``torch.save()``. + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]): + Input passed into the optimizer ``optim`` representing either a + :class:`list` of parameter groups or an iterable of parameters; + if ``None``, then this method assumes the input was + ``model.parameters()``. This argument is deprecated, and there + is no need to pass it in anymore. (Default: ``None``) + rank0_only (bool): If ``True``, saves the populated :class:`dict` + only on rank 0; if ``False``, saves it on all ranks. (Default: + ``True``) + group (dist.ProcessGroup): Model's process group or ``None`` if using + the default process group. (Default: ``None``) + + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model`` 's original unflattened parameters and including keys + "state" and "param_groups" following the convention of + :meth:`torch.optim.Optimizer.state_dict`. 
If ``rank0_only=True``, + then nonzero ranks return an empty :class:`dict`. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "full_optim_state_dict", "optim_state_dict" + ) + return FullyShardedDataParallel._optim_state_dict_impl( + model=model, + optim=optim, + optim_state_dict=optim.state_dict(), + optim_input=optim_input, + rank0_only=rank0_only, + group=group, + full_state_dict=True, + ) + + @staticmethod + def sharded_optim_state_dict( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """Return the optimizer state-dict in its sharded form. + + The API is similar to :meth:`full_optim_state_dict` but this API chunks + all non-zero-dimension states to :class:`ShardedTensor` to save memory. + This API should only be used when the model ``state_dict`` is derived + with the context manager ``with state_dict_type(SHARDED_STATE_DICT):``. + + For the detailed usage, refer to :meth:`full_optim_state_dict`. + + .. warning:: The returned state dict contains ``ShardedTensor`` and + cannot be directly used by the regular ``optim.load_state_dict``. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "sharded_optim_state_dict", "optim_state_dict" + ) + return FullyShardedDataParallel._optim_state_dict_impl( + model=model, + optim=optim, + optim_state_dict=optim.state_dict(), + optim_input=None, + rank0_only=False, + full_state_dict=False, + group=group, + ) + + @staticmethod + def shard_full_optim_state_dict( + full_optim_state_dict: Dict[str, Any], + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + ) -> Dict[str, Any]: + """Shard a full optimizer state-dict. + + Remaps the state in ``full_optim_state_dict`` to flattened parameters instead of unflattened + parameters and restricts to only this rank's part of the optimizer state. + The first argument should be the return value of :meth:`full_optim_state_dict`. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> model, optim = ... + >>> full_osd = FSDP.full_optim_state_dict(model, optim) + >>> torch.save(full_osd, PATH) + >>> # Define new model with possibly different world size + >>> new_model, new_optim = ... + >>> full_osd = torch.load(PATH) + >>> sharded_osd = FSDP.shard_full_optim_state_dict(full_osd, new_model) + >>> new_optim.load_state_dict(sharded_osd) + + .. note:: Both :meth:`shard_full_optim_state_dict` and + :meth:`scatter_full_optim_state_dict` may be used to get the + sharded optimizer state dict to load. Assuming that the full + optimizer state dict resides in CPU memory, the former requires + each rank to have the full dict in CPU memory, where each rank + individually shards the dict without any communication, while the + latter requires only rank 0 to have the full dict in CPU memory, + where rank 0 moves each shard to GPU memory (for NCCL) and + communicates it to ranks appropriately. Hence, the former has + higher aggregate CPU memory cost, while the latter has higher + communication cost. + + Args: + full_optim_state_dict (Dict[str, Any]): Optimizer state dict + corresponding to the unflattened parameters and holding the + full non-sharded optimizer state. 
+ model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + correspond to the optimizer state in ``full_optim_state_dict``. + optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]): + Input passed into the optimizer representing either a + :class:`list` of parameter groups or an iterable of parameters; + if ``None``, then this method assumes the input was + ``model.parameters()``. This argument is deprecated, and there + is no need to pass it in anymore. (Default: ``None``) + optim (Optional[torch.optim.Optimizer]): Optimizer that will load + the state dict returned by this method. This is the preferred + argument to use over ``optim_input``. (Default: ``None``) + + Returns: + Dict[str, Any]: The full optimizer state dict now remapped to + flattened parameters instead of unflattened parameters and + restricted to only include this rank's part of the optimizer state. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "shard_full_optim_state_dict", "optim_state_dict_to_load" + ) + return FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=full_optim_state_dict, + model=model, + optim_input=optim_input, + optim=optim, + full_state_dict=True, + is_named_optimizer=False, + ) + + @staticmethod + def flatten_sharded_optim_state_dict( + sharded_optim_state_dict: Dict[str, Any], + model: torch.nn.Module, + optim: torch.optim.Optimizer, + ) -> Dict[str, Any]: + """Flatten a sharded optimizer state-dict. + + The API is similar to :meth:`shard_full_optim_state_dict`. The only + difference is that the input ``sharded_optim_state_dict`` should be + returned from :meth:`sharded_optim_state_dict`. Therefore, there will + be all-gather calls on each rank to gather ``ShardedTensor`` s. + + Args: + sharded_optim_state_dict (Dict[str, Any]): Optimizer state dict + corresponding to the unflattened parameters and holding the + sharded optimizer state. + model (torch.nn.Module): + Refer to :meth:`shard_full_optim_state_dict`. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + + Returns: + Refer to :meth:`shard_full_optim_state_dict`. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "flatten_sharded_optim_state_dict", "optim_state_dict_to_load" + ) + return FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=sharded_optim_state_dict, + model=model, + optim_input=None, + optim=optim, + full_state_dict=False, + is_named_optimizer=False, + ) + + @staticmethod + def scatter_full_optim_state_dict( + full_optim_state_dict: Optional[Dict[str, Any]], + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + group: Optional[Any] = None, + ) -> Dict[str, Any]: + """Scatter the full optimizer state dict from rank 0 to all other ranks. + + Returns the sharded optimizer state dict on each rank. + The return value is the same as :meth:`shard_full_optim_state_dict`, and on rank + 0, the first argument should be the return value of + :meth:`full_optim_state_dict`. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> model, optim = ... 
+ >>> full_osd = FSDP.full_optim_state_dict(model, optim) # only non-empty on rank 0 + >>> # Define new model with possibly different world size + >>> new_model, new_optim, new_group = ... + >>> sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, new_model, group=new_group) + >>> new_optim.load_state_dict(sharded_osd) + + .. note:: Both :meth:`shard_full_optim_state_dict` and + :meth:`scatter_full_optim_state_dict` may be used to get the + sharded optimizer state dict to load. Assuming that the full + optimizer state dict resides in CPU memory, the former requires + each rank to have the full dict in CPU memory, where each rank + individually shards the dict without any communication, while the + latter requires only rank 0 to have the full dict in CPU memory, + where rank 0 moves each shard to GPU memory (for NCCL) and + communicates it to ranks appropriately. Hence, the former has + higher aggregate CPU memory cost, while the latter has higher + communication cost. + + Args: + full_optim_state_dict (Optional[Dict[str, Any]]): Optimizer state + dict corresponding to the unflattened parameters and holding + the full non-sharded optimizer state if on rank 0; the argument + is ignored on nonzero ranks. + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + correspond to the optimizer state in ``full_optim_state_dict``. + optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]): + Input passed into the optimizer representing either a + :class:`list` of parameter groups or an iterable of parameters; + if ``None``, then this method assumes the input was + ``model.parameters()``. This argument is deprecated, and there + is no need to pass it in anymore. (Default: ``None``) + optim (Optional[torch.optim.Optimizer]): Optimizer that will load + the state dict returned by this method. This is the preferred + argument to use over ``optim_input``. (Default: ``None``) + group (dist.ProcessGroup): Model's process group or ``None`` if + using the default process group. (Default: ``None``) + + Returns: + Dict[str, Any]: The full optimizer state dict now remapped to + flattened parameters instead of unflattened parameters and + restricted to only include this rank's part of the optimizer state. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "scatter_full_optim_state_dict", "optim_state_dict_to_load" + ) + return FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=full_optim_state_dict, + model=model, + optim_input=optim_input, + optim=optim, + full_state_dict=True, + rank0_only=True, + is_named_optimizer=False, + group=group, + ) + + @staticmethod + def rekey_optim_state_dict( + optim_state_dict: Dict[str, Any], + optim_state_key_type: OptimStateKeyType, + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + ) -> Dict[str, Any]: + """Re-keys the optimizer state dict ``optim_state_dict`` to use the key type ``optim_state_key_type``. + + This can be used to achieve compatibility between optimizer state dicts from models with FSDP + instances and ones without. + + To re-key an FSDP full optimizer state dict (i.e. from + :meth:`full_optim_state_dict`) to use parameter IDs and be loadable to + a non-wrapped model:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> wrapped_model, wrapped_optim = ... 
+ >>> full_osd = FSDP.full_optim_state_dict(wrapped_model, wrapped_optim) + >>> nonwrapped_model, nonwrapped_optim = ... + >>> rekeyed_osd = FSDP.rekey_optim_state_dict(full_osd, OptimStateKeyType.PARAM_ID, nonwrapped_model) + >>> nonwrapped_optim.load_state_dict(rekeyed_osd) + + To re-key a normal optimizer state dict from a non-wrapped model to be + loadable to a wrapped model:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> nonwrapped_model, nonwrapped_optim = ... + >>> osd = nonwrapped_optim.state_dict() + >>> rekeyed_osd = FSDP.rekey_optim_state_dict(osd, OptimStateKeyType.PARAM_NAME, nonwrapped_model) + >>> wrapped_model, wrapped_optim = ... + >>> sharded_osd = FSDP.shard_full_optim_state_dict(rekeyed_osd, wrapped_model) + >>> wrapped_optim.load_state_dict(sharded_osd) + + Returns: + Dict[str, Any]: The optimizer state dict re-keyed using the + parameter keys specified by ``optim_state_key_type``. + """ + FullyShardedDataParallel._warn_optim_input(optim_input) + using_optim_input = FullyShardedDataParallel._is_using_optim_input( + optim_input, + optim, + ) + assert optim_state_key_type in ( + OptimStateKeyType.PARAM_NAME, + OptimStateKeyType.PARAM_ID, + ) + osd = optim_state_dict # alias + # Validate that the existing parameter keys are uniformly typed + uses_param_name_mask = [type(param_key) is str for param_key in osd["state"]] + uses_param_id_mask = [type(param_key) is int for param_key in osd["state"]] + if (any(uses_param_name_mask) and not all(uses_param_name_mask)) or ( + any(uses_param_id_mask) and not all(uses_param_id_mask) + ): + error_msg = f"Invalid parameter keys: {osd['state'].keys()}" + raise ValueError(error_msg) + # Return directly if the existing key type matches the target key type + if ( + optim_state_key_type == OptimStateKeyType.PARAM_NAME + and all(uses_param_name_mask) + ) or ( + optim_state_key_type == OptimStateKeyType.PARAM_ID + and all(uses_param_id_mask) + ): + return osd + # Otherwise, actually perform the re-keying + new_osd = {} + if optim_state_key_type == OptimStateKeyType.PARAM_NAME: # ID -> name + param_id_to_param = ( + _get_param_id_to_param_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_key_to_param(optim) + ) + param_to_param_name = _get_param_to_fqn(model) + param_id_to_param_name: List[str] = [ + param_to_param_name[param] for param in param_id_to_param.values() + ] + new_osd["state"] = { + param_id_to_param_name[param_id]: param_state + for param_id, param_state in osd["state"].items() + } + new_osd["param_groups"] = copy.deepcopy(osd["param_groups"]) + for param_group in new_osd["param_groups"]: + param_group["params"] = sorted( + [ + param_id_to_param_name[param_id] + for param_id in param_group["params"] + ] + ) + return new_osd + elif optim_state_key_type == OptimStateKeyType.PARAM_ID: # name -> ID + param_name_to_param = _get_fqn_to_param(model) + param_to_param_id = ( + _get_param_to_param_id_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_to_param_key(optim) + ) + # Because not all model parameters may be passed as the optimizer + # input, we may need to drop some parameters from this mapping + param_name_to_param_id = { + param_name: param_to_param_id[param] + for param_name, param in param_name_to_param.items() + if param in param_to_param_id + } + new_osd["state"] = { + param_name_to_param_id[param_name]: param_state + for param_name, param_state in osd["state"].items() + } + new_osd["param_groups"] = copy.deepcopy(osd["param_groups"]) + for param_group in 
new_osd["param_groups"]: + param_group["params"] = sorted( + [ + param_name_to_param_id[param_name] + for param_name in param_group["params"] + ] + ) + return new_osd + return new_osd # should never reach here + + @staticmethod + def optim_state_dict( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Optional[Dict[str, Any]] = None, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """ + Transform the state-dict of an optimizer corresponding to a sharded model. + + The given state-dict can be transformed to one of three types: + 1) full optimizer state_dict, 2) sharded optimizer state_dict, 3) local optimizer state_dict. + + For full optimizer state_dict, all states are unflattened and not sharded. + Rank0 only and CPU only can be specified via :meth:`state_dict_type` to + avoid OOM. + + For sharded optimizer state_dict, all states are unflattened but sharded. + CPU only can be specified via :meth:`state_dict_type` to further save + memory. + + For local state_dict, no transformation will be performed. But a state + will be converted from nn.Tensor to ShardedTensor to represent its sharding + nature (this is not supported yet). + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> from torch.distributed.fsdp import StateDictType + >>> from torch.distributed.fsdp import FullStateDictConfig + >>> from torch.distributed.fsdp import FullOptimStateDictConfig + >>> # Save a checkpoint + >>> model, optim = ... + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> state_dict = model.state_dict() + >>> optim_state_dict = FSDP.optim_state_dict(model, optim) + >>> save_a_checkpoint(state_dict, optim_state_dict) + >>> # Load a checkpoint + >>> model, optim = ... + >>> state_dict, optim_state_dict = load_a_checkpoint() + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> model.load_state_dict(state_dict) + >>> optim_state_dict = FSDP.optim_state_dict_to_load( + >>> model, optim, optim_state_dict + >>> ) + >>> optim.load_state_dict(optim_state_dict) + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + optim_state_dict (Dict[str, Any]): the target optimizer state_dict to + transform. If the value is None, optim.state_dict() will be used. ( + Default: ``None``) + group (dist.ProcessGroup): Model's process group across which parameters + are sharded or ``None`` if using the default process group. ( + Default: ``None``) + + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model``. The sharding of the optimizer state is based on + ``state_dict_type``. 
+ """ + state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model) + if optim_state_dict is None: + optim_state_dict = optim.state_dict() + return FullyShardedDataParallel._optim_state_dict_impl( + model=model, + optim=optim, + optim_state_dict=optim_state_dict, + optim_input=None, + rank0_only=getattr( + state_dict_settings.optim_state_dict_config, "rank0_only", False + ), + full_state_dict=state_dict_settings.state_dict_type + == StateDictType.FULL_STATE_DICT, + group=group, + cpu_offload=getattr( + state_dict_settings.optim_state_dict_config, "offload_to_cpu", True + ), + ) + + @staticmethod + def optim_state_dict_to_load( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + is_named_optimizer: bool = False, + load_directly: bool = False, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """ + Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model. + + Given a ``optim_state_dict`` that is transformed through + :meth:`optim_state_dict`, it gets converted to the flattened optimizer + state_dict that can be loaded to ``optim`` which is the optimizer for + ``model``. ``model`` must be sharded by FullyShardedDataParallel. + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> from torch.distributed.fsdp import StateDictType + >>> from torch.distributed.fsdp import FullStateDictConfig + >>> from torch.distributed.fsdp import FullOptimStateDictConfig + >>> # Save a checkpoint + >>> model, optim = ... + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> state_dict = model.state_dict() + >>> original_osd = optim.state_dict() + >>> optim_state_dict = FSDP.optim_state_dict( + >>> model, + >>> optim, + >>> optim_state_dict=original_osd + >>> ) + >>> save_a_checkpoint(state_dict, optim_state_dict) + >>> # Load a checkpoint + >>> model, optim = ... + >>> state_dict, optim_state_dict = load_a_checkpoint() + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> model.load_state_dict(state_dict) + >>> optim_state_dict = FSDP.optim_state_dict_to_load( + >>> model, optim, optim_state_dict + >>> ) + >>> optim.load_state_dict(optim_state_dict) + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + optim_state_dict (Dict[str, Any]): The optimizer states to be loaded. + is_named_optimizer (bool): Is this optimizer a NamedOptimizer or + KeyedOptimizer. Only set to True if ``optim`` is TorchRec's + KeyedOptimizer or torch.distributed's NamedOptimizer. + load_directly (bool): If this is set to True, this API will also + call optim.load_state_dict(result) before returning the result. + Otherwise, users are responsible to call ``optim.load_state_dict()`` + (Default: ``False``) + group (dist.ProcessGroup): Model's process group across which parameters + are sharded or ``None`` if using the default process group. 
( + Default: ``None``) + """ + state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model) + result = FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=optim_state_dict, + model=model, + optim_input=None, + optim=optim, + full_state_dict=( + state_dict_settings.state_dict_type == StateDictType.FULL_STATE_DICT + ), + rank0_only=getattr( + state_dict_settings.optim_state_dict_config, "rank0_only", False + ), + is_named_optimizer=is_named_optimizer, + group=group, + ) + if load_directly: + optim.load_state_dict(result) + return result + + def register_comm_hook(self, state: object, hook: callable): + """Register a communication hook. + + This is an enhancement that provides a flexible hook to users where they can specify how FSDP aggregates + gradients across multiple workers. + This hook can be used to implement several algorithms like + `GossipGrad `_ and gradient compression + which involve different communication strategies for + parameter syncs while training with :class:`FullyShardedDataParallel`. + + .. warning :: + FSDP communication hook should be registered before running an initial forward pass + and only once. + + Args: + state (object): Passed to the hook to maintain any state information during the training process. + Examples include error feedback in gradient compression, + peers to communicate with next in `GossipGrad `_, etc. + It is locally stored by each worker + and shared by all the gradient tensors on the worker. + hook (Callable): Callable, which has one of the following signatures: + 1) ``hook: Callable[torch.Tensor] -> None``: + This function takes in a Python tensor, which represents + the full, flattened, unsharded gradient with respect to all variables + corresponding to the model this FSDP unit is wrapping + (that are not wrapped by other FSDP sub-units). + It then performs all necessary processing and returns ``None``; + 2) ``hook: Callable[torch.Tensor, torch.Tensor] -> None``: + This function takes in two Python tensors, the first one represents + the full, flattened, unsharded gradient with respect to all variables + corresponding to the model this FSDP unit is wrapping + (that are not wrapped by other FSDP sub-units). The latter + represents a pre-sized tensor to store a chunk of a sharded gradient after + reduction. + In both cases, callable performs all necessary processing and returns ``None``. + Callables with signature 1 are expected to handle gradient communication for a `NO_SHARD` case. + Callables with signature 2 are expected to handle gradient communication for sharded cases. + + """ + if not self.check_is_root(): + raise AssertionError( + "register_comm_hook can only be called on a root instance." + ) + for fsdp_state in traversal_utils._get_fsdp_states(self): + if fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES: + raise AssertionError( + f"Communication hook is not supported for hybrid strategies: {fsdp_state.sharding_strategy}" + ) + if fsdp_state._comm_hook is not None: + raise AssertionError("A communication hook is already registered") + if not callable(hook): + raise ValueError( + f"The communication hook must be callable but got {hook}" + ) + fsdp_state._comm_hook = hook + fsdp_state._comm_hook_state = state + + +def _get_grad_norm( + params: Iterable[nn.Parameter], + norm_type: float, +) -> torch.Tensor: + """ + Return the gradient norm of parameters ``param`` s, where the gradients are viewed as a single vector. 
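+
+    For a finite ``norm_type`` :math:`p`, the implementation below relies on
+    the identity
+
+    .. math::
+        \Bigl(\sum_i \lVert g_i \rVert_p^{\,p}\Bigr)^{1/p}
+            = \lVert (g_1, \dots, g_N) \rVert_p,
+
+    i.e. the :math:`p`-norm of the per-gradient norms equals the norm of all
+    gradient elements viewed as a single vector, so the gradients never have
+    to be concatenated into one flat tensor (the ``inf`` case reduces to a
+    maximum over per-gradient maxima).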
+ + The returned norm is in FP32 even if parameters/gradients are in a low precision. This is because the downstream + use of this return value is a reduction across ranks. + """ + params_with_grad = [param for param in params if param.grad is not None] + if len(params_with_grad) == 0: + return torch.tensor(0.0) + grads = [param.grad for param in params_with_grad] + grad_dtypes = {grad.dtype for grad in grads} + if len(grad_dtypes) != 1: + raise ValueError( + f"Requires uniform dtype across all gradients but got {grad_dtypes}" + ) + # Compute the gradient norm in FP32, where we treat the gradients as a + # single vector + grad_norm = torch.linalg.vector_norm( + torch.stack( + [ + torch.linalg.vector_norm(grad.detach(), norm_type, dtype=torch.float32) + for grad in grads + ], + ), + norm_type, + dtype=torch.float32, + ) + return grad_norm + + +def _get_param_to_fqn( + model: torch.nn.Module, +) -> Dict[torch.nn.Parameter, str]: + """ + Construct a mapping from parameters to their parameter names. + + The ``model`` should not contain any :class:`FullyShardedDataParallel` instances, which + means that none of the parameters should be ``FlatParameter`` s. As a + result, compared to :meth:`_get_param_to_fqns`, the mapped + values may be flattened from singleton :class:`list` s to the contained + names themselves. + + Args: + model (torch.nn.Module): Root module, which should not contain any + :class:`FullyShardedDataParallel` instances. + """ + param_to_param_names = _get_param_to_fqns(model) + for param_names in param_to_param_names.values(): + assert ( + len(param_names) > 0 + ), "`_get_param_to_fqns()` should not construct empty lists" + if len(param_names) > 1: + raise RuntimeError( + "Each parameter should only map to one parameter name but got " + f"{len(param_names)}: {param_names}" + ) + param_to_param_name = { + param: param_names[0] for param, param_names in param_to_param_names.items() + } + return param_to_param_name + + +def _get_fqn_to_param( + model: torch.nn.Module, +) -> Dict[str, torch.nn.Parameter]: + """Construct the inverse mapping of :meth:`_get_param_to_fqn`.""" + param_to_param_name = _get_param_to_fqn(model) + return dict(zip(param_to_param_name.values(), param_to_param_name.keys())) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..929c14355a14f64b80f290d66c89608e54b0ebd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py @@ -0,0 +1,388 @@ +import logging +from collections import abc, defaultdict +from typing import Any, Dict, Iterable, List, Optional, overload, Sequence, Tuple, Union + +import torch +import torch.distributed as dist +from torch.amp.grad_scaler import _MultiDeviceReplicator, GradScaler, OptState +from torch.distributed.distributed_c10d import ProcessGroup + +log = logging.getLogger(__name__) + + +def _refresh_per_optimizer_state() -> Dict[str, Any]: + return {"stage": OptState.READY, "found_inf_per_device": {}} + + +def _is_supported_device(tensor: torch.Tensor) -> bool: + return tensor.is_cuda or tensor.device.type in ("xla", "cpu", "hpu") + + +class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator): + """ + Lazily serves tensor to request device. This class extends + _MultiDeviceReplicator to allow support for "cpu" as a device. 
+ """ + + def __init__(self, master_tensor: torch.Tensor) -> None: + assert _is_supported_device(master_tensor) + self.master = master_tensor + self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} + + +class ShardedGradScaler(GradScaler): + """ + ShardedGradScaler helps perform gradient scaling in a shard aware manner. It extends + functionality from GradScaler: + * Supports Pytorch DDP and FSDP implementations + * Support CPU offloaded tensors (as used in fully sharded data parallel[FSDP]) + * Supports the custom Mixed Precision loss dtype (fp16, bf16) that FSDP returns + * Sync inf/nan for scaled gradient tensors on any torch.device (where tensors are placed) across + nodes + + Example:: + + # Creates a ShardedGradScaler once at the beginning of training. + scaler = ShardedGradScaler() + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + output = model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + scaler.scale(loss).backward() + + # scaler.step() first unscales gradients of the optimizer's params. + # If gradients don't contain infs/NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + See :class:`GradScaler` for explanation of scaling/unscaling and more use cases. + + Args: + init_scale (float, optional, default=2.**16): Initial scale factor. + growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during + :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations. + backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during + :meth:`update` if inf/NaN gradients occur in an iteration. + growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients + that must occur for the scale to be multiplied by ``growth_factor``. + enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply + invokes the underlying ``optimizer.step()``, and other methods become no-ops. + Default: ``True`` + process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD): + process group for sharding + """ + + def __init__( + self, + device: str = "cuda", + init_scale: float = 2.0**16, + backoff_factor: float = 0.5, + growth_factor: float = 2.0, + growth_interval: int = 2000, + enabled: bool = True, + process_group: Optional[ProcessGroup] = dist.group.WORLD, + ) -> None: + super().__init__( + device, + init_scale=init_scale, + backoff_factor=backoff_factor, + growth_factor=growth_factor, + growth_interval=growth_interval, + enabled=enabled, + ) + if self._enabled: + self.process_group = process_group + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + + @overload + def scale(self, outputs: torch.Tensor) -> torch.Tensor: + ... + + @overload + def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]: + ... + + @overload + def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]: + ... + + @overload + def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: + ... 
+ + def scale( + self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]] + ) -> Union[torch.Tensor, Iterable[torch.Tensor]]: + if not self._enabled: + return outputs + + if isinstance(outputs, torch.Tensor): + assert _is_supported_device(outputs) + if self._scale is None: + self._lazy_init_scale_growth_tracker(outputs.device) + assert self._scale is not None + scaled_output = outputs * self._scale.to( + device=outputs.device, non_blocking=True + ) + # Here we ensure the return dtype is the same as the outputs dtype. + # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision + # format (fp16, bf16) and so the scaled loss should be of the same dtype. + return scaled_output.type(outputs.dtype) + + stash: List[_GeneralMultiDeviceReplicator] = [] + + def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]): + if isinstance(val, torch.Tensor): + assert _is_supported_device(val) + if len(stash) == 0: + if self._scale is None: + self._lazy_init_scale_growth_tracker(val.device) + assert self._scale is not None + stash.append(_GeneralMultiDeviceReplicator(self._scale)) + scaled_val = val * stash[0].get(val.device) + # Here we ensure the return dtype is the same as the outputs dtype. + # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision + # format (fp16, bf16) and so the scaled loss should be of the same dtype. + return scaled_val.type(val.dtype) + if isinstance(val, abc.Iterable): + iterator = map(apply_scale, val) + if isinstance(val, (list, tuple)): + return type(val)(iterator) + return iterator + raise ValueError("outputs must be a Tensor or an iterable of Tensors") + + return apply_scale(outputs) + + def _foreach_non_finite_check_and_unscale_cpu_( + self, + grads: Sequence[torch.Tensor], + found_inf: torch.Tensor, + inv_scale: torch.Tensor, + ) -> None: + if len(grads) == 0: + return + assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor." + assert found_inf.numel() == 1, "found_inf must be a 1-element tensor." + + for grad in grads: + if grad.device.type != "cpu": + log.error( + "tensor device is %s but was expected to be ``cpu``", + grad.device, + ) + raise ValueError( + "Gradients were found on a non-CPU device when" + " expected to be on CPU." + ) + if ( + torch.isinf(grad).any().item() is True + or torch.isnan(grad).any().item() is True + ): + found_inf.data = torch.tensor([1.0]) + break + else: + grad.data *= inv_scale.item() + + def _unscale_grads_( + self, + optimizer: torch.optim.Optimizer, + inv_scale: torch.Tensor, + found_inf: torch.Tensor, + allow_fp16: bool = True, + ) -> Dict[torch.device, torch.Tensor]: + per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale) + per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be thousands of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. 
+ per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated] + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + if param.grad is None: + continue + if (not allow_fp16) and param.grad.dtype == torch.float16: + raise ValueError("Attempting to unscale FP16 gradients.") + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled fp16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). + if param.grad.dtype is torch.float16: + # coalesce is not supported in torch.float16 + param_grad_fp32 = param.grad.type(torch.float32).coalesce() + param.grad = param_grad_fp32.type(torch.float16) + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + per_device_and_dtype_grads[to_unscale.device][ + to_unscale.dtype + ].append(to_unscale) + + for device, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + if grads[0].device.type == "cpu": + self._foreach_non_finite_check_and_unscale_cpu_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + else: + torch._amp_foreach_non_finite_check_and_unscale_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + # There exist contexts (e.g. w/ `use_orig_params=True`) wherein some + # ranks may have no (non-zero sized) parameter shards, necessitating the + # initialization of `per_device_found_inf._per_device_tensors` here + if not per_device_found_inf._per_device_tensors: + assert self._scale is not None + per_device_found_inf.get(self._scale.device) + return per_device_found_inf._per_device_tensors + + def unscale_(self, optimizer: torch.optim.Optimizer) -> None: + if not self._enabled: + return + + self._check_scale_growth_tracker("unscale_") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.UNSCALED: + raise RuntimeError( + "unscale_() has already been called on this optimizer since the last update()." + ) + elif optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError("unscale_() is being called after step().") + + # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. 
+ assert self._scale is not None + inv_scale = self._scale.double().reciprocal().float() + found_inf = torch.full( + (1,), 0.0, dtype=torch.float32, device=self._scale.device + ) + + optimizer_state["found_inf_per_device"] = self._unscale_grads_( + optimizer, inv_scale, found_inf, True + ) + optimizer_state["stage"] = OptState.UNSCALED + + # Synchronize the detected inf across the ranks + optimizer_state = self._per_optimizer_states[id(optimizer)] + works = [] + found_inf_on_cpus = [] + found_inf_on_cudas = [] + + for found_inf in optimizer_state["found_inf_per_device"].values(): + if self._device == "cuda" and found_inf.device.type == "cpu": + found_inf_on_cpus.append(found_inf) + found_inf_on_cuda = found_inf.cuda() + found_inf_on_cudas.append(found_inf_on_cuda) + works.append( + dist.all_reduce( + found_inf_on_cuda, async_op=True, group=self.process_group + ) + ) + else: + works.append( + dist.all_reduce(found_inf, async_op=True, group=self.process_group) + ) + for work in works: + work.wait() + if found_inf_on_cpus: + torch._foreach_copy_(found_inf_on_cpus, found_inf_on_cudas) + + def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None: + """ + If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero. + Otherwise, scale is multiplied by the growth factor when the growth interval is reached. + """ + assert self._scale is not None and self._growth_tracker is not None + + if found_inf.item() >= 1.0: + self._scale *= self._backoff_factor + self._growth_tracker.fill_(0) + else: + successful = self._growth_tracker + 1 + if successful == self._growth_interval: + self._scale *= self._growth_factor + self._growth_tracker.fill_(0) + else: + self._growth_tracker = successful + + def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None: + """ + Updates the scale factor. + If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` + to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, + the scale is multiplied by ``growth_factor`` to increase it. + Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not + used directly, it's used to fill GradScaler's internal scale tensor. So if + ``new_scale`` was a tensor, later in-place changes to that tensor will not further + affect the scale GradScaler uses internally.) + Args: + new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor. + .. warning:: + :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has + been invoked for all optimizers used this iteration. + """ + + if not self._enabled: + return + + _scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated] + + if new_scale is not None: + # Accept a new user-defined scale. + if isinstance(new_scale, float): + self._scale.fill_(new_scale) # type: ignore[union-attr] + else: + reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \ + torch.FloatTensor with requires_grad=False." + assert new_scale.device.type == self._device, reason + assert new_scale.numel() == 1, reason + assert new_scale.requires_grad is False, reason + self._scale.copy_(new_scale) # type: ignore[union-attr] + else: + # Consume shared inf/nan data collected from optimizers to update the scale. + # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. 
+ found_infs = [ + found_inf.to(device=_scale.device, non_blocking=True) + for state in self._per_optimizer_states.values() + for found_inf in state["found_inf_per_device"].values() + ] + + assert len(found_infs) > 0, "No inf checks were recorded prior to update." + + found_inf_combined = found_infs[0] + if len(found_infs) > 1: + for i in range(1, len(found_infs)): + found_inf_combined += found_infs[i] + + if _scale.device.type == "cpu": + self._amp_update_scale_cpu_(found_inf_combined) + else: + torch._amp_update_scale_( + self._scale, # type: ignore[arg-type] + self._growth_tracker, # type: ignore[arg-type] + found_inf_combined, + self._growth_factor, # type: ignore[arg-type] + self._backoff_factor, # type: ignore[arg-type] + self._growth_interval, # type: ignore[arg-type] + ) + + # To prepare for next iteration, clear the data collected from optimizers this iteration. + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py new file mode 100644 index 0000000000000000000000000000000000000000..90796269de4617cb8e524b5503344181424073bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py @@ -0,0 +1,606 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +import contextlib +import copy +from abc import ABC, abstractmethod +from typing import ( + Any, + Callable, + cast, + Dict, + Generator, + Iterable, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, +) + +import torch.nn as nn + +__all__ = [ + "always_wrap_policy", + "lambda_auto_wrap_policy", + "transformer_auto_wrap_policy", + "size_based_auto_wrap_policy", + "enable_wrap", + "wrap", + "CustomPolicy", + "ModuleWrapPolicy", +] + + +# NOTE: We intentionally keep this function simple and isolate the complexity +# to `fn` to enable using this function generically. We may move this to a +# non-FSDP-specific folder and/or make it public in the future. +def _post_order_apply( + root_module: nn.Module, + fn: Callable[[nn.Module], Optional[nn.Module]], +): + """ + This applies ``fn`` to every module in the module tree of ``root_module`` + following a post-order traversal. If ``fn`` returns an :class:`nn.Module`, + then this replaces the original module with the newly returned one in the + tree. Otherwise, ``fn`` should return ``None``, in which case the module is + not changed. 
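+
+    For example, a hypothetical ``fn`` that replaces every ``nn.Linear`` with
+    a user-defined ``Wrapper`` module and leaves all other modules untouched
+    could be written as::
+
+        def fn(module: nn.Module) -> Optional[nn.Module]:
+            # returning a module swaps it into the tree; returning None keeps it
+            return Wrapper(module) if isinstance(module, nn.Linear) else None
+
+        _post_order_apply(root_module, fn)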
+ """ + # Track visited modules to avoid visiting shared modules multiple times + visited_modules: Set[nn.Module] = {root_module} + + def _post_order_apply_inner( + module: nn.Module, + module_name: str, + parent_module: Optional[nn.Module], + ): + for child_module_name, child_module in module.named_children(): + if child_module not in visited_modules: + visited_modules.add(child_module) + _post_order_apply_inner(child_module, child_module_name, module) + optional_module = fn(module) + if optional_module is not None: + assert isinstance(parent_module, nn.Module), ( + "Non-root modules should have their parent module set but got " + f"{parent_module} for {module}" + ) + assert module_name, ( + "Non-root modules should have their module name set but got " + f"an empty module name for {module}" + ) + assert isinstance( + optional_module, nn.Module + ), f"fn should return None or an nn.Module but got {optional_module}" + setattr(parent_module, module_name, optional_module) + + _post_order_apply_inner(root_module, "", None) + + +def _construct_wrap_fn( + root_module: nn.Module, + target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]], + fsdp_fn: Callable, +) -> Callable[[nn.Module], Optional[nn.Module]]: + """ + This constructs the "wrap" function to pass to :func:`_post_order_apply` + based on ``target_module_to_kwargs``, which should be constructed from the + wrapping policy. + """ + + def fn(module: nn.Module) -> Optional[nn.Module]: + # Explicitly avoid wrapping the root module since for FSDP, it is + # handled by the caller + if module in target_module_to_kwargs and module is not root_module: + kwargs = target_module_to_kwargs[module] + return fsdp_fn(module, **kwargs) + return None + + return fn + + +def _run_mixed_precision_override_policy( + root_module: nn.Module, + module_classes: Iterable[Type[nn.Module]], + ignored_modules: Set[nn.Module], + root_kwargs: Dict[str, Any], + target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]], +): + module_classes_tuple = tuple(set(module_classes)) + for module in root_module.modules(): + if module in ignored_modules: + continue + elif isinstance(module, module_classes_tuple): + # This policy overrides any existing policy + if module not in target_module_to_kwargs: + # Only inherit from the root kwargs if not already specified + target_module_to_kwargs[module] = root_kwargs + target_module_to_kwargs[module]["mixed_precision"] = None + return target_module_to_kwargs + + +def always_wrap_policy(*args, **kwargs) -> bool: + """ + A simple recursive wrap policy that always returns ``True``. This means + that every submodule is wrapped by the wrapper class in + :func:`_recursive_wrap`. + """ + return True + + +class _Policy(ABC): + """ + This defines an abstract base class that represents a policy for applying + a module-level API. + """ + + @abstractmethod + def _run_policy( + self, + root_module: nn.Module, + ignored_modules: Set[nn.Module], + root_kwargs: Dict[str, Any], + ) -> Dict[nn.Module, Dict[str, Any]]: + """ + This should return a dict ``target_module_to_kwargs`` that maps from + each target module to wrap to its kwargs. + """ + ... + + +def _module_wrap_policy( + module: nn.Module, + recurse: bool, + nonwrapped_numel: int, + module_classes: Set[Type[nn.Module]], +) -> bool: + """ + This auto wrap policy wraps every module that is an instance of any type in + ``module_classes`` as its own FSDP instance. The root module given by + ``module`` is always wrapped as an FSDP instance regardless. 
Since the + wrapping proceeds bottom up, each FSDP instance manages the parameters in + its subtree excluding any already managed by a child FSDP instance. + + Args: + module (nn.Module): Current module being considered. + recurse (bool): If ``False``, then this function must decide whether + ``module`` should be wrapped as an FSDP instance or not. If + ``True``, then the function is still recursing down the module + tree as a part of the DFS. + nonwrapped_numel (int): Parameter numel not yet wrapped. + module_classes (Set[Type[nn.Module]]): Set of module classes that are + wrapped as FSDP instances. + + Returns: + ``True`` if ``recurse=True``, and whether ``module`` should be wrapped + if ``recurse=False``. + """ + if recurse: + return True # always recurse + return isinstance(module, tuple(module_classes)) + + +class ModuleWrapPolicy(_Policy): + """ + This policy applies to every module of the specified module classes, + passing in the kwargs given to the root. + """ + + def __init__(self, module_classes: Iterable[Type[nn.Module]]): + module_classes_set = set(module_classes) + self._module_classes = module_classes_set + self._module_classes_str = str(module_classes_set) + + def _run_policy( + self, + root_module: nn.Module, + ignored_modules: Set[nn.Module], + root_kwargs: Dict[str, Any], + ) -> Dict[nn.Module, Dict[str, Any]]: + module_classes = tuple(self._module_classes) + target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]] = {} + for module in root_module.modules(): + if module in ignored_modules: + continue + elif isinstance(module, module_classes): + # Shallow copy to avoid coupling changes across modules + target_module_to_kwargs[module] = copy.copy(root_kwargs) + return target_module_to_kwargs + + def __call__(self, module, recurse, *args, **kwargs): + # nonwrapped_numel is not used. + return _module_wrap_policy( + module, recurse, nonwrapped_numel=-1, module_classes=self._module_classes + ) + + def __repr__(self) -> str: + return super().__repr__() + f"({self._module_classes_str})" + + +class CustomPolicy(_Policy): + """ + This policy takes in a lambda function that maps a given ``nn.Module`` to + either ``False``, ``True``, or a kwarg dictionary. + - If the function returns ``False`` or an empty dictionary, then the module + does not have the API applied. + - If the function returns ``True``, then the module has the API applied + with the root's kwargs. + - If the function returns a non-empty dictionary, then the module has the + API applied, and the dictionary overrides the root's kwargs. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> model = init_transformer_model(...) 
+ >>> def lambda_fn(module: nn.Module): + >>> if module is model.lm_head: + >>> return {"sharding_strategy": ShardingStrategy.SHARD_GRAD_OP} + >>> elif isinstance(module, TransformerBlock): + >>> return True + >>> return False + >>> policy = CustomPolicy(lambda_fn) + >>> fsdp_model = FSDP(model, auto_wrap_policy=policy) + """ + + def __init__(self, lambda_fn: Callable[[nn.Module], Union[bool, Dict[str, Any]]]): + self._lambda_fn = lambda_fn + + def _run_policy( + self, + root_module: nn.Module, + ignored_modules: Set[nn.Module], + root_kwargs: Dict[str, Any], + ) -> Dict[nn.Module, Dict[str, Any]]: + target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]] = {} + for module in root_module.modules(): + if module in ignored_modules: + continue + res = self._lambda_fn(module) + if not isinstance(res, (dict, bool)): + raise ValueError( + "The lambda_fn passed to CustomPolicy should return " + f"False/True or a kwarg dict, but it returned {res}" + ) + if not res: + continue + kwargs = copy.copy(root_kwargs) + if isinstance(res, dict): + # Override the root kwargs with the ones specified by the + # lambda function + kwargs.update(res) + target_module_to_kwargs[module] = kwargs + return target_module_to_kwargs + + +def lambda_auto_wrap_policy( + module: nn.Module, recurse: bool, nonwrapped_numel: int, lambda_fn: Callable +) -> bool: + """ + A convenient auto wrap policy to wrap submodules based on an arbitrary user + function. If `lambda_fn(submodule) == True``, the submodule will be wrapped as + a `wrapper_cls` unit. + + Return if a module should be wrapped during auto wrapping. + + The first three parameters are required by :func:`_recursive_wrap`. + + Args: + module (nn.Module): Current module being considered. + recurse (bool): If ``False``, then this function must decide whether + ``module`` should be wrapped as an FSDP instance or not. If + ``True``, then the function is still recursing down the module + tree as a part of the DFS. + nonwrapped_numel (int): Parameter numel not yet wrapped. + + lambda_fn (Callable[[nn.Module], bool]): If this returns ``True``, then + this module will be wrapped. + """ + if recurse: + return True # always recurse + return lambda_fn(module) + + +def transformer_auto_wrap_policy( + module: nn.Module, + recurse: bool, + nonwrapped_numel: int, + transformer_layer_cls: Set[Type[nn.Module]], +) -> bool: + """ + See :func:`_module_wrap_policy`, where ``transformer_layer_cls`` is the + same as ``module_classes``. Note that shared parameters must be wrapped in + the same FSDP instance, so this auto wrap policy can help wrap shared + embeddings into the same FSDP instance for transformer models. + """ + return _module_wrap_policy(module, recurse, nonwrapped_numel, transformer_layer_cls) + + +def _wrap_module_cls_individually( + module: nn.Module, module_classes: Sequence[type], recurse: bool, *args, **kwargs +): + if recurse: + # always recurse + return True + else: + # if not recursing, decide whether we should wrap based on whether the type of module + # is in `module_classes`. + return isinstance(module, tuple(module_classes)) + + +def _or_policy( + module: nn.Module, + recurse: bool, + nonwrapped_numel: int, + policies, +) -> bool: + """ + A policy that wraps ``module`` if any policy in the passed in iterable of + ``policies`` returns ``True``. 
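In practice these policy callables are bound with functools.partial and handed to FSDP's auto_wrap_policy argument. A small sketch follows; the Block class is a stand-in for a real transformer layer class, and actually constructing FSDP additionally requires an initialized process group:

import functools
import torch.nn as nn
from torch.distributed.fsdp.wrap import (
    size_based_auto_wrap_policy,
    transformer_auto_wrap_policy,
)

class Block(nn.Module):
    # Stand-in for a transformer layer class.
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(16, 16)

# Wrap every Block in its own FSDP unit ...
layer_policy = functools.partial(
    transformer_auto_wrap_policy, transformer_layer_cls={Block}
)
# ... or wrap any subtree holding at least 1e6 not-yet-wrapped parameters.
size_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=int(1e6))

# Bound policies follow the (module, recurse, nonwrapped_numel) signature:
assert layer_policy(module=Block(), recurse=False, nonwrapped_numel=0) is True
# With a process group initialized, either policy would be passed as
# FSDP(model, auto_wrap_policy=layer_policy).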
+ """ + return any( + policy(module=module, recurse=recurse, nonwrapped_numel=nonwrapped_numel) + for policy in policies + ) + + +def size_based_auto_wrap_policy( + module: nn.Module, + recurse: bool, + nonwrapped_numel: int, + # Additional custom arguments + min_num_params: int = int(1e8), + force_leaf_modules: Optional[Set[Type[nn.Module]]] = None, + exclude_wrap_modules: Optional[Set[Type[nn.Module]]] = None, +) -> bool: + """ + A size-based auto wrap policy. + + Args: + module (nn.Module): Current module being considered. + recurse (bool): If ``False``, then this function must decide whether + ``module`` should be wrapped as an FSDP instance or not. If + ``True``, then the function is still recursing down the module + tree as a part of the DFS. + nonwrapped_numel (int): Parameter numel not yet wrapped. + + min_num_params (int): Customizable policy input that controls the size + threshold over which a module is ready to be wrapped. This is in + units of numel. + force_leaf_modules (Set[Type[nn.Module]]): Set of module types to keep + as leaves, i.e. their children will never be wrapped. + exclude_wrap_modules (Set[Type[nn.Module]]): Set of module types to be + excluded in wrapping. + + Returns: + Whether ``module`` should be wrapped. + """ + force_leaf_modules = ( + size_based_auto_wrap_policy.FORCE_LEAF_MODULES # type: ignore[attr-defined] + if force_leaf_modules is None + else force_leaf_modules + ) + exclude_wrap_modules = ( + size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES # type: ignore[attr-defined] + if exclude_wrap_modules is None + else exclude_wrap_modules + ) + + # Keep the argument `min_num_params` for BC for now, but it represents the + # minimum non-wrapped *numel* before triggering a wrapping + min_nonwrapped_numel = min_num_params + is_large = nonwrapped_numel >= min_nonwrapped_numel + if recurse: + # We should recurse if the module is big enough but not in force_leaf_modules list. + return is_large and not isinstance(module, tuple(force_leaf_modules)) + else: + # If we are not recursing, determine if we should wrap. + return is_large and not isinstance(module, tuple(exclude_wrap_modules)) + + +# Set those defaults to the size_based_auto_wrap_policy function. Make them easy to be imported. +size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES = {nn.ModuleList, nn.ModuleDict} # type: ignore[attr-defined] +size_based_auto_wrap_policy.FORCE_LEAF_MODULES = {nn.MultiheadAttention} # type: ignore[attr-defined] + + +@contextlib.contextmanager +def enable_wrap( + *, wrapper_cls: Any, **wrapper_kwargs: Any +) -> Generator[None, None, None]: + """ + Context manager to wrap modules using a wrapper. + + Useful for when you'd like to apply the same configuration arguments to all + child modules that you wrap. A particularly important use case is wrapping + large layers so that they get sharded (in-place) during initialization, to + avoid running out of system memory. Large layers can indicate that they + should be sharded via the ``wrap`` annotation and this context manager can + provide the exact configuration for these nested instances. + + Usage:: + + with enable_wrap(wrapper_cls, **params): + # Wraps layer in FSDP by default if within context + self.l1 = wrap(torch.nn.Linear(5, 5)) + + Args: + wrapper_cls: + Class that `wrap` annotation will `wrap` modules with, such as + `FullyShardedDataParallel`. 
+ **wrapper_kwargs: + Configuration settings that will be passed to all ``wrap`` + instances inside the context + """ + kwargs = { + "wrapper_cls": wrapper_cls, + **wrapper_kwargs, + } + with _ConfigAutoWrap(**kwargs): + yield + + +def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module: + """ + Annotate that a module should be wrapped. Annotated modules will only be + wrapped if inside of an :func:`enable_wrap` context manager. This allows + a module to be initialized both with and without a wrapper without code + change. + + The class that this function wraps the passed in ``nn.Module`` with is the + passed in ``wrapper_cls`` argument into ``enable_wrap``. Both + ``enable_wrap`` and ``wrap`` can take in kwargs specifying how to construct + the ``wrapper_cls`` instance. In the case of duplicate kwargs in + ``enable_wrap`` and ``wrap``, the argument passed into ``wrap`` will be + respected. + + Usage:: + + with enable_wrap(wrapper_cls=FSDP, **fsdp_config): + # Wraps layer in FSDP by default if within context + self.l1 = wrap(torch.nn.Linear(5, 5)) + + Args: + module (nn.Module): module to wrap (if in :func:`enable_wrap` context) + **wrap_overrides: configuration overrides that will take priority over + the values provided by the :func:`enable_wrap` context + """ + if _ConfigAutoWrap.in_autowrap_context: + assert _ConfigAutoWrap.wrapper_cls is not None + + wrap_overrides = {**_ConfigAutoWrap.kwargs, **wrap_overrides} + return _wrap( + module, + _ConfigAutoWrap.wrapper_cls, + **wrap_overrides, + ) + return module + + +def _wrap(module: nn.Module, wrapper_cls: Callable, **kwargs) -> nn.Module: + assert wrapper_cls is not None + if hasattr(module, "_wrap_overrides"): + # If module has a _wrap_overrides attribute, we force overriding the + # FSDP config with these attributes for this module. Currently this + # is only used to disable mixed precision for BatchNorm when + # auto_wrapping. + overrides = {**kwargs, **module._wrap_overrides} # type: ignore[arg-type] + return wrapper_cls(module, **overrides) + + return wrapper_cls(module, **kwargs) + + +def _recursive_wrap( + module: nn.Module, + auto_wrap_policy: Callable, + wrapper_cls: Callable, + ignored_modules: Set[nn.Module], + ignored_params: Set[nn.Parameter], + only_wrap_children: bool = False, + **kwargs: Any, +) -> Tuple[nn.Module, int]: + """ + Wraps submodules of ``module`` for which ``auto_wrap_policy`` returns + ``True`` with ``wrapper_cls``. + + Args: + module (nn.Module): Module to recursively wrap. + auto_wrap_policy (Callable): A callable representing a policy that + determines which modules to recursively wrap with ``wrapper_cls``. + ignored_modules (Set[torch.nn.Module]): Modules to ignore when + wrapping. + ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when + wrapping; these should be the parameters contained in the modules + in ``ignored_modules``. + Returns: + (nn.Module, int): + ``module`` after wrapping and the numel recursively wrapped. + """ + assert auto_wrap_policy is not None, "Must specify auto_wrap_policy." + assert wrapper_cls is not None, "Must specify wrapper_cls" + # Make sure no child is already wrapped. + for _, child in module.named_modules(): + if child in ignored_modules: + continue + try: + assert not isinstance(child, cast(type, wrapper_cls)) + except TypeError: + # wrapper_cls is a function as opposed to a class type, just bypass above check. + pass + + # We count all params, assuming none of them are already wrapped. 
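A runnable toy sketch of the enable_wrap/wrap mechanics; the wrapper class here is an ordinary container purely so the example runs without a process group (with FSDP, wrapper_cls would be FullyShardedDataParallel and construction would require torch.distributed to be initialized):

import torch.nn as nn
from torch.distributed.fsdp.wrap import enable_wrap, wrap

class Wrapper(nn.Module):
    # Trivial stand-in for a real wrapper class such as FSDP.
    def __init__(self, wrapped: nn.Module):
        super().__init__()
        self.wrapped = wrapped

    def forward(self, x):
        return self.wrapped(x)

layer = wrap(nn.Linear(5, 5))      # outside enable_wrap: returned unchanged
assert isinstance(layer, nn.Linear)

with enable_wrap(wrapper_cls=Wrapper):
    layer = wrap(nn.Linear(5, 5))  # inside the context: wrapped
assert isinstance(layer, Wrapper)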
+ nonwrapped_numel = sum( + p.numel() for p in module.parameters() if p not in ignored_params + ) + + assert auto_wrap_policy is not None + if auto_wrap_policy(module=module, recurse=True, nonwrapped_numel=nonwrapped_numel): + total_wrapped_numel = 0 + # Iterate through the children, recursively wrap if necessary + for name, child in module.named_children(): + if child in ignored_modules: + continue + wrapped_child, num_wrapped_params = _recursive_wrap( + module=child, + auto_wrap_policy=auto_wrap_policy, + wrapper_cls=wrapper_cls, + ignored_modules=ignored_modules, + ignored_params=ignored_params, + **kwargs, + ) + setattr(module, name, wrapped_child) + # Keep track of how many parameters have been wrapped + total_wrapped_numel += num_wrapped_params + # decide if we need to wrap the current module, + # since the left over parameters exceed the number of params to wrap + remainder = nonwrapped_numel - total_wrapped_numel + if not only_wrap_children and auto_wrap_policy( + module=module, recurse=False, nonwrapped_numel=remainder + ): + # Leaf node or final wrapping of the remainder both happen here. + return _wrap(module, wrapper_cls, **kwargs), nonwrapped_numel + else: + return module, total_wrapped_numel + return module, 0 + + +class _ConfigAutoWrap: + """ + Helper class to wrap modules based on default config args via a context manager. + See :func:`enable_wrap` for more information. + """ + + in_autowrap_context: bool = False # Context flag + wrapper_cls: Optional[Callable] = None # The wrapper class + kwargs: Dict[str, Any] = {} # Wrapper's args + + def __init__(self, **kwargs: Dict[str, Any]): + self.kwargs = kwargs + + @staticmethod + def enable_autowrap_context(kwargs: Any) -> None: + if _ConfigAutoWrap.in_autowrap_context: + raise NotImplementedError( + "You are already within an autowrap context and we currently do not supported nested autowrap." + ) + _ConfigAutoWrap.in_autowrap_context = True + # Get and save the wrapper cls for the context. + assert ( + "wrapper_cls" in kwargs.keys() + ), "Expected to pass in wrapper_cls arg into _ConfigAutoWrap." + _ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs["wrapper_cls"]) + del kwargs["wrapper_cls"] + # Save the rest. + _ConfigAutoWrap.kwargs = kwargs + + @staticmethod + def disable_autowrap_context() -> None: + _ConfigAutoWrap.in_autowrap_context = False + _ConfigAutoWrap.wrapper_cls = None + _ConfigAutoWrap.kwargs = {} + + def __enter__(self) -> None: + self.enable_autowrap_context(self.kwargs) + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.disable_autowrap_context() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc82f0692c15eb4fd77a2ddfcb551021300006c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py @@ -0,0 +1,7 @@ +import warnings +warnings.warn( + "torch.distributed.pipeline is deprecated. 
For up-to-date pipeline parallel " + "implementation, please refer to the PiPPy library under the PyTorch " + "organization (Pipeline Parallelism for PyTorch): " + "https://github.com/pytorch/PiPPy" +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a79b8361d9d0cf035948199f92bf82cb5308afa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..932408ad7c0dba87bf79fcb09b99d8eef117b74a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d980ef00619007267afe0f8a0eea54fedb8cc346 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2baa8cede404b0459a62b2241eb737750b0daa2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58e086dc48b9d47b5303603510a4003b302d36ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..777e6ded165309ed735a6c6341c77a35473cca2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c6d7447c5787e92962bb5bcb55cec14869aaf26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2d93a2a20885243f2bfce2c0958ae2a1f68bbddc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b11e28897f86a96f2486a9a3583b29a709c69385 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/server_process_global_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/server_process_global_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69ae858e4924cf4db8473b5c47a5985d608e7d5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/server_process_global_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..923b736607d7b3191a38dd4394e08225f275693f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..990550414ca47a680c9fe2b30c0817ad4bd9bee5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from torch.distributed.tensor.parallel.api import parallelize_module + +from torch.distributed.tensor.parallel.loss import loss_parallel +from torch.distributed.tensor.parallel.style import ( + ColwiseParallel, + ParallelStyle, + PrepareModuleInput, + PrepareModuleOutput, + RowwiseParallel, + SequenceParallel, +) + +__all__ = [ + "ColwiseParallel", + "ParallelStyle", + "PrepareModuleInput", + "PrepareModuleOutput", + "RowwiseParallel", + "SequenceParallel", + "parallelize_module", + "loss_parallel" +] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..084a9122cfccc2fdfe5384822d9cc7dce630aff7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_data_parallel_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_data_parallel_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30ba5fbea13f9674ceaaf61fcdddfc11aa2b6cc0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_data_parallel_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ed29c21229d408a31e47f7ea46608b7dada0571 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05e4d322b459720495f16f540a95cb24e510b090 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/ddp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/ddp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1eea6a03fc48ca6b1e058db6a2c5454ca40e222 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/ddp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/fsdp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/fsdp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61cf824b7b7b299ac8eb2e9a652c38b1a1ee4419 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/fsdp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2de97b212bedb832fbdead53c6d88c0bf1f93766 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/loss.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fa0d6e0e039fba6ba9839e6b902bde852d18fe7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/loss.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/style.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..129976fd3e176af17f004c68afc3e49fe95c6365 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/style.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_data_parallel_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_data_parallel_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1ebfd53ab7b3bb88ed496435747dd1fd9ac8ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_data_parallel_utils.py @@ -0,0 +1,51 @@ +from functools import partial +from typing import no_type_check, Optional, Tuple + +import torch +from torch.distributed._functional_collectives import AsyncCollectiveTensor +from torch.distributed._tensor import DTensor +from torch.distributed._tensor.placement_types import DTensorSpec + + +@no_type_check +def sync_grad_hook(grad, *, device_handle=None, compute_stream=None): + if isinstance(grad, AsyncCollectiveTensor): + if compute_stream is not None: + with device_handle.stream(compute_stream): + grad = grad.wait() + else: + grad = grad.wait() + + return grad + + +def _flatten_tensor( + tensor: torch.Tensor, +) -> Tuple[torch.Tensor, Optional[DTensorSpec]]: + if isinstance(tensor, DTensor): + tensor._local_tensor.requires_grad_() + return tensor._local_tensor, tensor._spec + return tensor, None + + +@no_type_check +def _unflatten_tensor(tensor, spec, *, device_handle=None, compute_stream=None): + # unflatten would mainly be called everytime FSDP allgather parameters. 
+ result = DTensor.from_local( + tensor, + spec.mesh, + spec.placements, + run_check=False, + shape=spec.shape, + stride=spec.stride, + ) + if tensor.requires_grad: + # only register the hook if the tensor requires grad + tensor.register_hook( + partial( + sync_grad_hook, + device_handle=device_handle, + compute_stream=compute_stream, + ) + ) + return result diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc37003fb2d0490d18fdb08cba77e239e7ef4c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_utils.py @@ -0,0 +1,60 @@ +import warnings +from typing import Tuple, Union + +from torch.distributed._tensor import DeviceMesh +from torch.distributed._tensor.placement_types import Placement +from torch.distributed.device_mesh import _mesh_resources +try: + from torch._dynamo.external_utils import is_compiling as is_torchdynamo_compiling +except Exception: + def is_torchdynamo_compiling(): # type: ignore[misc] + return False + +LayoutsType = Union[Placement, Tuple[Placement, ...]] + + +def _deprecate_warnings(func_name: str, extra_msg: str) -> None: + """ + Inject common validation logics for `_prepare_input` funcs via this decorator. + + Include verifying that input needs to be either a :class:`Tensor` or :class:`DTensor` + and only 1D :class:`DeviceMesh` is passed in. + """ + # TODO: Will follow up with dynamo POC to make warnings.warn working with dynamo. + if not is_torchdynamo_compiling(): + warnings.warn(f"{func_name} is deprecated and will be removed soon. {extra_msg}") + + +def _validate_tp_mesh_dim( + device_mesh: DeviceMesh, +) -> None: + """ + Check whether TP mesh dimension is valid or not. + + Args: + device_mesh (:class:`DeviceMesh`): + The `device_mesh` where we perform + Tensor Parallelism on. + + Return: + `True` if the mesh dimension + is valid, `False` otherwise. + """ + if device_mesh.ndim > 1: + raise ValueError(f"Tensor Parallel only accepts a 1D DeviceMesh, but found {device_mesh.ndim}D!" + "If you have a 2-D or N-D device_mesh, consider passing in device_mesh[\"tp\"]") + + parent_mesh = _mesh_resources.get_parent_mesh(device_mesh) + if parent_mesh: + if parent_mesh.ndim != 2: + raise RuntimeError( + f"Found TP device_mesh has a parent mesh with dims {parent_mesh.ndim}", + "Currently we only support 2D TP composition with DP.", + ) + + tp_mesh_dim = _mesh_resources.get_parent_mesh_dim(device_mesh) + if tp_mesh_dim != 1: + raise RuntimeError( + f"Found TP device_mesh on the {tp_mesh_dim} dimension of its parent mesh.", + "Currently we only support intranode TP and TP needs to be the innermost dimension on its parent mesh.", + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/api.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/api.py new file mode 100644 index 0000000000000000000000000000000000000000..a8e2e5bc1bfdfd29d61048147286d0256de4c9e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/api.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from typing import Dict, Union + +import torch +import torch.distributed._tensor.random as random +import torch.nn as nn +from torch.distributed._tensor import ( + DeviceMesh, +) +from torch.distributed._tensor.random import ( + is_rng_supported_mesh, + TensorParallelRNGTracker, +) +from torch.distributed.tensor.parallel._utils import _validate_tp_mesh_dim +from torch.distributed.tensor.parallel.style import ( + ParallelStyle, +) + + +__all__ = [ + "parallelize_module", +] + + +def parallelize_module( # type: ignore[return] + module: nn.Module, + device_mesh: DeviceMesh, + parallelize_plan: Union[ParallelStyle, Dict[str, ParallelStyle]], +) -> nn.Module: + """ + Apply Tensor Parallelism in PyTorch by parallelizing modules or sub-modules based on a user-specified plan. + + We parallelize module or sub_modules based on a parallelize_plan. The parallelize_plan contains + :class:`ParallelStyle`, which indicates how user wants the module or sub_module + to be parallelized. + + User can also specify different parallel style per module fully qualified name (FQN). + + Note that ``parallelize_module`` only accepts a 1-D :class:`DeviceMesh`, if you have a 2-D or N-D :class:`DeviceMesh`, + slice the DeviceMesh to a 1-D sub DeviceMesh first then pass to this API(i.e. ``device_mesh[\"tp\"]``) + + Args: + module (:class:`nn.Module`): + Module to be parallelized. + device_mesh (:class:`DeviceMesh`): + Object which describes the mesh topology + of devices for the DTensor. + parallelize_plan (Union[:class:`ParallelStyle`, Dict[str, :class:`ParallelStyle`]]): + The plan used to parallelize the module. It can be either a + :class:`ParallelStyle` object which contains how + we prepare input/output for Tensor Parallelism or it can be a + dict of module FQN and its corresponding :class:`ParallelStyle` object. + Return: + A :class:`nn.Module` object parallelized. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> from torch.distributed.tensor.parallel import parallelize_module, ColwiseParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> + >>> # Define the module. + >>> m = Model(...) + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> m = parallelize_module(m, tp_mesh, {"w1": ColwiseParallel(), "w2": RowwiseParallel()}) + >>> + + .. note:: For complex module architecture like Attention, MLP layers, we recommend composing + different ParallelStyles together (i.e. ``ColwiseParallel`` and ``RowwiseParallel``) and pass + as a parallelize_plan, to achieves the desired sharding computation. + """ + torch._C._log_api_usage_once("torch.distributed.tensor.parallel.parallelize_module") + + _validate_tp_mesh_dim(device_mesh) + + # instantiate a TP RNG state tracker if it's not there + if is_rng_supported_mesh(device_mesh) and not isinstance( + random._rng_tracker, TensorParallelRNGTracker + ): + random._rng_tracker = TensorParallelRNGTracker(device_mesh.device_type) + # TODO: we should allow user to pass in the default seed from a config + random._rng_tracker._manual_seed(device_mesh, base_seed=1234) + # By default we execute random ops in non-tensor-parallel region. If users want + # to execute in tensor-parallel region, they can manually set this field to True + # after parallelizing the model. 
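For illustration, a hedged end-to-end sketch of a dict-style plan keyed by dotted FQNs, which the loop that follows resolves via get_submodule (the model definition, the mesh shape, and a 4-GPU torchrun-style launch are all assumptions of the example):

import torch
import torch.nn as nn
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    RowwiseParallel,
    parallelize_module,
)

class FeedForward(nn.Module):
    def __init__(self, dim=256, hidden=1024):
        super().__init__()
        self.w1 = nn.Linear(dim, hidden)
        self.w2 = nn.Linear(hidden, dim)

    def forward(self, x):
        return self.w2(torch.relu(self.w1(x)))

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.ffn = FeedForward()

    def forward(self, x):
        return self.ffn(x)

tp_mesh = init_device_mesh("cuda", (4,))
model = parallelize_module(
    Model().cuda(),
    tp_mesh,
    # Nested FQNs are allowed; parallelize_module walks down to the submodule.
    {"ffn.w1": ColwiseParallel(), "ffn.w2": RowwiseParallel()},
)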
+ random._rng_tracker.distribute_region_enabled = False + + if isinstance(parallelize_plan, ParallelStyle): + return parallelize_plan._apply(module, device_mesh) + elif isinstance(parallelize_plan, dict): + for module_path, parallelize_style in parallelize_plan.items(): + sub_module = module.get_submodule(module_path) + parent_module = module + if "." in module_path: + parent_module_path = ".".join(module_path.split(".")[:-1]) + parent_module = module.get_submodule(parent_module_path) + module_path = module_path.split(".")[-1] + parent_module.register_module( # type: ignore[call-arg] # pyre-ignore[20] + module_path, + parallelize_module( # type: ignore[arg-type] + sub_module, device_mesh, parallelize_style # type: ignore[arg-type] # pyre-ignore[6] + ), + ) + return module + else: + raise RuntimeError( # pyre-ignore[7] + "Expect Union[ParallelStyle, Dict[str, ParallelStyle]] for" + f" parallelize_plan, {type(parallelize_plan)} found!" + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/ddp.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/ddp.py new file mode 100644 index 0000000000000000000000000000000000000000..474e542551ae90348d8f7e0a856e210ae24d022a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/ddp.py @@ -0,0 +1,96 @@ +from typing import Any, List, Tuple + +import torch.nn as nn +from torch.distributed.tensor.parallel._data_parallel_utils import ( + _flatten_tensor, + _unflatten_tensor, +) + +__all__ = [] # type: ignore[var-annotated] + + +def _get_submodule_n_params(module: nn.Module, path: str): + """ + Get submodule and the direct path of parameter from the module + """ + if "." in path: + path_list = path.split(".") + parent_module_path = ".".join(path_list[:-1]) + module = module.get_submodule(parent_module_path) + path = path_list[-1] + return module, path + + +def _update_module_param(param_list: List[Tuple[nn.Module, str, nn.Parameter]]): + """ + Update parameters within the module + """ + for item in param_list: + parent_module, module_path, t = item + assert hasattr(parent_module, module_path) + delattr(parent_module, module_path) + setattr(parent_module, module_path, t) + + +def _reconstruct_dtensor(module: nn.Module, _input: Any): + """ + Recontruct DTensor parameters from local tensors + """ + param_list = [] + # TODO: To add perf optimizations to this iterations + for name, t in module.named_parameters(): + if hasattr(t, "_st_info"): + dtensor = _unflatten_tensor(t, t._st_info) + param_list.append((*_get_submodule_n_params(module, name), dtensor)) + _update_module_param(param_list) # type: ignore[arg-type] + + +def _localize_dtensor(module: nn.Module, *_: Any): + """ + Convert DTensor parameters to local tensors + """ + param_list = [] + for name, param in module.named_parameters(): + t, sharding_info = _flatten_tensor(param) + if sharding_info is not None: + t = nn.Parameter(t) + t._st_info = sharding_info # type: ignore[attr-defined] + param_list.append((*_get_submodule_n_params(module, name), t)) + _update_module_param(param_list) # type: ignore[arg-type] + + +def _pre_dp_module_transform(module: nn.Module): + """ + Enable the composability between Tensor Parallelism (TP) and Data + Parallelism(DP) in PyTorch when using DDP. We need to convert Parameters which + are DTensors to local tensors before wrapping with data parallelism API. 
+ We then register two hooks, one for converting local tensors back to DTensor + preforward and one to convert DTensors back to tensors after Forward. By + integrating this way, we avoid any special handling of DTensor parameters by DDP + and get DTensor's gradients propagated back to DP, e.g. gradient buckets of DDP. + + For now, this API only works with ``DistributedDataParallel``. It will later support + other DP methods such as FSDP. + + Args: + module (:class:`nn.Module`): + Module which has been applied TP on. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> from torch.distributed.tensor.parallel import parallelize_module, PairwiseParallel + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> from torch.distributed.tensor.parallel.ddp import pre_dp_module_transform + >>> + >>> # Define the module. + >>> m = module(...) + >>> parallelize_module(m, PairwiseParallel()) + >>> m = pre_dp_module_transform(m) + >>> m = DDP(m) + >>> + """ + + _localize_dtensor(module, None, None) + # TODO: To add test cases and ensure that it works for nested modules + module.register_forward_pre_hook(_reconstruct_dtensor) + module.register_forward_hook(_localize_dtensor) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/fsdp.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/fsdp.py new file mode 100644 index 0000000000000000000000000000000000000000..7b8d0af39bc2b48289696a52923995d4f488f446 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/fsdp.py @@ -0,0 +1,391 @@ +import copy +from typing import Any, cast, List, Optional, Tuple + +import torch +import torch.distributed as dist + +import torch.distributed._shard.sharding_spec as shard_spec +import torch.distributed.distributed_c10d as c10d +from torch.distributed._shard.sharded_tensor import ( + Shard, + ShardedTensor, + ShardedTensorMetadata, + TensorProperties, +) + +from torch.distributed._shard.sharding_spec import ShardMetadata +from torch.distributed._shard.sharding_spec.chunk_sharding_spec import ChunkShardingSpec +from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard as DShard +from torch.distributed.device_mesh import _mesh_resources + +from torch.distributed.fsdp._common_utils import _set_fsdp_flattened +from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions +from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor +from torch.distributed.remote_device import _remote_device +from torch.distributed.tensor.parallel._data_parallel_utils import ( + _flatten_tensor, + _unflatten_tensor, +) + +__all__ = ["DTensorExtensions"] + + +def _get_box(tensor: DTensor) -> Tuple[torch.Size, torch.Size]: + device_mesh = tensor.device_mesh + assert device_mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + + placement = tensor.placements[0] + offsets = [0] * len(tensor.size()) + num_chunks = device_mesh.size(mesh_dim=0) + + if tensor.placements[0].is_shard(): + shard_dim = cast(DShard, placement).dim + chunk_size = tensor.size(shard_dim) // num_chunks + offsets[shard_dim] = chunk_size + + return (torch.Size(offsets), tensor._local_tensor.size()) + + +def _get_box_for(tensor: DTensor, idx: int) -> Tuple[torch.Size, torch.Size]: + offsets, size = _get_box(tensor) + return (torch.Size([val * idx for val in offsets]), size) + + +def _get_local_box(tensor: DTensor) -> Tuple[torch.Size, torch.Size]: + device_mesh = tensor.device_mesh + coord = device_mesh.get_coordinate() + assert coord is not 
None + return _get_box_for(tensor, coord[0]) + + +def _create_shard_md_from_dt(dt: DTensor, current_rank: int) -> ShardMetadata: + mesh = dt.device_mesh + assert mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + + offsets, sizes = _get_local_box(dt) + return ShardMetadata( + shard_offsets=list(offsets), + shard_sizes=list(sizes), + placement=f"rank:{current_rank}/{dt._local_tensor.device}", + ) + + +def _create_sharded_tensor_md_from_dt( + dt: DTensor, dt_pg: c10d.ProcessGroup +) -> ShardedTensorMetadata: + # This is where it gets tricky, we have to produce a ShardedTensor that has full coverage + # and yet has only one valid shard for the current rank. + + shards_md = [] + my_rank = dist.get_rank(dt_pg) + scapegoat_rank = 0 if my_rank > 0 else 1 + + if dt.placements[0].is_shard(): + shard_count = dt_pg.size() + else: + shard_count = 1 + + for i in range(shard_count): + offsets, sizes = _get_box_for(dt, i) + shards_md.append( + ShardMetadata( + shard_offsets=list(offsets), + shard_sizes=list(sizes), + placement=( + f"rank:{scapegoat_rank if i > 0 else my_rank}/{dt._local_tensor.device}" + ), + ) + ) + + return ShardedTensorMetadata( + shards_metadata=shards_md, + size=dt.size(), + tensor_properties=TensorProperties( + dtype=dt.dtype, + layout=dt.layout, + requires_grad=dt.requires_grad, + # ignore memory_format and pin_memory as those are not supported by DT + ), + ) + + +def _get_dt_pg(dt: DTensor) -> c10d.ProcessGroup: + mesh = dt.device_mesh + assert mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + dim_groups = mesh.get_group() + assert isinstance(dim_groups, list) + return dim_groups[0] + + +def _rewrite_spec_if_needed( + spec: shard_spec.ShardingSpec, tensor: torch.Tensor, rank: int +) -> shard_spec.ShardingSpec: + """ + Rewrite ``spec`` to match the device of ``tensor``. + + FSDP.sharded_optim_state_dict sneakly ships optimizer state to CPU so if the original ShardingSpec + produces CUDA metadata, ST construction bombs. 
+ """ + if not isinstance(spec, ChunkShardingSpec): + return spec + + # let's see if we need + rewrite = False + for p in spec.placements: + p = cast(_remote_device, p) + if p.rank() == rank and p.device() != tensor.device: + rewrite = True + break + if rewrite: + spec = copy.deepcopy(spec) + for i, placement in enumerate(spec.placements): + placement = cast(_remote_device, placement) + if placement.rank() == rank and placement.device() != tensor.device: + spec.placements[i] = _remote_device(f"rank:{rank}/{tensor.device}") + + return spec + + +def _chunk_tensor( + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, +) -> torch.Tensor: + if type(tensor) is ShardedTensor: + assert len(tensor.local_shards()) == 1 + + inner_param = tensor.local_tensor() + inner_st = _create_chunk_sharded_tensor( + inner_param, + rank, + world_size, + num_devices_per_node, + pg, + ) + + outer_local_shard = tensor.local_shards()[0] + shards: List[Shard] = [ + Shard(inner_st, copy.deepcopy(outer_local_shard.metadata)) + ] + st_meta = copy.deepcopy(tensor.metadata()) + st_meta.tensor_properties.requires_grad = False + + st_outer = ShardedTensor._init_from_local_shards_and_global_metadata( + shards, + sharded_tensor_metadata=st_meta, + process_group=tensor._process_group, + init_rrefs=False, + ) + return st_outer + elif type(tensor) is DTensor: + device_mesh = tensor.device_mesh + assert device_mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + + inner_param = tensor._local_tensor + + inner_st = _create_chunk_sharded_tensor( + inner_param, + rank, + world_size, + torch.cuda.device_count(), + pg, + ) + + dt_pg = _get_dt_pg(tensor) + # We do this differently here, we create a ST with no local shards then patch it + shards = [ + Shard(inner_st, _create_shard_md_from_dt(tensor, dist.get_rank(dt_pg))) + ] + + st_meta = _create_sharded_tensor_md_from_dt(tensor, dt_pg) + st_meta.tensor_properties.requires_grad = False + + st_outer = ShardedTensor._init_from_local_shards_and_global_metadata( + shards, + sharded_tensor_metadata=st_meta, + process_group=dt_pg, + init_rrefs=False, + ) + + return st_outer + else: + return _create_chunk_sharded_tensor( + tensor, + rank, + world_size, + num_devices_per_node, + pg, + ) + + +def _chunk_dtensor( + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, +) -> DTensor: + """ + Shard a tensor to chunks along the first dimension. + + The local rank will gets its corresponding chunk as the local tensor to create a DTensor. + """ + parent_mesh = _mesh_resources.get_parent_mesh(device_mesh) + if parent_mesh is None: + raise RuntimeError("No parent device_mesh is found for FSDP device_mesh.") + if parent_mesh.ndim < 2: + raise RuntimeError( + f"Found parent device_mesh of ndim={parent_mesh.ndim},", + "but meshes must be at least 2D.", + ) + + # We need to explicitly call .detach() to return a new tensor detached from the current graph. + tensor = tensor.clone().detach() + + # When a layer is not involved in TP, then the tensor will not be a DTensor. + # e.g. When a layer is not sppecified in the parallelize_plan, TP will have no effect on the layer. + # e.g. When you do PairwiseParallel on a 3 layer model, TP will have no effect on the third layer. + if isinstance(tensor, torch.Tensor) and not isinstance(tensor, DTensor): + + # For tensors, it is replicated across tp dimension and sharded across FSDP dimension. + # TP is the inner dimension and FSDP is the outer dimension. 
+ # Therefore, shard placements for tensor is (Shard(0), Replicate()). + replicate_placements = [Replicate() for _ in range(parent_mesh.ndim)] + shard_placements = [Replicate() for _ in range(parent_mesh.ndim)] + shard_placements[0] = DShard(0) # type: ignore[call-overload] + + return DTensor.from_local( + tensor, parent_mesh, replicate_placements + ).redistribute( + device_mesh=parent_mesh, + placements=shard_placements, + ) + + else: + tp_placements = tensor.placements + tp_placement = tp_placements[0] + + tensor = tensor.to_local() + + # For DTensors, it is sharded across tp dimension first and then sharded across FSDP dimension. + # TP is the inner dimension and FSDP is the outer dimension. + # Therefore, shard placements for tensor is (Shard(0), tp_placement). + # For higher dimensional meshes, it is replicated across other dimensions. For example, with + # HSDP the shard placements for tensor is (Replicate, Shard(0), tp_placement). + replicate_placements = [Replicate() for _ in range(parent_mesh.ndim)] + replicate_placements[-1] = tp_placement # type: ignore[call-overload] + shard_placements = [Replicate() for i in range(parent_mesh.ndim)] # type: ignore[misc] + shard_placements[-2] = DShard(0) # type: ignore[call-overload] + shard_placements[-1] = tp_placement # type: ignore[call-overload] + + return DTensor.from_local( + tensor, parent_mesh, replicate_placements + ).redistribute( + device_mesh=parent_mesh, + placements=shard_placements, + ) + + +def _pre_load_state_dict( + tensor: torch.Tensor, +) -> Tuple[torch.Tensor, List[Shard]]: + shards = cast(ShardedTensor, tensor).local_shards() + if len(shards) == 1 and type(shards[0].tensor) is ShardedTensor: + inner_tensor = shards[0].tensor + shards = inner_tensor.local_shards() # pyre-ignore[16] + tensor = inner_tensor + + return (tensor, shards if len(shards) > 0 else []) + + +def _all_gather_dtensor( + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], +) -> torch.Tensor: + """All gather a DTensor in its FSDP dimension and return the local tensor.""" + assert parent_mesh == tensor.device_mesh + + placements = list(copy.deepcopy(tensor.placements)) + # FSDP + TP: [Shard(0), tp_placement] -> [Replicate(), tp_placement] + # HSDP + TP: [Replicate(), Shard(0), tp_placement] -> [Replicate(), Replicate(), tp_placement] + for i in range(0, len(placements) - 1): + placements[i] = Replicate() + tensor = tensor.redistribute( + device_mesh=tensor.device_mesh, + placements=placements, + ) + + return tensor.to_local() + + +class DTensorExtensions(FSDPExtensions): + """ + DTensorExtension is the TensorFlattener extension needed for 2D FSDP + TP. + + This is the implementation for FSDPExtensions defined in + https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/_fsdp_extensions.py + """ + def __init__(self, device_handle) -> None: + super().__init__() + self.compute_stream = None + self.device_handle = device_handle + # we have to use the dynamo disable this way to disable dynamo as the decorater way would + # trigger build failure with torch deploy... 
+ self.post_unflatten_transform = torch._dynamo.disable(self.post_unflatten_transform) # type: ignore[method-assign] + + def pre_flatten_transform( + self, + tensor: torch.Tensor, + ) -> Tuple[torch.Tensor, Optional[Any]]: + return _flatten_tensor(tensor) + + def post_unflatten_transform( + self, tensor: torch.Tensor, param_extension: Any + ) -> torch.Tensor: + stream = self.compute_stream or self.device_handle.current_stream() + with self.device_handle.stream(stream): + # runtime we put the unflattened tensor call on the compute stream since + # the unflattened tensor might contain computations in fwd/bwd where we + # need to sync properly. + # TODO: this is a short term fix and we should make the get_unflat_views + # directly happen in the compute stream. + result = _unflatten_tensor( + tensor, + param_extension, + device_handle=self.device_handle, + compute_stream=self.compute_stream + ) + _set_fsdp_flattened(result) + return result + + def chunk_tensor( + self, + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, + device: Optional[torch.device] = None, + ) -> torch.Tensor: + return _chunk_tensor(tensor, rank, world_size, num_devices_per_node, pg) + + def chunk_dtensor( + self, + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, + ) -> torch.Tensor: + return _chunk_dtensor(tensor, rank, device_mesh) + + def pre_load_state_dict_transform( + self, + tensor: torch.Tensor, + ) -> Tuple[torch.Tensor, List[Shard]]: + return _pre_load_state_dict(tensor) + + def all_gather_dtensor( + self, + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], + ) -> torch.Tensor: + return _all_gather_dtensor(tensor, parent_mesh) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/input_reshard.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/input_reshard.py new file mode 100644 index 0000000000000000000000000000000000000000..3ea97846e313a40a3e8c6a302244361146a8fd67 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/input_reshard.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from functools import partial +from typing import Any, Optional, Tuple + +import torch +from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard + +__all__ = [ + "input_reshard", +] + + +def input_reshard( + module: torch.nn.Module, + tp_device_mesh: DeviceMesh, + input_reshard_dim: Optional[int] = None, +) -> torch.nn.Module: + """ + Register hooks to an nn.Module for input resharding, enabling sharding and restoration during backward computation. + + Register hooks to an nn.Module with input resharding so that we can shard + per the given `tp_device_mesh` and `input_reshard_dim` and restore the + input back when recomputing the activations in the backward. The reason + why we can do this is that for Tensor Parallel(TP), the input are same + across all TP ranks. + + Args: + module (:class:`nn.Module`): + Module to be registered with input resharding. + tp_device_mesh (:class:`DeviceMesh`): + Object which describes the mesh topology + of devices for Tensor Parallel. + input_reshard_dim (Optional[int]): + The dimension of where we perform the sharding + of input. If set None, there is no sharding of input. + Default: None + + Return: + A :class:`nn.Module` object registered with TP input resharding. 
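A minimal usage sketch (the mesh shape, the module, and the choice of input_reshard_dim=0 are illustrative, and the call assumes a distributed launch):

import torch
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel.input_reshard import input_reshard

tp_mesh = init_device_mesh("cuda", (8,))
block = torch.nn.Linear(1024, 1024).cuda()
# Saved activations are sharded along dim 0 after the forward pass and
# restored to replicated form when the backward pass recomputes with them.
block = input_reshard(block, tp_mesh, input_reshard_dim=0)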
+ """ + cx: Optional[torch.autograd.graph.saved_tensors_hooks] = None + + def input_reshard_forward_pre_hook(_: torch.nn.Module, _i: Tuple[Any, ...]) -> None: + saved_tensor_hooks = torch.autograd.graph.saved_tensors_hooks( + partial(_pack_hook_tp, tp_device_mesh, input_reshard_dim), + partial(_unpack_hook_tp, tp_device_mesh, input_reshard_dim), + ) + saved_tensor_hooks.__enter__() + nonlocal cx + cx = saved_tensor_hooks # type: ignore[name-defined] + + def input_reshard_backward_hook(_: torch.nn.Module, _i: Tuple[Any, ...], _o: Any) -> Any: + nonlocal cx + cx.__exit__() # type: ignore[name-defined, union-attr] + + if input_reshard_dim is None: + return module + module.register_forward_pre_hook(input_reshard_forward_pre_hook) + module.register_forward_hook(input_reshard_backward_hook) + return module + + +def _pack_hook_tp(mesh: DeviceMesh, input_reshard_dim: int, x: torch.Tensor) -> Any: # noqa: D401 + """Hook function called after FWD to shard input.""" + if isinstance(x, DTensor) and all(p.is_replicate() for p in x._spec.placements): + return x.redistribute(device_mesh=mesh, placements=[Shard(input_reshard_dim)]) + elif ( + not isinstance(x, DTensor) + and isinstance(x, torch.Tensor) + and x.numel() >= mesh.size() + ): + return ( + DTensor.from_local(x, device_mesh=mesh) + .redistribute(device_mesh=mesh, placements=[Shard(input_reshard_dim)]) + .to_local() + ) + else: + return x + + +def _unpack_hook_tp(mesh: DeviceMesh, input_reshard_dim: int, x: Any) -> torch.Tensor: # noqa: D401 + """Hook function called before activation recomputing in BWD to restore input.""" + if ( + isinstance(x, DTensor) + and len(x._spec.placements) == 1 + and x._spec.placements[0].is_shard() + ): + return x.redistribute(device_mesh=mesh, placements=[Replicate()]) + elif ( + not isinstance(x, DTensor) + and isinstance(x, torch.Tensor) + and x.numel() >= mesh.size() + ): + return ( + DTensor.from_local( + x, device_mesh=mesh, placements=[Shard(input_reshard_dim)] + ) + .redistribute(device_mesh=mesh, placements=[Replicate()]) + .to_local() + ) + else: + return x diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/loss.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..f7144a38e92328fc69883af815e0e7bbc6a83d8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/loss.py @@ -0,0 +1,484 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import contextlib +from typing import cast, Dict, Optional, Tuple + +import torch +import torch._prims_common as utils +import torch.distributed._functional_collectives as funcol +import torch.distributed.distributed_c10d as c10d +from torch import Tensor +from torch.distributed._tensor import DTensor, Replicate, Shard +from torch.distributed._tensor.ops.embedding_ops import _MaskPartial +from torch.distributed._tensor.ops.math_ops import ( + _skip_dim, + Reduction, + replicate_reduction_dims, +) +from torch.distributed._tensor.placement_types import Placement, TensorMeta +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +__all__ = ["loss_parallel"] + + +@contextlib.contextmanager +def loss_parallel(): + """ + A context manager that enables loss parallelism, where efficient parallelized loss computation + can be performed when the input is sharded on the class dimension. Currently only the cross-entropy + loss is supported. 
+ + Within this context manager, one can use :func:`~torch.nn.functional.cross_entropy` or + :class:`~torch.nn.CrossEntropyLoss` as usual, with the following assumptions on the input parameters. + The corresponding ``backward()`` call, if any, also needs to happen under this context manager. + + Args: + input (:class:`DTensor`): + Input logits. Assumed to be sharded on the class dimension. + target (Union[:class:`torch.Tensor`, :class:`DTensor`]): + Must be ground truth class indices (class probabilities currently not supported). + Assumed to be replicated across the ``DeviceMesh``. + weight (Union[:class:`torch.Tensor`, :class:`DTensor`], optional): + If given, assumed to be replicated across the ``DeviceMesh``. + label_smoothing: + Currently not supported. + + Returns: + A replicated :class:`DTensor`. + + Example: + A sharded DTensor is manually created here to showcase the usage. + In practice, it is usually the output of a TP module. + + >>> # xdoctest: +SKIP("distributed") + >>> from torch.distributed.tensor.parallel import loss_parallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> device_mesh = init_device_mesh("cuda", (8,)) + >>> input = torch.randn(4, 16, device="cuda", requires_grad=True) + >>> dist_input = distribute_tensor(input, device_mesh, placements=[Shard(1)]) + >>> target = torch.randint(16, (4,), device="cuda") + >>> with loss_parallel(): + >>> loss = F.cross_entropy(dist_input, target, reduction="mean") + >>> loss.backward() + >>> ... + """ + _enable_custom_loss_ops() + + yield + + _disable_custom_loss_ops() + + +# Currently only needs to support one dimensional DeviceMesh; in general return +# the mesh_dim with placements[mesh_dim].is_shard(dim) +def _find_all_reduce_mesh_dim(placements: Tuple[Placement, ...], dim: int) -> int: + if not len(placements) == 1: + raise ValueError( + "Currently loss_parallel() only supports input on one-dimensional DeviceMesh." + ) + if not placements[0].is_shard(dim): + raise ValueError( + f"loss_parallel() should be enabled only when the input tensor is sharded on dimension {dim}." + ) + return 0 + + +def _cast_to_dtensor( + tensor, placements: Tuple[Placement, ...], mesh: DeviceMesh +) -> DTensor: + if isinstance(tensor, DTensor): + if tensor.placements == placements: + return tensor + else: + raise RuntimeError(f"Expected {placements} but got {tensor.placements}.") + elif isinstance(tensor, torch.Tensor): + return DTensor.from_local( + tensor, device_mesh=mesh, placements=placements, run_check=False + ) + else: + raise TypeError(f"Unsupported type {type(tensor)}") + + +def _propagate_tensor_meta( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> TensorMeta: + op_info = DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs) + tensor_meta = DTensor._op_dispatcher.sharding_propagator._propagate_tensor_meta( + op_info.schema + ) + if isinstance(tensor_meta, TensorMeta): + return tensor_meta + elif isinstance(tensor_meta, tuple): + return tensor_meta[0] + else: + raise RuntimeError(f"Unexpected tensor meta type: {type(tensor_meta)}.") + + +# NOTE: The implementation follows torch._decomp.decomposition._log_softmax, +# with all_reduce manually inserted to perform distributed computation. 
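+# A condensed sketch of what the function below computes (illustrative only, not a
+# drop-in replacement; `x` is the local shard, sharded along `dim`):
+#
+#   global_max    = all_reduce(amax(x, dim), MAX)
+#   global_sumexp = all_reduce(sum(exp(x - global_max), dim), SUM)
+#   log_softmax   = (x - global_max) - log(global_sumexp)
+#
+# i.e. only the per-row max and sum-of-exponentials are communicated, never the
+# full logits.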
+def _log_softmax(x, dim, half_to_float, mesh, mesh_dim): + x = x.contiguous() + if half_to_float: + assert x.dtype == torch.half + computation_dtype, result_dtype = utils.elementwise_dtypes( + x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + x = x.to(computation_dtype) + if x.numel() == 0: + shifted = x + else: + x_max = torch.amax(x, dim, keepdim=True) + x_max = funcol.all_reduce( + x_max, reduceOp=c10d.ReduceOp.MAX.name, group=(mesh, mesh_dim) + ) + shifted = x - x_max + shifted_sumexp = torch.sum(torch.exp(shifted), dim, keepdim=True) + shifted_sumexp = funcol.all_reduce( + shifted_sumexp, reduceOp=c10d.ReduceOp.SUM.name, group=(mesh, mesh_dim) + ) + shifted_logsumexp = torch.log(shifted_sumexp) + result = shifted - shifted_logsumexp + if not half_to_float: + result = result.to(result_dtype) + return result + + +def _log_softmax_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + x = cast(DTensor, args[0]) + dim = cast(int, args[1]) + half_to_float = cast(bool, args[2]) + + spec = x._spec + mesh_dim = _find_all_reduce_mesh_dim(spec.placements, dim) + + output_tensor_meta = _propagate_tensor_meta(op_call, args, kwargs) + + res = _log_softmax(x._local_tensor, dim, half_to_float, spec.mesh, mesh_dim) + + return DTensor( + res, + spec.mesh, + spec.placements, + shape=output_tensor_meta.shape, + dtype=output_tensor_meta.dtype, + requires_grad=res.requires_grad, + stride=output_tensor_meta.stride, + ) + + +# NOTE: As explained below at _nll_loss_and_log_softmax_backward, the +# _log_softmax_backward_handler does not actually do any computation. +def _log_softmax_backward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + grad_output = cast(DTensor, args[0]) + input_dtype = cast(torch.dtype, args[3]) + return grad_output.to(input_dtype) + + +# NOTE: The implementation follows torch._decomp.decomposition._nll_loss_forward, +# with customized communication inserted to perform distributed computation. 
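+# Illustrative summary of the communication pattern in the function below: the only
+# collective is a single all_reduce(SUM) inside _MaskPartial._reduce_value. Each rank
+# gathers log-probabilities only for the target classes it owns locally (out-of-range
+# targets are masked so they contribute zero), so the replicated logits are never
+# materialized. See the inline comments in the function body for the exact steps.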
+def _nll_loss_forward( + x: Tensor, + target: Tensor, + weight: Optional[Tensor], + local_weight: Optional[Tensor], + reduction: int, + ignore_index: int, + channel_dim_size: int, + mesh: DeviceMesh, + mesh_dim: int, +) -> Tuple[Tensor, Tensor]: + n_dims = x.dim() + channel_dim = 1 + if n_dims < 2: + channel_dim = 0 + + def _weight_view(weight: Tensor) -> Tensor: + if n_dims > 1: + shape = [ + 1, + ] * n_dims + shape[channel_dim] = weight.shape[0] + w = weight.view(shape) + else: + w = weight + return w + + if weight is not None: + w = _weight_view(weight) + assert local_weight is not None + local_w = _weight_view(local_weight) + x = x * local_w + safe_target = torch.where(target != ignore_index, target, 0) + safe_target_ = safe_target.unsqueeze(channel_dim) + + # The following code block is a distributed version of + # result = -torch.gather(self, channel_dim, safe_target_).squeeze(channel_dim) + partial_placement = _MaskPartial(logical_dim_size=channel_dim_size) + safe_target_partial_ = partial_placement._partition_value( + safe_target_, mesh, mesh_dim + ) + result_partial = torch.gather(x, channel_dim, safe_target_partial_) + # an all_reduce happens here + result_reduced = partial_placement._reduce_value(result_partial, mesh, mesh_dim) + result = -result_reduced.squeeze(channel_dim) + + result = torch.where(target != ignore_index, result, 0) + + if reduction == Reduction.NONE.value and n_dims > 1: + total_weight = x.new_full((), 0.0) + return result, total_weight + + if weight is not None: + new_shape = list(x.shape) + new_shape[channel_dim] = -1 + w = w.expand(new_shape) + wsum = torch.gather(w, channel_dim, safe_target_).squeeze(channel_dim) + wsum = torch.where(target != ignore_index, wsum, 0) + total_weight = wsum.sum() + else: + total_weight = (target != ignore_index).sum().to(x) + + # NOTE: this is correct only on 1D DeviceMesh; o/w additional + # all-reduce on result and total_weight is needed + if reduction == Reduction.SUM.value: + result = result.sum() + elif reduction == Reduction.MEAN.value: + result = result.sum() / total_weight + + return result, total_weight + + +def _nll_loss_forward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + x = cast(DTensor, args[0]) + target = args[1] + weight = args[2] + reduction = cast(int, args[3]) + ignore_index = cast(int, args[4]) + + channel_dim = 1 if x.dim() >= 2 else 0 + channel_dim_size = x.shape[channel_dim] + spec = x._spec + mesh_dim = _find_all_reduce_mesh_dim(spec.placements, channel_dim) + + # Check user input: if target and weight are not DTensors, convert them to DTensors; + # if they are DTensors, check that they have the desired placements. + target_placements = _skip_dim( + replicate_reduction_dims(spec.placements, [channel_dim]), channel_dim + ) + all_replicate_placements = (Replicate(),) * spec.mesh.ndim + target = _cast_to_dtensor(target, target_placements, spec.mesh) + local_weight = None + if weight is not None: + weight = _cast_to_dtensor(weight, all_replicate_placements, spec.mesh) + # For local computation, both (replicated) weight and (sharded) local_weight + # are needed in _nll_loss_forward(). local_weight is generated here using + # DTensor API, without incurring any communication. 
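+        # (Redistributing an already-replicated DTensor to Shard(0) is effectively a
+        # local chunk of the weight on each rank, which is why no communication is
+        # incurred here.)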
+ sharded_placements = [ + Shard(0) if i == mesh_dim else Replicate() for i in range(spec.mesh.ndim) + ] + local_weight = weight.redistribute(spec.mesh, sharded_placements)._local_tensor + assert local_weight.shape[0] == x._local_tensor.shape[channel_dim] + + if reduction == Reduction.NONE.value: + output_placements = target_placements + else: + output_placements = all_replicate_placements + + # tensor inputs to _propagate_tensor_meta need to be DTensors + args = list(args) + args[1], args[2] = target, weight + output_tensor_meta = _propagate_tensor_meta(op_call, tuple(args), kwargs) + + result, total_weight = _nll_loss_forward( + x._local_tensor, + target._local_tensor, + weight._local_tensor if weight is not None else None, + local_weight, + reduction, + ignore_index, + channel_dim_size, + spec.mesh, + mesh_dim, + ) + + return ( + DTensor( + result, + spec.mesh, + output_placements, + shape=output_tensor_meta.shape, + dtype=output_tensor_meta.dtype, + requires_grad=result.requires_grad, + stride=output_tensor_meta.stride, + ), + total_weight, + ) + + +# NOTE: The backward computation of cross_entropy goes through two steps: +# backward for nll_loss and then backward for log_softmax. In loss parallel, +# the two steps are fused into the following function (called by _nll_loss_backward_handler) +# to avoid communication when target contains class indices not class probabilities. +# Also note that the _log_softmax_backward_handler does not perform computation. +# The implementation resembles _nll_loss_backward and _log_softmax_backward_data +# from torch._decomp.decomposition. +def _nll_loss_and_log_softmax_backward( + grad_output: Tensor, + x: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, + total_weight: Tensor, + channel_dim_size: int, + mesh: DeviceMesh, + mesh_dim: int, +) -> Tensor: + channel_dim = 0 if x.dim() < 2 else 1 + if reduction == Reduction.MEAN.value: + grad_output = grad_output / total_weight + + target = target.unsqueeze(channel_dim) + safe_target = torch.where(target != ignore_index, target, 0) + grad_input = torch.zeros_like(x) + + # The following code block is a distributed version of + # grad_input = torch.scatter(grad_input, channel_dim, safe_target, -1.0) + partial_placement = _MaskPartial(logical_dim_size=channel_dim_size) + safe_target = safe_target.squeeze(channel_dim).flatten() + masked_safe_target = partial_placement._partition_value(safe_target, mesh, mesh_dim) + # only update grad_input to -1 if not masked + assert partial_placement.mask_buffer.data is not None + grad_update = partial_placement.mask_buffer.data.float() - 1.0 + arange_1d = torch.arange( + masked_safe_target.shape[0], device=masked_safe_target.device + ) + # The first two cases with x.dim() <= 2 are for aten.nll_loss_backward.default; + # the last case is for aten.nll_loss2d_backward.default. 
+ if x.dim() == 1: + grad_input[masked_safe_target] = grad_update + elif x.dim() == 2: + grad_input[arange_1d, masked_safe_target] = grad_update + else: + grad_input_t = grad_input.transpose(channel_dim, -1) + intermidate_shape = grad_input_t.shape + grad_input_2d = grad_input_t.reshape(-1, x.shape[channel_dim]) + grad_input_2d[arange_1d, masked_safe_target] = grad_update + grad_input = grad_input_2d.view(intermidate_shape).transpose(channel_dim, -1) + + if grad_input.dim() > grad_output.dim() > 0: + grad_output = grad_output.unsqueeze(channel_dim) + + if weight is not None: + new_shape = [1 for _ in range(x.dim())] + new_shape[channel_dim] = weight.shape[0] + weight = weight.reshape(new_shape) + # In order for fused computation to work, the following line is rewritten. + # grad_output = grad_output * weight + new_shape = list(x.shape) + new_shape[channel_dim] = -1 + w = weight.expand(new_shape) + w_target = torch.gather(w, channel_dim, target) + grad_output = grad_output * w_target + + grad_output = torch.where(target != ignore_index, grad_output, 0) + + # NOTE: Instead of directly returning the grad_input as grad_output for log_softmax, + # here we perform backward computation for log_softmax altogether to avoid the + # otherwise extra all_gather communication. + # return grad_input * grad_output + return (grad_input + torch.exp(x)) * grad_output + + +def _nll_loss_backward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + grad_output = cast(DTensor, args[0]) + x = cast(DTensor, args[1]) + target = args[2] + weight = args[3] + reduction = cast(int, args[4]) + ignore_index = cast(int, args[5]) + total_weight = cast(Tensor, args[6]) + + channel_dim = 1 if x.dim() >= 2 else 0 + channel_dim_size = x.shape[channel_dim] + spec = x._spec + mesh_dim = _find_all_reduce_mesh_dim(spec.placements, channel_dim) + + # if target and weight are not DTensors, convert them to DTensors + target_placements = _skip_dim( + replicate_reduction_dims(spec.placements, [channel_dim]), channel_dim + ) + all_replicate_placements = (Replicate(),) * spec.mesh.ndim + target = _cast_to_dtensor(target, target_placements, spec.mesh) + if weight is not None: + weight = _cast_to_dtensor(weight, all_replicate_placements, spec.mesh) + + # tensor inputs to _propagate_tensor_meta need to be DTensors + args = list(args) + args[2], args[3] = target, weight + args[6] = _cast_to_dtensor(total_weight, all_replicate_placements, spec.mesh) + output_tensor_meta = _propagate_tensor_meta(op_call, tuple(args), kwargs) + + result = _nll_loss_and_log_softmax_backward( + grad_output._local_tensor, + x._local_tensor, + target._local_tensor, + weight._local_tensor if weight is not None else None, + reduction, + ignore_index, + total_weight, + channel_dim_size, + spec.mesh, + mesh_dim, + ) + + return DTensor( + result, + spec.mesh, + # the output sharding is the same as input sharding: Shard(channel_dim) on mesh_dim + spec.placements, + shape=output_tensor_meta.shape, + dtype=output_tensor_meta.dtype, + requires_grad=result.requires_grad, + stride=output_tensor_meta.stride, + ) + + +customized_loss_ops = { + aten._log_softmax.default: _log_softmax_handler, + aten._log_softmax_backward_data.default: _log_softmax_backward_handler, + aten.nll_loss_forward.default: _nll_loss_forward_handler, + aten.nll_loss2d_forward.default: _nll_loss_forward_handler, + aten.nll_loss_backward.default: _nll_loss_backward_handler, + aten.nll_loss2d_backward.default: _nll_loss_backward_handler, +} + + 
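+# Minimal usage sketch (illustrative; `tp_mesh`, `sharded_logits`, and `target` are
+# placeholder names for an existing 1-D device mesh, a DTensor sharded on the class
+# dimension such as the output of a ColwiseParallel projection, and replicated
+# integer labels):
+#
+#   with loss_parallel():
+#       loss = torch.nn.functional.cross_entropy(sharded_logits, target)
+#       loss.backward()
+#
+# While the context manager is active, the aten ops listed in `customized_loss_ops`
+# dispatch to the handlers above; `_disable_custom_loss_ops` removes them on exit.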
+def _enable_custom_loss_ops(): + DTensor._op_dispatcher._custom_op_handlers.update(customized_loss_ops) + + +def _disable_custom_loss_ops(): + for custom_op in customized_loss_ops: + DTensor._op_dispatcher._custom_op_handlers.pop(custom_op) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py new file mode 100644 index 0000000000000000000000000000000000000000..3cce417dbeff4a1907910c76e871caf97a0fdb74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py @@ -0,0 +1,489 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from abc import ABC, abstractmethod +from typing import Optional, Union, Tuple +from functools import partial + +import torch +import torch.nn as nn +from torch.distributed._tensor import DeviceMesh, DTensor, Placement, Replicate, Shard, distribute_tensor, distribute_module + + +__all__ = [ + "ParallelStyle", + "RowwiseParallel", + "SequenceParallel", + "ColwiseParallel", + "PrepareModuleInput", + "PrepareModuleOutput", +] + + +class ParallelStyle(ABC): + """ + The parallel style contract defines how the module or submodule should be parallelized. + + It only defines the ``apply`` method for ``parallelize_module`` to use, this allows maximum + flexibility for different kind of style implementations. + """ + + @abstractmethod + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + ... + + +class ColwiseParallel(ParallelStyle): + """ + Partition a compatible nn.Module in a column-wise fashion. Currently supports nn.Linear and nn.Embedding. + Users can compose it together with RowwiseParallel to achieve the sharding of more complicated modules. + (i.e. MLP, Attention) + + Keyword Args: + input_layouts (Placement, optional): + The DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to + become a DTensor. If not specified, we assume the input tensor to be replicated. + output_layouts (Placement, optional): + The DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module + with the user desired layout. If not specified, the output tensor is sharded on the last dimension. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True. + Returns: + A :class:`ParallelStyle` object that represents Colwise sharding of the nn.Module. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, ColwiseParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> m = Model(...) # m is a nn.Module that contains a "w1" nn.Linear submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # By default, the input of the "w1" Linear will be converted to Replicated DTensor + >>> # and the output of "w1" will return :class:`torch.Tensor` that shards on the last dim. + >>> + >>> sharded_mod = parallelize_module(m, tp_mesh, {"w1": ColwiseParallel()}) + >>> ... + + .. note:: By default ``ColwiseParallel`` output is sharded on the last dimension if the ``output_layouts`` not + specified, if there're operators that require specific tensor shape (i.e. before the paired ``RowwiseParallel``), + keep in mind that if the output is sharded the operator might need to be adjusted to the sharded size. 
+ """ + + def __init__( + self, + *, + input_layouts: Optional[Placement] = None, + output_layouts: Optional[Placement] = None, + use_local_output: bool = True + ): + super().__init__() + self.input_layouts = (input_layouts or Replicate(), ) + self.output_layouts = (output_layouts or Shard(-1), ) + # colwise linear runtime sharding (desired sharding): + # 1. requires replicate input + # 2. shard output on last dim + self.desired_input_layouts = (Replicate(), ) + self.use_local_output = use_local_output + + @staticmethod + def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh): + # TODO: figure out dynamo support for instance method and switch this to instance method + + # annotate module input placements/sharding with input_layouts + input_tensor = inputs[0] + if not isinstance(input_tensor, DTensor): + input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False) + + # transform the input layouts to the desired layouts of ColwiseParallel + if input_layouts != desired_input_layouts: + input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=True) + return input_tensor + + def _partition_linear_fn(self, name, module, device_mesh): + # colwise shard weight/bias to Shard(0), weight be Shard(0) + # means Colwise as Linear is input * weight^T + bias, where + # weight would become Shard(1) + for name, param in module.named_parameters(): + dist_param = nn.Parameter( + distribute_tensor(param, device_mesh, [Shard(0)]) + ) + module.register_parameter(name, dist_param) + + def _partition_embedding_fn(self, name, module, device_mesh): + # colwise shard embedding.weight is straight forward as Shard(1) + for name, param in module.named_parameters(): + dist_param = nn.Parameter( + distribute_tensor(param, device_mesh, [Shard(1)]) + ) + module.register_parameter(name, dist_param) + + @staticmethod + def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh): + # outputs is a shard on last dimension DTensor, i.e. Shard(-1) + outputs = outputs.redistribute(placements=output_layouts, async_op=True) + # back to local tensor + return outputs.to_local() if use_local_output else outputs + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + if isinstance(module, nn.Linear): + partition_fn = self._partition_linear_fn + elif isinstance(module, nn.Embedding): + partition_fn = self._partition_embedding_fn + else: + raise NotImplementedError("ColwiseParallel currently only support nn.Linear and nn.Embedding!") + + return distribute_module( + module, + device_mesh, + partition_fn, + partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), + partial(self._prepare_output_fn, self.output_layouts, self.use_local_output), + ) + + +class RowwiseParallel(ParallelStyle): + """ + Partition a compatible nn.Module in a row-wise fashion. Currently supports nn.Linear and nn.Embedding. + Users can compose it with ColwiseParallel to achieve the sharding of more complicated modules. + (i.e. MLP, Attention) + + Keyword Args: + input_layouts (Placement, optional): + The DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to + become a DTensor. If not specified, we assume the input tensor to be sharded on the last dimension. + output_layouts (Placement, optional): + The DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module + with the user desired layout. 
If not specified, the output tensor is replicated. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True. + Returns: + A :class:`ParallelStyle` object that represents Rowwise sharding of the nn.Module. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, RowwiseParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> m = Model(...) # m is a nn.Module that contains a "w2" nn.Linear submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # By default, the input of the "w2" Linear will be converted to DTensor that shards on the last dim + >>> # and the output of "w2" will return a replicated :class:`torch.Tensor`. + >>> + >>> sharded_mod = parallelize_module(m, tp_mesh, {"w2": RowwiseParallel()}), + >>> ... + """ + + def __init__( + self, + *, + input_layouts: Optional[Placement] = None, + output_layouts: Optional[Placement] = None, + use_local_output: bool = True + ): + super().__init__() + self.input_layouts = (input_layouts or Shard(-1), ) + self.output_layouts = (output_layouts or Replicate(), ) + self.use_local_output = use_local_output + + @staticmethod + def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh): + input_tensor = inputs[0] + if not isinstance(input_tensor, DTensor): + input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False) + + if input_layouts != desired_input_layouts: + input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=True) + return input_tensor + + def _partition_linear_fn(self, name, module, device_mesh): + # Rowwise shard weight to Shard(1), bias to Replicate(), weight be Shard(1) + # means Rowwise as nn.Linear is input * weight^T + bias, where + # weight would become Shard(0) + module.register_parameter("weight", nn.Parameter( + distribute_tensor(module.weight, device_mesh, [Shard(1)]) + )) + if module.bias is not None: + module.register_parameter("bias", nn.Parameter( + distribute_tensor(module.bias, device_mesh, [Replicate()]) + )) + + def _partition_embedding_fn(self, name, module, device_mesh): + # rowwise shard embedding.weight is Shard(0) + for name, param in module.named_parameters(): + dist_param = nn.Parameter( + distribute_tensor(param, device_mesh, [Shard(0)]) + ) + module.register_parameter(name, dist_param) + + @staticmethod + def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh): + # Rowwise sharding produces partial output, depending on output layouts: + # 1. to replicate -> allreduce + # 2. to shard -> reduce_scatter + outputs = outputs.redistribute(placements=output_layouts, async_op=True) + # back to local tensor if use_local_output is True + return outputs.to_local() if use_local_output else outputs + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + if isinstance(module, nn.Linear): + partition_fn = self._partition_linear_fn + # rowwise linear runtime sharding requires input tensor shard on last dim + self.desired_input_layouts: Tuple[Placement, ...] 
= (Shard(-1), ) + elif isinstance(module, nn.Embedding): + partition_fn = self._partition_embedding_fn + # rowwise embedding runtime sharding requires input tensor replicated + self.desired_input_layouts = (Replicate(), ) + else: + raise NotImplementedError("RowwiseParallel currently only support nn.Linear and nn.Embedding!") + + return distribute_module( + module, + device_mesh, + partition_fn, + partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), + partial(self._prepare_output_fn, self.output_layouts, self.use_local_output), + ) + + +class SequenceParallel(ParallelStyle): + """ + SequenceParallel replicates a compatible ``nn.Module`` parameters and runs the sharded computation with + input sharded on the sequence dimension. This currently supports ``nn.LayerNorm``, ``nn.Dropout``, and the + `RMSNorm python implementation `__ + + This style implements the operation that is described in the paper + `Reducing Activation Recomputation in Large Transformer Models `__ + + Both the input and output of the ``nn.Module`` will be sharded on the sequence dimension. + + Keyword Args: + sequence_dim (int, optional): + The sequence dimension of the input tensor for the ``nn.Module``, this is used to annotate the input tensor to + become a DTensor that is sharded on the sequence dimension, default: 1. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: False. + Returns: + A :class:`ParallelStyle` object that represents Sequence Parallel of the ``nn.Module``. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, SequenceParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> m = Model(...) # m is a nn.Module that contains a "norm" nn.LayerNorm submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # By default, the input of the "norm" will be converted to DTensor that shards on the sequence dim + >>> # and the output of "norm" will return a sharded on sequence dimension :class:`DTensor`. + >>> + >>> sharded_mod = parallelize_module(m, tp_mesh, {"norm": SequenceParallel()}), + >>> ... + + .. note:: SequenceParallel style assumes ones initialization if there are weights in the nn.Module (i.e. + ``nn.LayerNorm`` or ``RMSNorm``, and they by default have ones initialization). If you have custom + inits for the weights on those modules, you need to broadcast the weights before/after parallelizing + to ensure that they are replicated. 
+ """ + def __init__( + self, + *, + sequence_dim: int = 1, + use_local_output: bool = False + ): + super().__init__() + self.sequence_dim = sequence_dim + self.use_local_output = use_local_output + + def _replicate_module_fn(self, name: str, module: nn.Module, device_mesh: DeviceMesh): + for p_name, param in module.named_parameters(): + # simple replication with fixed ones_ init from LayerNorm/RMSNorm, which allow + # us to simply just use from_local + replicated_param = torch.nn.Parameter( + DTensor.from_local(param, device_mesh, [Replicate()], run_check=False) + ) + module.register_parameter(p_name, replicated_param) + + @staticmethod + def _prepare_input_fn(sequence_dim, mod, inputs, device_mesh): + input_tensor = inputs[0] + if isinstance(input_tensor, DTensor): + return inputs + elif isinstance(input_tensor, torch.Tensor): + return DTensor.from_local(input_tensor, device_mesh, [Shard(sequence_dim)], run_check=False) + else: + raise ValueError(f"expecting input of {mod} to be a torch.Tensor or DTensor, but got {input_tensor}") + + @staticmethod + def _prepare_output_fn(use_local_output, mod, outputs, device_mesh): + return outputs.to_local() if use_local_output else outputs + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + return distribute_module( + module, + device_mesh, + self._replicate_module_fn, + partial(self._prepare_input_fn, self.sequence_dim), + partial(self._prepare_output_fn, self.use_local_output), + ) + + +class PrepareModuleInput(ParallelStyle): + """ + Configure the nn.Module's inputs to convert the input tensors of the nn.Module to DTensors at runtime according to + ``input_layouts``, and perform layout redistribution according to the ``desired_input_layouts``. + + Keyword Args: + input_layouts (Union[Placement, Tuple[Placement]]): + The DTensor layouts of input tensors for the nn.Module, this is used to convert the input tensors to + DTensors. If some inputs are not torch.Tensor or no need to convert to DTensors, ``None`` need to be specified + as a placeholder. + desired_input_layouts (Union[Placement, Tuple[Placement]]): + The desired DTensor layout of input tensors for the nn.Module, this is used to ensure the inputs of the nn.Module + have the desired DTensor layouts. This argument needs to have the same length with ``input_layouts``. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module inputs, default: False. + Returns: + A :class:`ParallelStyle` object that prepares the sharding layouts of the nn.Module's inputs. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, PrepareModuleInput + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> block = TransformerBlock(...) # block is a nn.Module that contains an "attn" Attention submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # According to the style specified below, the first input of attn will be annotated to Sharded DTensor + >>> # and then redistributed to Replicated DTensor. + >>> parallelize_module( + >>> block, # this can be a submodule or module + >>> tp_mesh, + >>> parallelize_plan={ + >>> "attn": PrepareModuleInput( + >>> input_layouts=(Shard(0), None, None, ...), + >>> desired_input_layouts=(Replicate(), None, None, ...) 
+ >>> ), + >>> } + >>> ) + """ + + def __init__( + self, + *, + input_layouts: Union[Placement, Tuple[Placement]], + desired_input_layouts: Union[Placement, Tuple[Placement]], + use_local_output: bool = False + ): + self.input_layouts = (input_layouts,) if isinstance(input_layouts, Placement) else input_layouts + self.desired_input_layouts = \ + (desired_input_layouts,) if isinstance(desired_input_layouts, Placement) else desired_input_layouts + self.use_local_output = use_local_output + assert len(self.input_layouts) == len(self.desired_input_layouts), \ + "input_layouts and desired_input_layouts should have same length!" + + def _prepare_input_fn(self, inputs, device_mesh): + prepared_inputs = [] + if not isinstance(inputs, tuple): + inputs = (inputs,) + if len(inputs) != len(self.input_layouts): + raise ValueError("module inputs and input_layouts should have same length!") + + for inp, input_layout, desired_layout in zip(inputs, self.input_layouts, self.desired_input_layouts): + if input_layout is not None: + if isinstance(inp, DTensor): + # TODO: re-enable the check once we fix the compile path + # assert inp.placements[0] == input_layout + dt_inp = inp + else: + dt_inp = DTensor.from_local(inp, device_mesh, (input_layout,), run_check=False) + if input_layout != desired_layout: + dt_inp = dt_inp.redistribute(placements=(desired_layout,)) + prepared_inputs.append(dt_inp.to_local() if self.use_local_output else dt_inp) + else: + prepared_inputs.append(inp) + return tuple(prepared_inputs) + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + module.register_forward_pre_hook(lambda _, inputs: self._prepare_input_fn(inputs, device_mesh)) # type: ignore[misc, call-arg] + return module + + +class PrepareModuleOutput(ParallelStyle): + """ + Configure the nn.Module's outputs to convert the output tensors of the nn.Module to DTensors at runtime according to + ``output_layouts``, and perform layout redistribution according to the ``desired_output_layouts``. + + Keyword Args: + output_layouts (Union[Placement, Tuple[Placement]]): + The DTensor layouts of output tensors for the nn.Module, this is used to convert the output tensors to + DTensors if they are :class:`torch.Tensor`. If some outputs are not torch.Tensor or no need to convert to DTensors, + ``None`` need to be specified as a placeholder. + desired_output_layouts (Union[Placement, Tuple[Placement]]): + The desired DTensor layouts of output tensors for the nn.Module, this is used to ensure the outputs of the nn.Module + have the desired DTensor layouts. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module outputs, default: True. + Returns: + A ParallelStyle object that prepares the sharding layouts of the nn.Module's outputs. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, PrepareModuleOutput + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> block = TransformerBlock(...) # block is a nn.Module that contains an "attn" Attention submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # According to the style specified below, the output of the TransformerBlock will be converted to Replicated DTensor + >>> # and then redistributed to Sharded DTensor. 
+ >>> parallelize_module( + >>> block, # this can be a submodule or module + >>> tp_mesh, + >>> parallelize_plan = PrepareModuleOutput( + >>> output_layouts=Replicate(), + >>> desired_output_layouts=Shard(0) + >>> ) + >>> ) + """ + def __init__( + self, + *, + output_layouts: Union[Placement, Tuple[Placement]], + desired_output_layouts: Union[Placement, Tuple[Placement]], + use_local_output: bool = True + ): + self.output_layouts = (output_layouts,) if isinstance(output_layouts, Placement) else output_layouts + self.desired_output_layouts = \ + (desired_output_layouts,) if isinstance(desired_output_layouts, Placement) else desired_output_layouts + self.use_local_output = use_local_output + assert len(self.output_layouts) == len(self.desired_output_layouts), \ + "output_layouts and desired_output_layouts should have same length!" + + def _prepare_out_fn(self, outputs, device_mesh): + prepared_outputs = [] + if not isinstance(outputs, tuple): + outputs = (outputs,) + if len(outputs) != len(self.output_layouts): + raise ValueError("module outputs and output_layouts should have same length!") + for out, out_layout, desired_out_layout in zip(outputs, self.output_layouts, self.desired_output_layouts): + if out_layout is not None: + if isinstance(out, DTensor): + # TODO: re-enable the check once we fix the compile path + # assert out.placements[0] == out_layout + dt_out = out + else: + dt_out = DTensor.from_local(out, device_mesh, (out_layout,), run_check=False) + + if out_layout != desired_out_layout: + dt_out = dt_out.redistribute(placements=(desired_out_layout,)) + prepared_outputs.append(dt_out.to_local() if self.use_local_output else dt_out) + else: + prepared_outputs.append(out) + if len(prepared_outputs) == 1: + return prepared_outputs[0] + else: + return tuple(prepared_outputs) + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + module.register_forward_hook(lambda _, inputs, outputs: self._prepare_out_fn(outputs, device_mesh)) # type: ignore[misc, call-arg] + return module
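+# Composition sketch (illustrative only; `model`, the submodule names "w1"/"w2", and
+# the mesh shape are placeholder assumptions mirroring the docstring examples above):
+#
+#   from torch.distributed.device_mesh import init_device_mesh
+#   from torch.distributed.tensor.parallel import parallelize_module
+#
+#   tp_mesh = init_device_mesh("cuda", (8,))
+#   parallelize_module(model, tp_mesh, {
+#       "w1": ColwiseParallel(),   # weight Shard(0); output sharded on the last dim
+#       "w2": RowwiseParallel(),   # weight Shard(1); output replicated via all-reduce
+#   })
+#
+# Pairing Colwise -> Rowwise keeps the intermediate activation sharded on the last
+# dim, so the only forward communication is the all-reduce at the Rowwise output.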