diff --git a/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..8a9901b5238f5ab7ea19879bd85df87c9fbd6a3d --- /dev/null +++ b/ckpts/universal/global_step120/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa08d8aebd7e783ad6b9bfb1f029b49070b388941e8b783eeabed1b09c7107b9 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6438814ed1c97290d50e0afb379ce1030d4288e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py @@ -0,0 +1,2 @@ +from ._fsdp_api import MixedPrecisionPolicy +from .fully_shard import FSDP, fully_shard diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f48bfd3455b4cc8daba009a75fc7993e43b9201d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57a44719f78c6d998c16125f0b77df12662dddfa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2380223e4bbdfa6a45f28310cfb5b0fd8e8b43b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd990e1473636c5d1603a07b94870c889e52f288 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66826a92e6d2e34cc7ed9314c5b077ceb9ff7b0a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa4a80749a802b25140604299e1309521f4db431 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cabb13ab7860226ba2a1bd8e228747c229a825ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58041fd7bd62f47247cc8acbe38a00e6e98536b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e659592d64c12f101aa8d4bda6bc1a6b6a707f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py new file mode 100644 index 0000000000000000000000000000000000000000..8b3ad6e5cf683291a6b657d2b940f112854d9dad --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py @@ -0,0 +1,217 @@ +from typing import List, NamedTuple, Optional, Tuple + +import torch +import torch.distributed as dist +from torch.distributed.distributed_c10d import ReduceOp +from ._fsdp_common import ( + _get_dim0_padded_size, + _raise_assert_with_print, + _to_dtype_if_needed, +) +from ._fsdp_param import FSDPParam + + +class AllGatherResult(NamedTuple): + all_gather_output: torch.Tensor + all_gather_event: Optional[torch.cuda.Event] + all_gather_work: Optional[dist.distributed_c10d.Work] + all_gather_input_numels: List[int] + + +@torch.no_grad() +def foreach_all_gather( + fsdp_params: List[FSDPParam], + group: dist.ProcessGroup, + async_op: bool, + all_gather_copy_in_stream: torch.cuda.Stream, + all_gather_stream: torch.cuda.Stream, + device: torch.device, +) -> Optional[AllGatherResult]: + world_size, rank = group.size(), group.rank() + # - Copy in + with torch.cuda.stream(all_gather_copy_in_stream): + param_all_gather_inputs = [ + fsdp_param.all_gather_input for fsdp_param in fsdp_params + ] + dtype = param_all_gather_inputs[0].dtype + if not all(t.dtype == dtype for t in param_all_gather_inputs): + raise NotImplementedError( + f"Mixed dtype not 
supported yet: {[t.dtype for t in param_all_gather_inputs]}" + ) + inp_split_sizes = [inp.numel() for inp in param_all_gather_inputs] + all_gather_input_numel = sum(inp_split_sizes) + all_gather_output = torch.empty( + (all_gather_input_numel * world_size,), dtype=dtype, device=device + ) + all_gather_input = all_gather_output.narrow( + 0, all_gather_input_numel * rank, all_gather_input_numel + ) + foreach_copy_dsts = torch.split(all_gather_input, inp_split_sizes) + torch._foreach_copy_(foreach_copy_dsts, param_all_gather_inputs) + del param_all_gather_inputs + all_gather_stream.wait_stream(all_gather_copy_in_stream) + with torch.cuda.stream(all_gather_stream): + # - All-gather + all_gather_work = dist.all_gather_into_tensor( + output_tensor=all_gather_output, + input_tensor=all_gather_input, + group=group, + async_op=async_op, + ) + all_gather_event = all_gather_stream.record_event() + return AllGatherResult( + all_gather_output, all_gather_event, all_gather_work, inp_split_sizes + ) + + +@torch.no_grad() +def foreach_all_gather_copy_out( + all_gather_result: AllGatherResult, + fsdp_params: List[FSDPParam], + group: dist.ProcessGroup, +) -> None: + ( + all_gather_output, + all_gather_event, + all_gather_work, + all_gather_input_numels, + ) = all_gather_result + if all_gather_event is not None: # sync op + torch.cuda.current_stream().wait_event(all_gather_event) + if all_gather_work is not None: # async op + all_gather_work.wait() + world_size = group.size() + dtype, device = all_gather_output.dtype, all_gather_output.device + for all_gather_input_numel, fsdp_param in zip(all_gather_input_numels, fsdp_params): + fsdp_param.init_all_gather_output( + all_gather_input_numel, world_size, dtype, device + ) # no-op after 1st call + fsdp_param.alloc_all_gather_output() + all_gather_output = all_gather_output.view(world_size, -1) + out = [ + fsdp_param.all_gather_output.view(world_size, -1) for fsdp_param in fsdp_params + ] + torch.split_with_sizes_copy( + all_gather_output, all_gather_input_numels, dim=1, out=out + ) + + +@torch.no_grad() +def foreach_reduce_scatter( + fsdp_params: List[FSDPParam], + unsharded_grads: List[torch.Tensor], + group: dist.ProcessGroup, + reduce_scatter_stream: torch.cuda.Stream, + orig_dtype: torch.dtype, + reduce_dtype: Optional[torch.dtype], + device: torch.device, + divide_factors: Optional[Tuple[float, float]], +) -> torch.cuda.Event: + """ + ``unsharded_grads`` owns the references to the gradients computed by + autograd, so clearing the list frees the gradients. + """ + grad_dtypes = {grad.dtype for grad in unsharded_grads} + if len(grad_dtypes) != 1: + # Check this at runtime since it could be a real runtime error if e.g. 
+ # fp8 weights do not produce the correct higher precision gradients + _raise_assert_with_print( + f"FSDP reduce-scatter expects uniform gradient dtype but got {grad_dtypes}" + ) + grad_dtype = unsharded_grads[0].dtype + reduce_dtype = reduce_dtype or grad_dtype + world_size = group.size() + padded_unsharded_sizes = tuple( + _get_dim0_padded_size(grad.size(), world_size) for grad in unsharded_grads + ) + reduce_scatter_input_numel = sum(s.numel() for s in padded_unsharded_sizes) + reduce_scatter_output_numel = reduce_scatter_input_numel // world_size + current_stream = torch.cuda.current_stream() + reduce_scatter_stream.wait_stream(current_stream) + with torch.cuda.stream(reduce_scatter_stream): + reduce_scatter_input = torch.empty( + (reduce_scatter_input_numel,), dtype=reduce_dtype, device=device + ) + foreach_reduce_scatter_copy_in( + unsharded_grads, reduce_scatter_input, world_size + ) + # Only after the copy-in finishes can we free the gradients, which were + # computed in the default stream + current_stream.wait_stream(reduce_scatter_stream) + unsharded_grads.clear() + reduce_scatter_output = reduce_scatter_input.new_empty( + (reduce_scatter_output_numel,) + ) + _reduce_scatter( + reduce_scatter_output, reduce_scatter_input, group, divide_factors + ) + reduce_scatter_output = _to_dtype_if_needed(reduce_scatter_output, orig_dtype) + # - View out and accumulate + flat_grad_offset = 0 # [0, reduce_scatter_output_numel - 1] + for padded_unsharded_size, fsdp_param in zip( + padded_unsharded_sizes, fsdp_params + ): + new_sharded_grad = torch.as_strided( + reduce_scatter_output, + size=fsdp_param.sharded_size, + stride=fsdp_param.contiguous_sharded_stride, + storage_offset=flat_grad_offset, + ) + to_accumulate_grad = fsdp_param.sharded_param.grad is not None + new_sharded_dtensor_grad = fsdp_param.to_sharded_dtensor(new_sharded_grad) + if to_accumulate_grad: + fsdp_param.sharded_param.grad += new_sharded_dtensor_grad + else: + fsdp_param.sharded_param.grad = new_sharded_dtensor_grad + padded_sharded_numel = padded_unsharded_size.numel() // world_size + flat_grad_offset += padded_sharded_numel + reduce_scatter_view_out_event = reduce_scatter_stream.record_event() + # The RS output is allocated in the RS stream and used in the default + # stream (for optimizer). To ensure its memory is not reused for later + # RSs, we do not need extra synchronization since the sharded parameters + # hold refs through the end of backward. 
+ return reduce_scatter_view_out_event + + +def foreach_reduce_scatter_copy_in( + unsharded_grads: List[torch.Tensor], + reduce_scatter_input: torch.Tensor, + world_size: int, +) -> None: + grad_views: List[torch.Tensor] = [] + grads_to_copy: List[torch.Tensor] = [] + padded_grad_slices: List[torch.Tensor] = [] + for grad in unsharded_grads: + grad_size = grad.size() + dim0_padded_size = _get_dim0_padded_size(grad_size, world_size) + if dim0_padded_size != grad_size: + padded_grad = grad.new_empty(dim0_padded_size) + padded_grad_slices.append(padded_grad[: grad.size(0)]) + grads_to_copy.append(grad) + grad = padded_grad + grad_views.append(grad.view(world_size, -1)) + if padded_grad_slices: + torch._foreach_copy_(padded_grad_slices, grads_to_copy) + torch.cat(grad_views, dim=-1, out=reduce_scatter_input.view(world_size, -1)) + + +def _reduce_scatter( + output: torch.Tensor, + input: torch.Tensor, + group: dist.ProcessGroup, + divide_factors: Optional[Tuple[float, float]], +) -> None: + if divide_factors: + predivide_factor, postdivide_factor = divide_factors + _div_if_needed(input, predivide_factor) + dist.reduce_scatter_tensor(output, input, group=group) + _div_if_needed(output, postdivide_factor) + else: + # Using NCCL's reduce-scatter to do the division by world size saves + # extra memory read/write from a separate division kernel + dist.reduce_scatter_tensor(output, input, op=ReduceOp.AVG, group=group) + + +def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None: + if div_factor > 1: + tensor.div_(div_factor) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a33dfe7b28e9df8abc43c4b1e4bec32036af4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py @@ -0,0 +1,144 @@ +import itertools +from typing import List, Optional, Set, Tuple, Union + +import torch +import torch.distributed as dist +import torch.nn as nn + +from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh +from torch.distributed.device_mesh import _get_device_handle +from ._fsdp_common import _is_composable_with_fsdp, FSDPMeshInfo, HSDPMeshInfo +from ._fsdp_state import _get_module_fsdp_state + + +def _get_post_forward_mesh_info( + reshard_after_forward: Union[bool, int], mesh_info: FSDPMeshInfo +) -> Optional[FSDPMeshInfo]: + shard_mesh_size = mesh_info.shard_mesh_size + if not isinstance(reshard_after_forward, (bool, int)): + raise ValueError( + "reshard_after_forward should be a bool or an int representing the " + f"group size to reshard to, not {reshard_after_forward}" + ) + # NOTE: `isinstance(False, int)` returns `True`. 
+ if not isinstance(reshard_after_forward, bool) and isinstance( + reshard_after_forward, int + ): + if ( + reshard_after_forward < 1 + or reshard_after_forward > shard_mesh_size + or shard_mesh_size % reshard_after_forward != 0 + ): + raise ValueError( + "If passing reshard_after_forward as an int, it should be a " + f"factor of {shard_mesh_size}, not {reshard_after_forward}" + ) + elif reshard_after_forward == 1: + reshard_after_forward = False + elif reshard_after_forward == shard_mesh_size: + reshard_after_forward = True + post_forward_mesh_info = None + if reshard_after_forward is True: + post_forward_mesh_info = mesh_info + elif reshard_after_forward is not False: # int case + # For HSDP, we can flatten the two replicate dims into the 0th dim + post_forward_mesh_tensor = mesh_info.mesh.mesh.view(-1, reshard_after_forward) + post_forward_mesh = DeviceMesh( + mesh_info.mesh.device_type, post_forward_mesh_tensor + ) + post_forward_mesh_info = HSDPMeshInfo( + post_forward_mesh, shard_mesh_dim=1, replicate_mesh_dim=0 + ) + return post_forward_mesh_info + + +def _init_default_fully_shard_mesh() -> DeviceMesh: + """Default to global CUDA mesh if possible else global CPU mesh.""" + if not dist.distributed_c10d.is_initialized(): + dist.distributed_c10d.init_process_group() + default_pg = dist.distributed_c10d._get_default_group() + device_type = "cuda" if torch.cuda.is_available() else "cpu" + mesh = init_device_mesh(device_type, mesh_shape=(default_pg.size(),)) + return mesh + + +def _get_device_from_mesh(mesh: DeviceMesh) -> torch.device: + if mesh.device_type == "cpu": + return torch.device("cpu") + device_handle = _get_device_handle(mesh.device_type) + return torch.device(mesh.device_type, device_handle.current_device()) + + +def _get_managed_modules(root_module: nn.Module) -> List[nn.Module]: + modules: List[nn.Module] = [] + # Track visisted modules to avoid visiting shared modules multiple times + visited_modules: Set[nn.Module] = set() + + def dfs(module: nn.Module) -> None: + """ + Runs a DFS to collect managed modules, not recursing into modules with + a non-composable API or ``fully_shard`` already applied. + """ + if not _is_composable_with_fsdp(module): + return + elif module is not root_module and _get_module_fsdp_state(module) is not None: + return # nested `fully_shard` module + visited_modules.add(module) + for submodule in module.children(): + if submodule not in visited_modules: + dfs(submodule) + modules.append(module) + + dfs(root_module) + return modules + + +def _get_managed_states( + modules: List[nn.Module], +) -> Tuple[List[nn.Parameter], List[torch.Tensor]]: + params: List[nn.Parameter] = [] + buffers: List[torch.Tensor] = [] + # Track visited parameters/buffers to avoid visiting shared parameters and + # buffers multiple times + visited_params: Set[nn.Parameter] = set() + visited_buffers: Set[torch.Tensor] = set() + for module in modules: + for param in module.parameters(recurse=False): + if param not in visited_params: + params.append(param) + visited_params.add(param) + for buffer in module.buffers(recurse=False): + if buffer not in visited_buffers: + buffers.append(buffer) + visited_buffers.add(buffer) + return params, buffers + + +def _move_states_to_device( + params: List[nn.Parameter], + buffers: List[torch.Tensor], + device: torch.device, + mesh_info: FSDPMeshInfo, +) -> None: + """ + We have FSDP move states to device for simpler and faster initialization + since FSDP almost always uses CUDA for training. 
We move parameters/buffers + rather than modules since modules to support ignoring parameters/buffers in + the future. + """ + # TODO: De-duplicate with `_apply` after `swap_tensors` path lands: + # https://github.com/pytorch/pytorch/issues/115792 + for tensor in itertools.chain(params, buffers): + if tensor.device == device or tensor.device.type == "meta": + # Keep meta-device tensors on meta device for deferred init + continue + if isinstance(tensor, DTensor): + if (dtensor_mesh_type := tensor._spec.mesh.device_type) != device.type: + raise ValueError( + "Requires DTensor to have mesh of the same type as the FSDP mesh " + f"but got {dtensor_mesh_type} for DTensor and {device.type} for FSDP" + ) + raise AssertionError( + f"Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}" + ) + tensor.data = tensor.to(device) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py new file mode 100644 index 0000000000000000000000000000000000000000..0141b8a4f461a239e290ab7a053a6048730367e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py @@ -0,0 +1,438 @@ +from dataclasses import dataclass, field +from enum import auto, Enum +from typing import cast, List, Optional, Tuple + +import torch +import torch.nn as nn + +from torch._prims_common import make_contiguous_strides_for +from torch.distributed._functional_collectives import AsyncCollectiveTensor +from torch.distributed._tensor import DTensor, Placement, Replicate, Shard +from torch.distributed._tensor.device_mesh import _mesh_resources +from torch.distributed._tensor.placement_types import DTensorSpec +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_common import ( + _chunk_with_empty, + _from_local_no_grad, + _get_dim0_chunked_size, + _raise_assert_with_print, + _to_dtype_if_needed, + FSDPMeshInfo, + HSDPMeshInfo, +) + +""" +[Note: FSDP tensors] +FSDP considers the following tensors: +- Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one + on the module when applying FSDP +- Sharded parameter: sharding the original parameter on dim-0 as a DTensor + over the main mesh +- All-gather input: the ``torch.Tensor`` passed to all-gather, derived from the + sharded parameter +- All-gather output: the ``torch.Tensor`` resulting from all-gathering the + all-gather input +- Unsharded parameter: parameter used for forward/backward computation, derived + from the all-gather output; autograd leaf + +We define these tensors to describe the general framework that can accomodate +extensions, where: +- all-gather-input = pre-all-gather-transform(sharded-parameter) +- unsharded-parameter = post-all-gather-transform(all-gather-output) + +For the default ``torch.Tensor`` case, the sharded parameter and all-gather +input share the same underlying tensor data, meaning that they can be thought +of as the same tensors. The same applies for the all-gather output and +unsharded parameter. For non-``torch.Tensor`` extensions, these equivalences +may no longer hold due to the pre/post-all-gather transforms. + +[Note: FSDP and autograd] +FSDP dynamically frees and allocates the unsharded parameter. Since autograd +can pack a reference to it or a view to save for backward, we use storage +resizing to implement the freeing/allocation since that preserves the aliasing. 
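As a minimal standalone illustration (not part of this module) of why storage resizing preserves aliasing, assuming a plain CPU tensor in place of the real unsharded parameter:

    import torch

    param = torch.empty(4, 4)            # stands in for the unsharded parameter
    saved_view = param.view(-1)          # a view autograd might have saved for backward
    param.untyped_storage().resize_(0)   # "free" the memory; the view object stays valid
    param.untyped_storage().resize_(
        param.numel() * param.element_size()
    )                                    # "allocate" again into the same storage
    param.copy_(torch.ones(4, 4))        # write in place; the saved view sees the new data
    assert saved_view[0].item() == 1.0
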
+This implies that we construct the unsharded parameter object once and write to +it in-place thereafter. For the default ``torch.Tensor` original parameter +case, the all-gather output and unsharded parameter share the same +data, so we use storage resizing on the all-gather output. +""" + + +class ShardedState(Enum): + """ + - ``SHARDED``: The sharded parameter is registered to the module. It is the + only contributor to parameter memory. + - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a + smaller world size. Since this data should not be used for computation, + we do not register it to the module. Users should reshard the module + before any in-place modifications. Both it and the sharded parameter + contribute to parameter memory. + - ``UNSHARDED``: The unsharded parameter is registered to the module. Both + it and the sharded parameter contribute to parameter memory. + """ + + SHARDED = auto() + SHARDED_POST_FORWARD = auto() + UNSHARDED = auto() + + +@dataclass +class ParamModuleInfo: + """ + For a parameter, this stores the module and the parameter name to be able + to do a parameter swap via ``setattr(module, param_name, ...)`` or to get + the parameter via ``getattr(module, param_name)``. We additionally save + shared modules and shared parameter names to update them accordingly. + """ + + # Parameter names are unprefixed, e.g. "weight", not "lin.weight" + module: nn.Module + param_name: str + shared_modules: List[nn.Module] = field(default_factory=list) + shared_param_names: List[str] = field(default_factory=list) + + +class FSDPParam: + """ + This class manages a parameter with FSDP or FSDP variants applied, + implementing dim-0 per-parameter sharding. + """ + + orig_dtype: torch.dtype + param_dtype: Optional[torch.dtype] + reduce_dtype: Optional[torch.dtype] + _orig_size: torch.Size # ND + _contiguous_orig_stride: Tuple[int, ...] + sharded_size: torch.Size # ND + contiguous_sharded_stride: Tuple[int, ...] + padded_sharded_param_size: torch.Size # ND + sharded_post_forward_size: torch.Size # ND + contiguous_sharded_post_forward_stride: Tuple[int, ...] + _sharded_param_data: torch.Tensor # 1D + sharded_param: nn.Parameter # ND + _sharded_post_forward_param_data: Optional[torch.Tensor] # 1D + _sharded_post_forward_param: Optional[nn.Parameter] # ND + _unsharded_param: nn.Parameter # ND + _global_placements: Tuple[Placement, ...] + _global_size: torch.Size + _global_stride: Tuple[int, ...] 
+ # DTensor attributes (only defined for DTensor `param`): + _tp_spec: DTensorSpec + + def __init__( + self, + param: nn.Parameter, + module_info: ParamModuleInfo, + mesh_info: FSDPMeshInfo, + post_forward_mesh_info: Optional[FSDPMeshInfo], + device: torch.device, + mp_policy: MixedPrecisionPolicy, + ): + self._module_info: ParamModuleInfo = module_info + self.mesh_info = mesh_info + self.post_forward_mesh_info = post_forward_mesh_info + self.device = device + self._init_sharded_param(param, device) + if self.post_forward_mesh_info: + self._init_sharded_post_forward_param_metadata(param) + self.all_gather_output = torch.empty(0) + self._param_fqn: Optional[str] = None # prefixed from root module + + @torch.no_grad() + def _init_sharded_param(self, param: nn.Parameter, device: torch.device): + if param.device != device and param.device.type != "meta": + raise AssertionError( + f"Expects the parameter to already be moved to device {device} but got {param.device}" + ) + # TODO: Replace the sharded DTensor parameter construction logic with + # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101 + # TODO: Simplify the following sharded parameter padding logic after + # https://github.com/pytorch/pytorch/issues/113045 + self.is_dtensor = isinstance(param, DTensor) + if self.is_dtensor: + self._tp_spec = cast(DTensor, param)._spec + if ( + self.mesh_info.shard_mesh_dim != 0 + or self.mesh_info.replicate_mesh_dim is not None + ): + raise NotImplementedError("Using TP with HSDP is not supported") + dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh) + dp_global_mesh = _mesh_resources.get_parent_mesh(dp_mesh) + tp_global_mesh = _mesh_resources.get_parent_mesh(tp_mesh) + if dp_global_mesh != tp_global_mesh or ( + dp_global_mesh is None or tp_global_mesh is None + ): + raise AssertionError( + "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n" + f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}" + ) + self._global_mesh = dp_global_mesh + if len(self._tp_spec.placements) != 1: + raise NotImplementedError( + f"FSDP only supports 1D TP, not {self._tp_spec.placements}" + ) + global_placements: List[Placement] = [Replicate(), Replicate()] + global_dp_mesh_dim = _mesh_resources.get_parent_mesh_dim(dp_mesh) + global_tp_mesh_dim = _mesh_resources.get_parent_mesh_dim(tp_mesh) + assert global_dp_mesh_dim is not None # mypy + assert global_tp_mesh_dim is not None # mypy + # TODO: Hard code FSDP + TP; need to support HSDP + TP + global_placements[global_dp_mesh_dim] = Shard(0) + global_placements[global_tp_mesh_dim] = self._tp_spec.placements[0] + self._global_placements = tuple(global_placements) + self._global_size = param.size() + self._global_stride = param.stride() + param_data = cast(DTensor, param)._local_tensor + else: + self._global_mesh = self.mesh_info.mesh + self._global_placements = (Shard(0),) + self._global_size = param.size() + self._global_stride = param.stride() + param_data = param + self._orig_size = param_data.size() + self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size) + shard_rank = self.mesh_info.shard_mesh_rank + shard_world_size = self.mesh_info.shard_mesh_size + chunks = _chunk_with_empty(param_data, shard_world_size, dim=0) + sharded_param = chunks[shard_rank] + self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size()) + self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size) + padded_sharded_size = chunks[0].size() # 0th always padded + 
padded_sharded_param = param_data.new_zeros(padded_sharded_size) + self.padded_sharded_param_size = padded_sharded_param.size() + if sharded_param.numel() > 0: + padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param) + self._sharded_param_data = padded_sharded_param.view(-1) + self.sharded_param = nn.Parameter( + self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)]) + ) + self.sharded_param.requires_grad_(param.requires_grad) + # Let `param_data` be freed normally when its ref count reaches 0 when + # the `fully_shard` call returns to allow provided parameters to alias + self._setattr_on_modules(self.sharded_param) + self.sharded_state = ShardedState.SHARDED + + def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None: + mesh_info = self.post_forward_mesh_info + assert mesh_info is not None # mypy + param_data = param._local_tensor if isinstance(param, DTensor) else param + chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0) + self.sharded_post_forward_size = _get_dim0_chunked_size( + chunks[mesh_info.shard_mesh_rank], param_data.size() + ) + self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for( + self.sharded_post_forward_size + ) + + def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy): + param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype) + self.orig_dtype = self.sharded_param.dtype + # Clamp `param_dtype` to `None` if no casting is required + if param_dtype == self.orig_dtype: + param_dtype = None + self.param_dtype = param_dtype + self.reduce_dtype = reduce_dtype + # None indicates that the mixed precision is not enabled + + def init_all_gather_output( + self, + all_gather_input_numel: int, + world_size: int, + dtype: torch.dtype, + device: torch.device, + ): + if self.all_gather_output.numel() > 0: + return # already initialized + all_gather_output_size = torch.Size([all_gather_input_numel * world_size]) + self.all_gather_output = torch.empty( + all_gather_output_size, dtype=dtype, device=device + ) + + def init_unsharded_param(self): + if hasattr(self, "_unsharded_param"): + return # already initialized + # For the default path (no post-all-gather), the all-gather output + # gives the unsharded parameter data directly + unsharded_param = torch.as_strided( + self.all_gather_output, + self._orig_size, + self._contiguous_orig_stride, + storage_offset=0, + ) + if self.is_dtensor: + unsharded_param = _from_local_no_grad( + unsharded_param, + self._tp_spec.mesh, + self._tp_spec.placements, + self._global_size, + self._global_stride, + ) + self._unsharded_param = nn.Parameter(unsharded_param) + self._unsharded_param.requires_grad_(self.sharded_param.requires_grad) + + def to_sharded(self) -> None: + self._setattr_on_modules(self.sharded_param) + self.free_all_gather_output() + self.sharded_state = ShardedState.SHARDED + + def to_sharded_post_forward(self) -> None: + if self.is_dtensor: + raise NotImplementedError( + "Resharding to smaller mesh with TP is not supported yet" + ) + self._assert_in_states(ShardedState.UNSHARDED) + assert self.post_forward_mesh_info is not None # mypy + shard_world_size = self.post_forward_mesh_info.shard_mesh_size + if (numel := self.all_gather_output.numel()) % shard_world_size != 0: + _raise_assert_with_print( + f"All-gather output size ({numel}) must be divisible by the shard " + f"world size ({shard_world_size})" + ) + shard_rank = self.post_forward_mesh_info.shard_mesh_rank + sharded_numel = numel // shard_world_size + 
self._sharded_post_forward_param_data = ( + self.all_gather_output.narrow(0, sharded_numel * shard_rank, sharded_numel) + ).clone() # clone to be able to free all-gather output + sharded_post_forward_tensor = torch.as_strided( + self._sharded_post_forward_param_data, + size=self.sharded_post_forward_size, + stride=self.contiguous_sharded_post_forward_stride, + storage_offset=0, + ) + self._sharded_post_forward_param = nn.Parameter( + self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor) + ) + self._setattr_on_modules(self._sharded_post_forward_param) + self.free_all_gather_output() + self.sharded_state = ShardedState.SHARDED_POST_FORWARD + + def to_unsharded(self) -> None: + # Assume that the data has been allocated and all-gathered + set_requires_grad_if_needed(self.sharded_param, self._unsharded_param) + self._setattr_on_modules(self._unsharded_param) + if self.sharded_state == ShardedState.SHARDED_POST_FORWARD: + # The data is allocated in the default stream via the post-forward + # reshard and must be kept alive for the next all-gather copy-in. + # Since we call this method after the copy-out, the data's lifetime + # is ensured without further synchronization. + self._sharded_post_forward_param = None + self._sharded_post_forward_param_data = None # free + self.sharded_state = ShardedState.UNSHARDED + + def _setattr_on_modules(self, param: nn.Parameter) -> None: + unsafe_setattr_param( + self._module_info.module, self._module_info.param_name, param + ) + for shared_module, shared_param_name in zip( + self._module_info.shared_modules, self._module_info.shared_param_names + ): + unsafe_setattr_param(shared_module, shared_param_name, param) + + def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor: + """ + Converts a local tensor representing either the sharded parameter or + sharded gradient to DTensor. + """ + if tensor.shape != self.sharded_size: + _raise_assert_with_print( + f"Expects size {self.sharded_size} but got {tensor.shape}" + ) + return _from_local_no_grad( + tensor, + self._global_mesh, + self._global_placements, + self._global_size, + self._global_stride, + ) + + def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor: + if tensor.shape != self.sharded_post_forward_size: + _raise_assert_with_print( + f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}" + ) + assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo) + # TODO: Prefer this DTensor to be read-only and generalize the + # placement once we support TP. 
+ return _from_local_no_grad( + tensor, + self.post_forward_mesh_info.mesh, + (Replicate(), Shard(0)), + self._global_size, + self._global_stride, + ) + + def alloc_all_gather_output(self) -> None: + unsafe_alloc_storage(self.all_gather_output) + + def free_all_gather_output(self) -> None: + unsafe_free_storage(self.all_gather_output) + + @property + def all_gather_input(self) -> torch.Tensor: # 1D + self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD) + if self.sharded_state == ShardedState.SHARDED: + return _to_dtype_if_needed(self._sharded_param_data, self.param_dtype) + elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD: + return _to_dtype_if_needed( + cast(torch.Tensor, self._sharded_post_forward_param_data), + self.param_dtype, + ) + return torch.empty(0) # mypy + + @property + def unsharded_param(self) -> nn.Parameter: # ND + self._assert_in_states(ShardedState.UNSHARDED) + return self._unsharded_param + + @property + def unsharded_grad_data(self) -> torch.Tensor: + grad = self.unsharded_param.grad + assert grad is not None, "Expects unsharded_param.grad to not be None" + return self._get_grad_inner_tensor(grad) + + def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor: + if self.is_dtensor: + if isinstance(grad, AsyncCollectiveTensor): + grad = grad.wait() + grad = cast(DTensor, grad)._local_tensor + return grad + + def _assert_in_states(self, *states: ShardedState) -> None: + if self.sharded_state not in states: + _raise_assert_with_print( + f"Expects to be in one of {states}, not {self.sharded_state}" + ) + + +# NOTE: Unsafe here refers to not checking whether the storage is already +# allocated or freed, respectively. We should be safe to use them since we +# explicitly manage the state transition. +def unsafe_alloc_storage(tensor: torch.Tensor) -> None: + # Skip the already-allocated check and assume that `tensor` is the base + # tensor to save CPU overhead + tensor.untyped_storage().resize_(tensor.numel() * tensor.itemsize) + + +def unsafe_free_storage(tensor: torch.Tensor) -> None: + # Skip the already-freed check to save CPU overhead + tensor.untyped_storage().resize_(0) + + +# NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial +# CPU overhead, if the module did not override it. For FSDP, we know we do not +# need those checks when transitioning between sharded/unsharded parameters. 
+def unsafe_setattr_param( + module: nn.Module, param_name: str, param: nn.Parameter +) -> None: + if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__: + module._parameters[param_name] = param + else: # slow path + setattr(module, param_name, param) + + +def set_requires_grad_if_needed( + src_tensor: torch.Tensor, dst_tensor: torch.Tensor +) -> None: + # Only call `requires_grad_` if needed to avoid the Python <> C++ context + # switch overhead + if src_tensor.requires_grad != dst_tensor.requires_grad: + dst_tensor.requires_grad_(src_tensor.requires_grad) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py new file mode 100644 index 0000000000000000000000000000000000000000..88bfe0b86f373425bb9253538740875cc7cd0beb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py @@ -0,0 +1,506 @@ +import contextlib + +from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple + +import torch +import torch.distributed as dist +import torch.nn as nn + +from torch.autograd.graph import Node +from torch.distributed.fsdp._common_utils import _named_parameters_with_duplicates +from torch.utils._pytree import tree_flatten, tree_unflatten +from torch.utils.hooks import RemovableHandle +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_collectives import ( + AllGatherResult, + foreach_all_gather, + foreach_all_gather_copy_out, + foreach_reduce_scatter, +) +from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo, TrainingState +from ._fsdp_param import FSDPParam, ParamModuleInfo, ShardedState + +_ModuleToHandleDict = Dict[nn.Module, RemovableHandle] # for state dict + + +""" +[Note: Overlapping all-gather copy-in and all-gather] +For implicit forward prefetching, we want to overlap the next copy-in with the +current all-gather. We do so using a separate copy-in stream. However, since +we have the all-gather input as a view into the output, we must make sure to +copy into different memory from the current all-gather's output. Thus, we keep +a reference to the current all-gather's output and have the next FSDP parameter +group free it after its copy-in. Finally, we have the last FSDP state flush the +reference to avoid holding onto memory after forward. 
+""" + + +class FSDPCommContext: + """This has the communication state shared across FSDP states/parameter groups.""" + + def init(self): + # Setting the all-gather/reduce-scatter streams to be higher priority + # can help avoid some issues where their copies in/out are delayed and + # block computation + high_priority = -1 + # All-gather state and copy-in stream allow overlapping the next + # copy-in with the current all-gather in forward; copy-in overlaps with + # reduce-scatter in backward without the separate copy-in stream + self.all_gather_copy_in_stream = torch.cuda.Stream(priority=high_priority) + self.all_gather_state: Optional[AllGatherState] = None + # All-gather stream allows overlapping next all-gather with current + # forward compute + self.all_gather_stream = torch.cuda.Stream(priority=high_priority) + # Reduce-scatter stream gives separate execution "thread" for post- + # backward logic like pre/post-gradient division and reduce-scatter + self.reduce_scatter_stream = torch.cuda.Stream(priority=high_priority) + # Post-forward order for explicit backward prefetching + self.post_forward_order: List[FSDPParamGroup] = [] # will cause ref cycles + + def get_all_gather_streams( + self, training_state: TrainingState + ) -> Tuple[torch.cuda.Stream, torch.cuda.Stream]: + if training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD): + # Use separate streams for implicit prefetching + return self.all_gather_copy_in_stream, self.all_gather_stream + current_stream = torch.cuda.current_stream() + return current_stream, current_stream + + +# See [Note: Overlapping all-gather copy-in and all-gather] +class AllGatherState(NamedTuple): + all_gather_result: AllGatherResult + event: torch.cuda.Event # all-gather copy-out + + +class FSDPParamGroup: + """This class represents a parameter group to communicate together.""" + + _orig_dtype: torch.dtype + _reduce_dtype: Optional[torch.dtype] + + def __init__( + self, + params: List[nn.Parameter], + module: nn.Module, + mesh_info: FSDPMeshInfo, + post_forward_mesh_info: Optional[FSDPMeshInfo], + device: torch.device, + mp_policy: MixedPrecisionPolicy, + ): + self.module = module # permit ref cycle because 1:1 lifetime + param_module_infos = _get_param_module_infos(params, module) + self.fsdp_params = [ + FSDPParam( + param, module_info, mesh_info, post_forward_mesh_info, device, mp_policy + ) + for param, module_info in zip(params, param_module_infos) + ] + self.mesh_info = mesh_info + self.post_forward_mesh_info = post_forward_mesh_info + self.device = device + self.mp_policy = mp_policy + self._training_state = TrainingState.IDLE + # Group's sharded state always matches its parameters' sharded states + self._sharded_state = ShardedState.SHARDED + self._module_fqn: Optional[str] = None # prefixed from root module + + # - Hook state + self._module_to_pre_save_state_dict_hook_handle: _ModuleToHandleDict = {} + self._module_to_pre_load_state_dict_hook_handle: _ModuleToHandleDict = {} + + # - Communication and communication/computation overlap + self.comm_ctx = FSDPCommContext() + # Group's indices in the shared post-forward order + self._post_forward_indices: List[int] = [] + # Used to avoid mistargeted backward prefetches when the module is used + # in forward but not in backward: for each forward, we record a tuple + # of the output's grad fns and later query the autograd engine whether + # any grad fn will execute in the current backward to know to prefetch. 
+ self.all_forward_output_grad_fns: Set[Tuple[Node, ...]] = set() + # Whether to reduce-scatter or all-reduce gradients, respectively + # (can be set to false to save communication during gradient + # accumulation); all-reducing without reduce-scatter is disallowed + self.reduce_scatter_grads: bool = True + self.all_reduce_grads: bool = True + + # - CUDA events for stream synchronization + # Holds the all-gather output buffer, sync objects, and metadata + self._all_gather_result: Optional[AllGatherResult] = None + # Holds the reduce-scatter view-out CUDA event that marks the end of + # the group's post-backward (e.g. reduce-scatter and div), which should + # be waited on at the end of backward + self._reduce_scatter_view_out_event: Optional[torch.cuda.Event] = None + # Holds the reshard-after-forward CUDA event when resharding to a + # different world size, which should be waited on in the next unshard + self._reshard_after_forward_event: Optional[torch.cuda.Event] = None + + # Initialization # + def _init_mp_dtypes(self) -> None: + for fsdp_param in self.fsdp_params: + fsdp_param.init_dtype_attrs(self.mp_policy) + orig_dtypes = {fsdp_param.orig_dtype for fsdp_param in self.fsdp_params} + if len(orig_dtypes) != 1: + # This can be relaxed if we copy-out for the reduce-scatter + raise AssertionError( + f"FSDP expects uniform original parameter dtype but got {orig_dtypes}" + ) + self._orig_dtype = next(iter(orig_dtypes)) + reduce_dtypes = {fsdp_param.reduce_dtype for fsdp_param in self.fsdp_params} + if len(reduce_dtypes) != 1: + # This can be relaxed if we issue one reduce-scatter per reduce + # dtype (but we would need a way for users to specify multiple + # reduce dtypes) + raise AssertionError( + f"FSDP expects uniform reduce dtype but got {reduce_dtypes}" + ) + self._reduce_dtype = next(iter(reduce_dtypes)) + + def _init_grad_divide_factors(self): + data_parallel_world_size = 1 + data_parallel_world_size *= self.mesh_info.shard_mesh_size + if isinstance(self.mesh_info, HSDPMeshInfo): + data_parallel_world_size *= self.mesh_info.replicate_mesh_size + if self._reduce_dtype == torch.float32: + # Use NCCL's AVG op to divide after reduction since it is more + # performant and fp32 has sufficient precision + self._grad_divide_factors: Optional[Tuple[float, float]] = None + return + # For N data parallel workers, each worker computes g_i, and they + # collectively reduce (g_1 + ... + g_N) / N. To avoid overflow and + # underflow, we divide by ~sqrt(N) before and after the reduction. + factor: int = 1 + while ( + data_parallel_world_size % factor == 0 + and data_parallel_world_size / factor > factor + ): + factor *= 2 + factor = float(factor) + self._grad_divide_factors = (factor, data_parallel_world_size / factor) + + def lazy_init(self): + param_names_on_meta = [ + fsdp_param._param_fqn + for fsdp_param in self.fsdp_params + if fsdp_param.sharded_param.device.type == "meta" + ] + if param_names_on_meta: + raise RuntimeError( + "FSDP parameters should be materialized from meta device before training, " + f"but the following were still on meta device: {param_names_on_meta}\n" + "For example, call module.to_empty(device) to materialize to device and " + "call module.reset_parameters() on each module to initialize values." 
+ ) + # Initialize mixed precision attributes lazily in case the user changes + # the parameter dtypes after construction time but before forward + self._init_mp_dtypes() + self._init_grad_divide_factors() + self._register_state_dict_hooks() + + # Runtime # + def unshard(self, async_op: bool = False): + if self._all_gather_result is not None: # already called, pending wait + return + if self.is_unsharded: + return # no-op + if self._reshard_after_forward_event is not None: + # Resharded parameter data is allocated in the default stream and + # used in the all-gather streams + self._wait_all_gather_streams_on_event(self._reshard_after_forward_event) + self._reshard_after_forward_event = None + self._all_gather_result = foreach_all_gather( + self.fsdp_params, + self._all_gather_process_group, + async_op, + *self.comm_ctx.get_all_gather_streams(self._training_state), + self.device, + ) + + def wait_for_unshard(self): + """ + 1. In forward with implict prefetching, to overlap the current copy-out + with the next all-gather, we save a reference to the current all-gather + result to free after the next copy-out. + 2. Otherwise (explicit prefetching or in backward), we free the + all-gather result immediately after the current copy-out since we can + already overlap the current copy-out with the previous reduce-scatter. + """ + if not self._all_gather_result: + return # no preceding unshard + if self._training_state == TrainingState.FORWARD: # implicit prefetch + if prev_all_gather_state := self.comm_ctx.all_gather_state: + self._wait_all_gather_streams_on_event(prev_all_gather_state.event) + self.comm_ctx.all_gather_state = None # free the all-gather result + foreach_all_gather_copy_out( + self._all_gather_result, self.fsdp_params, self._all_gather_process_group + ) + for fsdp_param in self.fsdp_params: + fsdp_param.init_unsharded_param() # no-op after 1st call + self._to_unsharded() + all_gather_copy_out_event = torch.cuda.Event() + all_gather_copy_out_event.record() + if self._training_state == TrainingState.FORWARD: + self.comm_ctx.all_gather_state = AllGatherState( + self._all_gather_result, all_gather_copy_out_event + ) + else: + self._wait_all_gather_streams_on_event(all_gather_copy_out_event) + self._all_gather_result = None # free unless saved in `all_gather_state` + + def _wait_all_gather_streams_on_event(self, event: torch.cuda.Event): + self.comm_ctx.all_gather_copy_in_stream.wait_event(event) + self.comm_ctx.all_gather_stream.wait_event(event) + + def reshard(self): + if self._training_state == TrainingState.FORWARD: + if not self._reshard_after_forward: + return + if self._use_post_forward_mesh: + self._to_sharded_post_forward() + self._reshard_after_forward_event = torch.cuda.Event() + self._reshard_after_forward_event.record() + return + self._to_sharded() + + def pre_forward( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + with torch.profiler.record_function("FSDP::pre_forward"): + self._training_state = TrainingState.FORWARD + self.unshard() + self.wait_for_unshard() + args, kwargs = self._register_post_backward_hook(args, kwargs) + return args, kwargs + + def post_forward(self, module: nn.Module, input: Any, output: Any): + with torch.profiler.record_function("FSDP::post_forward"): + self.reshard() + self._record_post_forward() + self._training_state = TrainingState.IDLE + return output + + def _record_post_forward(self) -> None: + # Since a group has one pre-backward unshard for each forward call + # 
before the backward, we record each usage (with multiplicity) + post_forward_index = len(self.comm_ctx.post_forward_order) + self.comm_ctx.post_forward_order.append(self) + self._post_forward_indices.append(post_forward_index) + + def pre_backward(self, forward_grad_fns: Tuple[Any, ...], *unused: Any): + with torch.profiler.record_function("FSDP::pre_backward"): + self._training_state = TrainingState.PRE_BACKWARD + self.unshard() # no-op if prefetched + self.wait_for_unshard() + # Can be already removed if running multiple `backward`s + self.all_forward_output_grad_fns.discard(forward_grad_fns) + self._prefetch_unshard() + + def post_backward(self, *unused: Any): + self._training_state = TrainingState.POST_BACKWARD + with torch.profiler.record_function("FSDP::post_backward_reshard"): + if not self.reduce_scatter_grads: + self.reshard() + return + # Save the autograd-computed gradients before resharding to only + # access the unsharded parameters when their data is present + fsdp_params_with_grad: List[FSDPParam] = [] + unsharded_grads: List[torch.Tensor] = [] + for fsdp_param in self.fsdp_params: + if fsdp_param.unsharded_param.grad is not None: + fsdp_params_with_grad.append(fsdp_param) + unsharded_grads.append(fsdp_param.unsharded_grad_data) + fsdp_param.unsharded_param.grad = None + self.reshard() + if len(fsdp_params_with_grad) == 0: + return + with torch.profiler.record_function("FSDP::post_backward_reduce"): + self._reduce_scatter_view_out_event = foreach_reduce_scatter( + fsdp_params_with_grad, + unsharded_grads, + self._reduce_scatter_process_group, + self.comm_ctx.reduce_scatter_stream, + self._orig_dtype, + self._reduce_dtype, + self.device, + self._grad_divide_factors, + ) + + def finalize_backward(self): + if self._reduce_scatter_view_out_event is not None: + torch.cuda.current_stream().wait_event(self._reduce_scatter_view_out_event) + self._reduce_scatter_view_out_event = None + self._training_state = TrainingState.IDLE + self._post_forward_indices.clear() + self.all_forward_output_grad_fns.clear() + + def _prefetch_unshard(self): + if self._training_state == TrainingState.PRE_BACKWARD: + if not self._post_forward_indices: + # Can be cleared if running multiple `backward`s + return + curr_index = self._post_forward_indices.pop() + if (target_index := curr_index - 1) < 0: + return + target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index] + if any( + torch._C._will_engine_execute_node(grad_fn) # type: ignore[attr-defined] + for grad_fns in target_fsdp_param_group.all_forward_output_grad_fns + for grad_fn in grad_fns + ): + with torch.profiler.record_function( + "FSDP::backward_prefetch" + ), target_fsdp_param_group.use_training_state( + TrainingState.PRE_BACKWARD + ): + target_fsdp_param_group.unshard() + + # Utilities # + def _to_sharded(self): + if not self.is_sharded: + for fsdp_param in self.fsdp_params: + fsdp_param.to_sharded() + self._sharded_state = ShardedState.SHARDED + + def _to_sharded_post_forward(self): + if not self.is_sharded_post_forward: + for fsdp_param in self.fsdp_params: + fsdp_param.to_sharded_post_forward() + self._sharded_state = ShardedState.SHARDED_POST_FORWARD + + def _to_unsharded(self): + if not self.is_unsharded: + for fsdp_param in self.fsdp_params: + fsdp_param.to_unsharded() + self._sharded_state = ShardedState.UNSHARDED + + @property + def is_sharded(self) -> bool: + return self._sharded_state == ShardedState.SHARDED + + @property + def is_sharded_post_forward(self) -> bool: + return self._sharded_state == 
ShardedState.SHARDED_POST_FORWARD + + @property + def is_unsharded(self) -> bool: + return self._sharded_state == ShardedState.UNSHARDED + + @contextlib.contextmanager + def use_training_state(self, training_state: TrainingState): + old_training_state = self._training_state + self._training_state = training_state + try: + yield + finally: + self._training_state = old_training_state + + # Hook Registration # + def _register_post_backward_hook( + self, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + if not torch.is_grad_enabled(): + return args, kwargs + args_list, args_spec = tree_flatten(args) + kwargs_list, kwargs_spec = tree_flatten(kwargs) + args_kwargs_list = list(args_list) + list(kwargs_list) + inp_tensor_indices: List[int] = [] + inp_tensors: List[torch.Tensor] = [] + for i, obj in enumerate(args_kwargs_list): + if torch.is_tensor(obj) and obj.requires_grad: + inp_tensor_indices.append(i) + inp_tensors.append(obj) + if len(inp_tensors) == 0: + return args, kwargs # no tensors that require gradients + inp_tensors = RegisterPostBackwardFunction.apply(self, *inp_tensors) + for inp_tensor_idx, inp_tensor in zip(inp_tensor_indices, inp_tensors): + args_kwargs_list[inp_tensor_idx] = inp_tensor + args_list = args_kwargs_list[: len(args_list)] + kwargs_list = args_kwargs_list[len(args_list) :] + args = tree_unflatten(args_list, args_spec) + kwargs = tree_unflatten(kwargs_list, kwargs_spec) + return args, kwargs + + def _register_state_dict_hooks(self) -> None: + assert len(self._module_to_pre_save_state_dict_hook_handle) == 0 + assert len(self._module_to_pre_load_state_dict_hook_handle) == 0 + modules_with_fsdp_params: Set[nn.Module] = { + fsdp_param._module_info.module for fsdp_param in self.fsdp_params + } + + def to_sharded_hook(*args: Any, **kwargs: Any) -> None: + self._to_sharded() + + for module in modules_with_fsdp_params: + self._module_to_pre_save_state_dict_hook_handle[ + module + ] = module.register_state_dict_pre_hook(to_sharded_hook) + self._module_to_pre_load_state_dict_hook_handle[ + module + ] = module._register_load_state_dict_pre_hook(to_sharded_hook) + + # Properties # + @property + def _reshard_after_forward(self) -> bool: + return self.post_forward_mesh_info is not None + + @property + def _use_post_forward_mesh(self) -> bool: + return ( + self._reshard_after_forward + and self.mesh_info != self.post_forward_mesh_info + ) + + @property + def _all_gather_process_group(self) -> dist.ProcessGroup: + mesh_info = ( + cast(FSDPMeshInfo, self.post_forward_mesh_info) + if self.is_sharded_post_forward + else self.mesh_info + ) + assert isinstance(mesh_info, FSDPMeshInfo) + return mesh_info.shard_process_group + + @property + def _reduce_scatter_process_group(self) -> dist.ProcessGroup: + mesh_info = self.mesh_info + assert isinstance(mesh_info, FSDPMeshInfo) + return mesh_info.shard_process_group + + +def _get_param_module_infos( + params: List[nn.Parameter], module: nn.Module +) -> List[ParamModuleInfo]: + """ + Shared parameter: lin1.weight = lin2.weight + Shared module: mlp.lin1 = mlp.lin2 + We do not remove duplicates when traversing both modules and parameters to + find shared modules' parameters and shared parameters within a module. 
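A small standalone sketch of the shared-parameter case described above (hypothetical modules, not part of this function):

    import torch.nn as nn

    mlp = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
    mlp[1].weight = mlp[0].weight  # lin2.weight shares lin1.weight
    assert mlp[0].weight is mlp[1].weight
    # Traversing with remove_duplicate=False yields both names for the same
    # nn.Parameter, which is why duplicates are kept when recording shared
    # modules and shared parameter names.
    shared = [
        name
        for name, p in mlp.named_parameters(remove_duplicate=False)
        if p is mlp[0].weight
    ]
    assert shared == ["0.weight", "1.weight"]
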
+ """ + params_set = set(params) + param_to_module_info: Dict[nn.Parameter, ParamModuleInfo] = {} + for _, submodule in module.named_modules(remove_duplicate=False): + for param_name, param in _named_parameters_with_duplicates( + submodule, recurse=False + ): + if param in params_set: + if param not in param_to_module_info: + param_to_module_info[param] = ParamModuleInfo(submodule, param_name) + else: + param_to_module_info[param].shared_modules.append(submodule) + param_to_module_info[param].shared_param_names.append(param_name) + if len(param_to_module_info) != len(params): + raise AssertionError(f"Some parameters are not in the module tree of {module}") + return [param_to_module_info[param] for param in params] + + +class RegisterPostBackwardFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, param_group: FSDPParamGroup, *inputs: torch.Tensor): + # All tensors in `inputs` should require gradient + ctx.param_group = param_group + return inputs + + @staticmethod + def backward(ctx, *grads: torch.Tensor): + ctx.param_group.post_backward() + return (None,) + grads diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py new file mode 100644 index 0000000000000000000000000000000000000000..b2aba4182f4666a2deab5346f4a6532f083a1a0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py @@ -0,0 +1,246 @@ +import functools + +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.autograd.graph import Node, register_multi_grad_hook +from torch.distributed._composable_state import ( + _get_module_state, + _insert_module_state, + _State, +) +from torch.distributed.utils import _to_kwargs +from torch.utils._pytree import tree_flatten, tree_map +from torch.utils.hooks import RemovableHandle +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_common import _cast_fp_tensor, TrainingState +from ._fsdp_param import FSDPParam +from ._fsdp_param_group import FSDPCommContext, FSDPParamGroup + + +class FSDPStateContext: + """This has state shared across FSDP states.""" + + def __init__(self): + # All FSDP states in the root state's module tree + self.all_states: List[FSDPState] = [] + # Iteration's forward root runs the once-per-forward logic; this root + # may not be the overall root set by lazy initialization in cases where + # only a submodule runs forward (e.g. 
encoder-only for eval) + self.iter_forward_root: Optional[FSDPState] = None + # Final callback should only be queued once per backward + self.post_backward_final_callback_queued: bool = False + # Whether to finalize backward in this backward's final callback + self.is_last_backward: bool = True + + +class FSDPState(_State): + def __init__(self): + super().__init__() + self._fsdp_param_group: Optional[FSDPParamGroup] = None + self._is_root: Optional[bool] = None # root set during lazy init + self._state_ctx = FSDPStateContext() + self._comm_ctx = FSDPCommContext() + self._training_state: TrainingState = TrainingState.IDLE + self._pre_backward_hook_handles: List[RemovableHandle] = [] + + # Define a separate init since `__init__` is called in the contract + def init( + self, module: nn.Module, device: torch.device, mp_policy: MixedPrecisionPolicy + ) -> None: + _insert_module_state(module, self) + self._module = module + self._device = device + self._mp_policy = mp_policy + self._pre_forward_hook_handle = module.register_forward_pre_hook( + self._pre_forward, prepend=True, with_kwargs=True + ) + self._post_forward_hook_handle = module.register_forward_hook( + self._post_forward, prepend=False + ) + + def _root_pre_forward( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + self._lazy_init() + if self._state_ctx.iter_forward_root is not None: + return args, kwargs + self._state_ctx.iter_forward_root = self + with torch.profiler.record_function("FSDP::root_pre_forward"): + # Wait for optimizer before implicitly prefetched all-gathers + current_stream = torch.cuda.current_stream() + self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream) + self._comm_ctx.all_gather_stream.wait_stream(current_stream) + if self._device.type == "cuda": + with torch.profiler.record_function("FSDP::inputs_to_device"): + args_tuple, kwargs_tuple = _to_kwargs( + args, kwargs, self._device, False + ) # same as DDP + args, kwargs = args_tuple[0], kwargs_tuple[0] + return args, kwargs + + def _lazy_init(self) -> None: + """ + Lazy initialization represents when all modules' parallelisms have + finalized (e.g. FSDP has been applied to all desired modules). This + means that we can determine which state is the root, and we do so by + the 1st state to run forward. 
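+        For example (illustrative): if ``fully_shard`` is applied to each
+        transformer block and then to the overall model, the model's state
+        becomes the root because the model's forward runs first.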
+ """ + if self._is_root is not None: + return # no-op: already initialized + self._is_root = True + root_module = self._module + for module_name, module in root_module.named_modules(): + if (state := _get_module_fsdp_state(module)) is None: + continue + if module is not root_module: + if state._is_root is not None: + raise RuntimeError( + "FSDP state has already been lazily initialized for " + f"{module_name}\nFSDP requires running forward through " + "the root module first" + ) + state._is_root = False + self._state_ctx.all_states.append(state) + if self._fsdp_param_group: + # For the root, do not reshard after forward since for training, + # the parameters would be freed and all-gathered immediately + self._fsdp_param_group.post_forward_mesh_info = None + self._init_fqns() + self._init_shared_state() + # Run parameter group lazy inits after initializing FQNs for improved + # error messages + for state in self._state_ctx.all_states: + if state._fsdp_param_group: + state._fsdp_param_group.lazy_init() + + def _init_shared_state(self) -> None: + self._comm_ctx.init() + for state in self._state_ctx.all_states: + state._state_ctx = self._state_ctx + state._comm_ctx = self._comm_ctx + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.comm_ctx = self._comm_ctx + + def _init_fqns(self) -> None: + """Sets module and parameter FQN attributes for debugging.""" + assert self._is_root + root_module = self._module + param_to_fsdp_param: Dict[nn.Parameter, FSDPParam] = {} + module_to_fsdp_param_group: Dict[nn.Module, FSDPParamGroup] = {} + for state in self._state_ctx.all_states: + if fsdp_param_group := state._fsdp_param_group: + for fsdp_param in fsdp_param_group.fsdp_params: + param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param + module_to_fsdp_param_group[fsdp_param_group.module] = fsdp_param_group + for param_name, param in root_module.named_parameters(): + if param in param_to_fsdp_param: + param_to_fsdp_param[param]._param_fqn = param_name + for module_name, module in root_module.named_modules(): + if module in module_to_fsdp_param_group: + module_to_fsdp_param_group[module]._module_fqn = module_name + + def _pre_forward( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + # When composing with module-hook-based activation checkpointing, the + # the pre-backward hook is responsible for the unshard + if self._training_state == TrainingState.PRE_BACKWARD: + return args, kwargs + self._training_state = TrainingState.FORWARD + args, kwargs = self._root_pre_forward(module, args, kwargs) + if self._mp_policy.cast_forward_inputs and self._mp_policy.param_dtype: + with torch.profiler.record_function("FSDP::cast_forward_inputs"): + cast_fn = functools.partial( + _cast_fp_tensor, self._mp_policy.param_dtype + ) + args, kwargs = tree_map(cast_fn, args), tree_map(cast_fn, kwargs) + if self._fsdp_param_group: + args, kwargs = self._fsdp_param_group.pre_forward(module, args, kwargs) + return args, kwargs + + def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any: + # When composing with module-hook-based activation checkpointing, the + # post-backward hook is responsible for the reshard + if self._training_state == TrainingState.PRE_BACKWARD: + return output + if self._fsdp_param_group: + output = self._fsdp_param_group.post_forward(module, input, output) + output = self._register_pre_backward_hook(output) + self._training_state = TrainingState.IDLE + if self._state_ctx.iter_forward_root is self: 
+ if all_gather_state := self._comm_ctx.all_gather_state: + # Free the last all-gather result if needed; refer to + # [Note: Overlapping all-gather copy-in and all-gather] + self._comm_ctx.all_gather_copy_in_stream.wait_event( + all_gather_state.event + ) + self._comm_ctx.all_gather_stream.wait_event(all_gather_state.event) + self._comm_ctx.all_gather_state = None # free the all-gather result + self._state_ctx.iter_forward_root = None + if self._mp_policy.output_dtype is not None: + with torch.profiler.record_function("FSDP::cast_forward_outputs"): + output = tree_map( + functools.partial(_cast_fp_tensor, self._mp_policy.output_dtype), + output, + ) + return output + + def _pre_backward(self, forward_grad_fns: Tuple[Node, ...], *unused: Any) -> None: + self._training_state = TrainingState.PRE_BACKWARD + self._register_root_post_backward_final_callback() + if self._fsdp_param_group: + self._fsdp_param_group.pre_backward(forward_grad_fns, *unused) + + def _root_post_backward_final_callback(self) -> None: + with torch.profiler.record_function("FSDP::root_post_backward_callback"): + for state in self._state_ctx.all_states: + if state._fsdp_param_group and state._fsdp_param_group.is_unsharded: + # Run post-backward in case forward inputs did not require + # gradient so the autograd backward did not run + state._fsdp_param_group.post_backward() + if self._state_ctx.is_last_backward: + state._finalize_backward() + if self._state_ctx.is_last_backward: + self._comm_ctx.post_forward_order.clear() + self._state_ctx.post_backward_final_callback_queued = False + + def _finalize_backward(self) -> None: + self._training_state = TrainingState.IDLE + for handle in self._pre_backward_hook_handles: + handle.remove() + self._pre_backward_hook_handles.clear() + if self._fsdp_param_group: + self._fsdp_param_group.finalize_backward() + + def _register_pre_backward_hook(self, output: Any) -> Any: + if not torch.is_grad_enabled(): + return output + + flat_outputs, _ = tree_flatten(output) + tensors = tuple(t for t in flat_outputs if t.requires_grad) + if tensors: + grad_fns = tuple(t.grad_fn for t in tensors if t.grad_fn is not None) + pre_backward = functools.partial(self._pre_backward, grad_fns) + handle = register_multi_grad_hook(tensors, pre_backward, mode="any") + self._pre_backward_hook_handles.append(handle) + if self._fsdp_param_group: + self._fsdp_param_group.all_forward_output_grad_fns.add(grad_fns) + return output + + def _register_root_post_backward_final_callback(self): + if self._state_ctx.post_backward_final_callback_queued: + return + self._state_ctx.post_backward_final_callback_queued = True + Variable._execution_engine.queue_callback( + self._root_post_backward_final_callback + ) + + +def _get_module_fsdp_state(module: nn.Module) -> Optional[FSDPState]: + state = _get_module_state(module) + if isinstance(state, FSDPState): + return state + return None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..47184af418c27832914b048b13be6e3d456eab55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py @@ -0,0 +1,246 @@ +from typing import Any, cast, Optional, Union + +import typing_extensions + +import torch +import torch.nn as nn + +from torch.distributed._composable import contract +from torch.distributed._tensor import DeviceMesh, DTensor + +from 
._fsdp_api import MixedPrecisionPolicy +from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo +from ._fsdp_init import ( + _get_device_from_mesh, + _get_managed_modules, + _get_managed_states, + _get_post_forward_mesh_info, + _init_default_fully_shard_mesh, + _move_states_to_device, +) +from ._fsdp_param_group import FSDPParamGroup +from ._fsdp_state import _get_module_fsdp_state, FSDPState + + +# The decorator adds a state object to `module` that can be accessed via +# `fully_shard.state(module)`. The state object and module are 1:1. +@contract(state_cls=FSDPState) +def fully_shard( + module: nn.Module, + *, + mesh: Optional[DeviceMesh] = None, + reshard_after_forward: Union[bool, int] = True, + mp_policy: MixedPrecisionPolicy = MixedPrecisionPolicy(), +): + """ + Shard module parameters across data parallel workers. + + This function applies fully sharded data parallelism (FSDP) or a variant to + ``module``, a technique for memory savings at the cost of communication. + Parameters are sharded across ``mesh``, and in turn, so are their gradients + and optimizer states. + + The sharded parameters are all-gathered to construct the unsharded + parameters for forward or backward computation. The unsharded parameters + are freed after computation to save memory. The gradients are reduced + across the mesh and divided by the mesh size for data parallelism. The + optimizer step runs on the sharded parameters. + + Each call to ``fully_shard`` constructs one communication group that + includes the parameters in ``module.parameters()`` except those already + assigned to a group from a nested call. Each group's parameters and its + gradients are communicated together in one collective, respectively. + Constructing multiple groups across the model (e.g. "layer by layer") + allows for peak memory savings and communication/computation overlap. + + Implementation-wise, the sharded parameters are represented as + :class:`DTensor` s, sharded on dim-0, and the unsharded parameters are + represented as :class:`Tensor` s. A module forward pre-hook all-gathers the + parameters, and a module forward hook frees them. Similar backward hooks + gather parameters and later free parameters/reduce gradients. + + Args: + mesh (Optional[DeviceMesh]): This data parallel mesh defines the + sharding and device. If 1D, then parameters are fully sharded + across the 1D mesh (FSDP). If 2D, then parameters are sharded + across the 0th dim and replicated across the 1st dim (HSDP). The + mesh's device type gives the device type used for communication; + if a CUDA or CUDA-like device type, then we use the current device. + reshard_after_forward (Union[bool, int]): This controls the parameter + behavior after forward and can trade off memory and communication: + - If ``True``, then this reshards parameters after forward and + all-gathers in backward. + - If ``False``, then this keeps the unsharded parameters in memory + after forward and avoids the all-gather in backward. + - If an ``int``, then this represents the world size to reshard to + after forward. It should be a non-trivial divisor of the ``mesh`` + shard dim size (i.e. excluding 1 and the dim size itself). A choice + may be the intra-node size (e.g. ``torch.cuda.device_count()``). + This allows the all-gather in backward to be over a smaller world + size at the cost of higher memory usage than setting to ``True``. 
+ - The root FSDP state has its value specially set to ``False`` as a + heuristic since its parameters would typically be immediately + all-gathered for backward. + - After forward, the parameters registered to the module depend on + to this: The registered parameters are the sharded parameters if + ``True``; unsharded parameters if ``False``; and the paramters + resharded to the smaller mesh otherwise. To modify the parameters + between forward and backward, the registered parameters must be the + sharded parameters. For ``False`` or an ``int``, this can be done + by manually resharding via :meth:`reshard`. + mp_policy (MixedPrecisionPolicy): This controls the mixed precision + policy, which offers parameter/reduction mixed precision for this + module. See :class:`MixedPrecisionPolicy` for details. + """ + if isinstance(module, (nn.ModuleList, nn.ModuleDict)): + raise ValueError( + f"fully_shard does not support containers that do not implement forward: {module}" + ) + mesh = mesh or _init_default_fully_shard_mesh() + if mesh.ndim not in (1, 2): + raise ValueError(f"fully_shard expects a 1D or 2D DeviceMesh but got {mesh}") + elif mesh.ndim == 1: + mesh_info = FSDPMeshInfo(mesh, shard_mesh_dim=0) + else: + mesh_info = HSDPMeshInfo(mesh, shard_mesh_dim=1, replicate_mesh_dim=0) + device = _get_device_from_mesh(mesh) + post_forward_mesh_info = _get_post_forward_mesh_info( + reshard_after_forward, mesh_info + ) + + state = fully_shard.state(module) + state.init(module, device, mp_policy) + + managed_modules = _get_managed_modules(module) + params, buffers = _get_managed_states(managed_modules) + _move_states_to_device(params, buffers, device, mesh_info) + if params: + state._fsdp_param_group = FSDPParamGroup( + params, module, mesh_info, post_forward_mesh_info, device, mp_policy + ) + + # for dynamo + for module in managed_modules: + module._is_fsdp_managed_module = True # type: ignore[assignment] + module._fsdp_use_orig_params = True # type: ignore[assignment] + + # Place FSDP leftmost for highest priority in the method resolution order + cls = module.__class__ + dct = {"__deepcopy__": unimplemented_deepcopy} + new_cls = type(f"FSDP{cls.__name__}", (FSDP, cls), dct) + module.__class__ = new_cls + return module + + +def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> typing_extensions.Never: + raise AssertionError( + "FSDP does not support deepcopy. Please use state dict for serialization." + ) + + +class FSDP: + def __new__(cls, *args, **kwargs): + """ + Override ``__new__`` to remove the FSDP class and directly construct + the original class for cases like indexing into a container module. + """ + # Use index 2 since 0 is the dynamically constructed `FSDP<...>` class + # and index 1 is the `FSDP` class itself + orig_cls = cls.__mro__[2] + self = orig_cls.__new__(orig_cls, *args, **kwargs) + self.__init__(*args, **kwargs) + return self + + def reshard(self) -> None: + """ + Reshards the module's parameters, registering the sharded parameters + to the module and freeing the unsharded parameters if needed. This + method is *not* recursive. + """ + state = self._get_fsdp_state() + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.reshard() + + def set_is_last_backward(self, is_last_backward: bool) -> None: + """ + Sets whether the next backward is the last one, meaning that FSDP + should wait for gradient reduction to finish and clear internal data + structures used for explicit prefetching. 
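+        A minimal gradient-accumulation sketch (illustrative; assumes ``model``
+        was wrapped with ``fully_shard`` and that ``microbatches`` and
+        ``optim`` are defined elsewhere)::
+
+            for i, microbatch in enumerate(microbatches):
+                is_last = i == len(microbatches) - 1
+                model.set_requires_gradient_sync(is_last)  # reduce grads only on the last microbatch
+                model.set_is_last_backward(is_last)        # finalize FSDP state only on the last backward
+                model(microbatch).sum().backward()
+            optim.step()
+            optim.zero_grad()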
+ """ + state = self._get_fsdp_state() + state._state_ctx.is_last_backward = is_last_backward + + def set_requires_gradient_sync( + self, requires_gradient_sync: bool, recurse: bool = True + ) -> None: + """ + Sets if the module should sync gradients. This can be used to implement + gradient accumulation without communication. For HSDP, this controls + both reduce-scatter and all-reduce together. + + Args: + requires_gradient_sync (bool): Whether to reduce gradients for the + module's parameters. + recurse (bool): Whether to set for all submodules or just the + passed-in module. + """ + for module in cast(nn.Module, self).modules(): + if isinstance(module, FSDP): + state = module._get_fsdp_state() + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.reduce_scatter_grads = requires_gradient_sync + fsdp_param_group.all_reduce_grads = requires_gradient_sync + + def set_requires_all_reduce(self, requires_all_reduce: bool, recurse: bool = True): + """ + Sets if the module should all-reduce gradients. This can be used to + implement gradient accumulation with only reduce-scatter but not + all-reduce for HSDP. + """ + for module in cast(nn.Module, self).modules(): + if isinstance(module, FSDP): + state = module._get_fsdp_state() + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.all_reduce_grads = requires_all_reduce + + def _get_fsdp_state(self) -> FSDPState: + if (state := _get_module_fsdp_state(cast(nn.Module, self))) is None: + raise AssertionError(f"No FSDP state found on {self}") + return state + + def _apply(self, *args: Any, **kwargs: Any) -> Any: + # Reshard to ensure that sharded parameters are registered + self.reshard() + ret = super()._apply(*args, **kwargs) # type: ignore[misc] + state = self._get_fsdp_state() + if not (fsdp_param_group := state._fsdp_param_group): + return ret + # TODO: Remove this padding logic once DTensor pads the local tensor: + # https://github.com/pytorch/pytorch/issues/113045 + with torch.no_grad(): + for fsdp_param in fsdp_param_group.fsdp_params: + module_info = fsdp_param._module_info + new_param = getattr(module_info.module, module_info.param_name) + if new_param is not fsdp_param.sharded_param: + if torch.__future__.get_swap_module_params_on_conversion(): + raise AssertionError( + "Expects swap_tensors to preserve object but got " + f"{new_param} instead of {fsdp_param.sharded_param}" + ) + else: + raise AssertionError( + "Please set torch.__future__.set_swap_module_params_on_conversion(True) " + "to use _apply methods with FSDP" + ) + local_tensor = new_param._local_tensor + padded_sharded_size = fsdp_param.padded_sharded_param_size + if local_tensor.size() != padded_sharded_size: + padded_local_tensor = local_tensor.new_zeros(padded_sharded_size) + padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor) + local_tensor = padded_local_tensor + fsdp_param._sharded_param_data = local_tensor.view(-1) + assert isinstance(fsdp_param.sharded_param, DTensor) # mypy + fsdp_param.sharded_param._local_tensor = local_tensor[ + : fsdp_param.sharded_size[0] + ] + return ret diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff0c799233ce4f31e1eb4a3c3f812557f104cdaf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py @@ -0,0 +1,15 @@ +from .api import CheckpointException +from .default_planner import 
DefaultLoadPlanner, DefaultSavePlanner +from .filesystem import FileSystemReader, FileSystemWriter +from .fsspec import FsspecReader, FsspecWriter +from .metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + Metadata, + TensorStorageMetadata, +) +from .optimizer import load_sharded_optimizer_state_dict +from .planner import LoadPlan, LoadPlanner, ReadItem, SavePlan, SavePlanner, WriteItem +from .state_dict_loader import load, load_state_dict +from .state_dict_saver import async_save, save, save_state_dict +from .storage import StorageReader, StorageWriter diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b28f35c622a1bd9967b2bed28610888033a6f40d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef77995ea3d0f5b9480ffa8ffe2b0fe6859d0a21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36cac2cad7fbf19646e10fbe56700bd8e9cdb32c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0224fecfbf0cf3aaca21abbbba92da26280f7102 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c8dde20fdbc153b27a63fb4f0e87bb5391c492 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a822011bcfbfd39669ebd24871390c75b8259fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a1d5a19db4cef44be660914856317058cb38852 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05fd78eb79a72afd2bdeba5558c87c0c99712d05 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b996d8a503f4053658b0627607de20474bcb15d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..067bd6d53e63631f361963b8db9c845326a2da80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34034de2d578e6376ff53074ea7cfc47885a900f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61efe60350d052421b367c2420e7a189d2946af7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/fsspec.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/fsspec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d896cad8ca5c12373916f78cb61fd3ee42ab6d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/fsspec.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b0107373eaf23524d94be5ae578253bcb0188c5d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93cdc8d9b6a73842bf15415c0b37c222cf751420 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..088961aa704cf643b49fa4a874d9460fadfc441e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d022f9a6f38eafc17049c5eebf15a65997e33ecd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f964d93668acec62b6ed8b44e958f2f92936dfe7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1882cbb2425262cf004cc3dccc4588e9f47349a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..766d436fb46f5c0d19ede555d1c4d0547724cb8c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..089acb0933a70c5e9c573f972ad550c684ce2cc5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee1f597fe39c710d5cb2586552a3912c3d0c25ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7fbd952af9b2c76e0547f8e4da1a392528d4382 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..631fe2738275a06dbd857499d21b6e17680d0c30 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_save_plans.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_save_plans.py new file mode 100644 index 0000000000000000000000000000000000000000..b06aabaa6b3752fb29fcad3638dc8aeb99985b45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_save_plans.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import dataclasses +from collections import defaultdict +from typing import Dict, List, Set + +from torch.distributed.checkpoint.metadata import MetadataIndex +from torch.distributed.checkpoint.planner import SavePlan, WriteItem + +__all__ = ["dedup_save_plans"] + + +def dedup_save_plans(all_plans: List[SavePlan]) -> List[SavePlan]: + """ + Removes duplicate entries from appearing on multiple SavePlans. For each duplicate across + a set of SavePlans, only the smallest SavePlan in terms of planned storage keeps the entry. 
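+    For example (illustrative): if ranks 0 and 1 both plan to write a
+    replicated tensor, the write item is kept only in whichever of the two
+    plans currently has the smaller total planned storage size, and it is
+    dropped from the other plan.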
+ """ + + write_item_to_plan_indices: Dict[MetadataIndex, Set[int]] = defaultdict(set) + write_item_idx_to_write_item: Dict[MetadataIndex, WriteItem] = {} + for plan_idx, plan in enumerate(all_plans): + for write_item in plan.items: + # map each write item to its plan + write_item_to_plan_indices[write_item.index].add(plan_idx) + write_item_idx_to_write_item[write_item.index] = write_item + + # put item in the plan with the smallest size and remove it from the other plan_indices + to_remove: List[Set] = [set() for _ in range(len(all_plans))] + plan_to_size = [0] * len(all_plans) + for write_item_idx, plan_indices in write_item_to_plan_indices.items(): + select_plan_idx = min(plan_indices, key=lambda plan_idx: plan_to_size[plan_idx]) + + write_item = write_item_idx_to_write_item[write_item_idx] + # essentially ignores the storage size of anything that is not a tensor, since + # we don't know how much storage they represent + plan_to_size[select_plan_idx] += write_item.tensor_storage_size() or 1 + + plan_indices.remove(select_plan_idx) + for plan_idx in plan_indices: + to_remove[plan_idx].add(write_item_idx) + + for plan_idx, remove_set in enumerate(to_remove): + new_items = [ + write_item + for write_item in all_plans[plan_idx].items + if write_item.index not in remove_set + ] + all_plans[plan_idx] = dataclasses.replace(all_plans[plan_idx], items=new_items) + + return all_plans diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_tensors.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_tensors.py new file mode 100644 index 0000000000000000000000000000000000000000..7419e8cbb97b031396dfb3b411df0bc1c5f5c474 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_dedup_tensors.py @@ -0,0 +1,59 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import dataclasses +import logging +from typing import Dict, List + +from torch.distributed.checkpoint.metadata import MetadataIndex +from torch.distributed.checkpoint.planner import SavePlan + +__all__ = ["dedup_tensors"] + + +def init_logger() -> logging.Logger: + logger = logging.getLogger(__name__) + level = logging.INFO + logger.setLevel(level) + console = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + console.setFormatter(formatter) + console.setLevel(level) + logger.addHandler(console) + logger.propagate = False + return logger + + +logger = init_logger() + + +# TODO add docstring for dedup_tensors +def dedup_tensors(all_plans: List[SavePlan]) -> List[SavePlan]: + all_plans = list(all_plans) + key_to_plan: Dict[MetadataIndex, List[int]] = {} + for plan_idx, plan in enumerate(all_plans): + for write_item in plan.items: + key_to_plan.setdefault(write_item.index, []).append(plan_idx) + + replicated_items = {k: v for k, v in key_to_plan.items() if len(v) > 1} + + # Remove duplicates by always keeping the first entry. + # Compute the per-rank remove set. 
+ plan_to_keys: Dict[int, List[MetadataIndex]] = {} + for key, plans in replicated_items.items(): + for plan_idx in plans[1:]: + plan_to_keys.setdefault(plan_idx, []).append(key) + if len(plan_to_keys) > 0: + logger.info("Duplicate keys to remove: %s", plan_to_keys) + + for plan_idx, keys in plan_to_keys.items(): + key_set = set(keys) + # rewrite items and remove elements + new_items = [ + write_item + for write_item in all_plans[plan_idx].items + if write_item.index not in key_set + ] + all_plans[plan_idx] = dataclasses.replace(all_plans[plan_idx], items=new_items) + + return all_plans diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..3dfd7b61e3db60b17a4567b0abc986adf516e73f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py @@ -0,0 +1,15 @@ +# Mypy will not try inferring the types of any 3rd party libraries installed. +# mypy: ignore-errors + +import logging + +from torch.distributed.checkpoint.fsspec import ( # noqa: F401 # noqa: F401 + FsspecReader, + FsspecWriter, +) + +log = logging.getLogger(__name__) +log.warning( + "FSSpec Filesystem has been made public, please update your " + "import to torch.distributed.checkpoint" +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..527a67e6892fe5b7dc5bfd654ca0ffbe83139fa7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import Dict, Tuple + +from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE + +from ._traverse import OBJ_PATH, set_element, STATE_DICT_ITEM, traverse_state_dict + +""" +TODO: +Need to add ability to handle tuple, OrderedDict, NamedTuple. +Update mappings from dict to a class. +Change set_element to recreate the right type for tuple, OrderedDict, and NamedTuple. +""" + + +FLATTEN_MAPPING = Dict[str, OBJ_PATH] + + +# TODO: Update Docstring for nested_dict.py +def flatten_state_dict( + state_dict: STATE_DICT_TYPE, +) -> Tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]: + """ + Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary. + + Use ``unflatten_state_dict`` to revert this process. + Returns: + A tuple with the flatten state_dict and a mapping from original to new state_dict. + N.B. The new keys are derived from the object paths, joined by dot. + For example: ``{ 'a': {'b':...}}`` results in the key `a.b`. 
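+    Example (illustrative; assumes ``torch`` is imported)::
+
+        >>> sd = {"model": {"weight": torch.zeros(2)}, "step": 10}
+        >>> flat, mapping = flatten_state_dict(sd)
+        >>> sorted(flat.keys())
+        ['model.weight', 'step']
+        >>> mapping["model.weight"]
+        ('model', 'weight')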
+ """ + flattened: STATE_DICT_TYPE = {} + mappings: FLATTEN_MAPPING = {} + + def flat_copy(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None: + new_fqn = ".".join(map(str, path)) + if new_fqn in flattened: + raise ValueError(f"duplicated flatten key {new_fqn}") + flattened[new_fqn] = value + mappings[new_fqn] = path + + traverse_state_dict(state_dict, flat_copy) + return flattened, mappings + + +def unflatten_state_dict( + state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING +) -> STATE_DICT_TYPE: + """Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``.""" + nested: STATE_DICT_TYPE = {} + for key, value in state_dict.items(): + set_element(nested, mapping[key], value) + return nested diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..582dfc069924173b8f7de16a5f05a16fc98397c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py @@ -0,0 +1,103 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import copy + +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import Shard, ShardedTensor, ShardMetadata +from torch.distributed._shard.sharded_tensor.metadata import ShardedTensorMetadata +from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE +from torch.distributed.remote_device import _remote_device + +from ._traverse import OBJ_PATH, set_element, STATE_DICT_ITEM, traverse_state_dict +from .utils import _element_wise_add, _normalize_device_info + + +# TODO: We need to refactor this code. +def _flatten_sharded_tensors(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE: + r""" + Transform ``state_dict`` by flattening all nested ShardedTensor instances found. + + The resulting ShardedTensor instances are only correct regarding the local shard and + MUST not be used for any other purpose but checkpointing, as no operator will work with them. + + This function should be used in conjunction with a state_dict produced by FSDP's + StateDictType.SHARDED_STATE_DICT methods. 
+ """ + new_state_dict: STATE_DICT_TYPE = {} + + def rewrite_dict(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None: + if not isinstance(value, ShardedTensor): + set_element(new_state_dict, path, value) + return + shards = value.local_shards() + + if len(shards) == 0: + return + if len(shards) != 1: + set_element(new_state_dict, path, value) + return + + outer_shard = shards[0] + + inner_st = outer_shard.tensor + if not isinstance(inner_st, ShardedTensor): + set_element(new_state_dict, path, value) + return + + if len(inner_st.local_shards()) != 1: + raise ValueError("Cannot handle inner tensor with more than 1 shard") + inner_shard = inner_st.local_shards()[0] + + local_shards = [ + Shard( + tensor=inner_shard.tensor, + metadata=ShardMetadata( + shard_offsets=_element_wise_add( + outer_shard.metadata.shard_offsets, + inner_shard.metadata.shard_offsets, + ), + shard_sizes=inner_shard.metadata.shard_sizes, + placement=f"rank:{dist.get_rank()}/{inner_shard.tensor.device}", + ), + ) + ] + + st_meta: ShardedTensorMetadata = copy.deepcopy(value.metadata()) + other_rank = 0 if dist.get_rank() > 0 else 1 + device_info = _normalize_device_info(inner_shard.tensor.device.type, 0) + + # Remove the outer ST shard the inner ST covers + for i, shard_md in enumerate(st_meta.shards_metadata): + if shard_md.shard_offsets == outer_shard.metadata.shard_offsets: + st_meta.shards_metadata.pop(i) + break + + # Attribute other rank for the other shards + for shard_md in st_meta.shards_metadata: + shard_md.placement = _remote_device(f"rank:{other_rank}/{device_info}") + + # Add other inner shards from the inner tensor + for inner_md in inner_st.metadata().shards_metadata: + if inner_md.shard_offsets != inner_shard.metadata.shard_offsets: + st_meta.shards_metadata.append( + ShardMetadata( + shard_offsets=_element_wise_add( + outer_shard.metadata.shard_offsets, + inner_md.shard_offsets, + ), + shard_sizes=inner_md.shard_sizes, + placement=f"rank:{other_rank}/{device_info}", + ) + ) + + # Finally add this shard + st_meta.shards_metadata.append(local_shards[0].metadata) + + st = ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards=local_shards, + sharded_tensor_metadata=st_meta, + ) + set_element(new_state_dict, path, st) + + traverse_state_dict(state_dict, rewrite_dict) + return new_state_dict diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_storage_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_storage_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2b0ba566d47aa1b08e5a8d3d7067ca122ff9fe90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_storage_utils.py @@ -0,0 +1,50 @@ +import os +from typing import List, Type, Union + +from .filesystem import FileSystemReader, FileSystemWriter + +from .storage import StorageReader, StorageWriter + + +def _storage_setup( + storage: Union[StorageReader, StorageWriter, None], + checkpoint_id: Union[str, os.PathLike, None], + reader: bool = False, +) -> Union[None, StorageReader, StorageWriter]: + if storage: + if checkpoint_id is not None: + storage.reset(checkpoint_id) + return storage + + if not checkpoint_id: + raise RuntimeError( + "`checkpoint_id` must be specificed if " + "storage_reader/storage_writer is None." 
+ ) + + targets: List[Type[Union[StorageReader, StorageWriter]]] = [] + if reader: + targets = [ + FileSystemReader, + ] + else: + targets = [ + FileSystemWriter, + ] + try: + from .fsspec import FsspecReader, FsspecWriter + + targets.append(FsspecReader if reader else FsspecWriter) + except Exception: + pass + + for target in targets: + if target.validate_checkpoint_id(checkpoint_id): + storage = target(checkpoint_id) # type: ignore[call-arg] + storage.reset(checkpoint_id) + return storage + + raise RuntimeError( + "Cannot detect which StorageReader or StorageWriter to use. " + "Please specify the storage_reader/storage_writer." + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_traverse.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_traverse.py new file mode 100644 index 0000000000000000000000000000000000000000..604b5e1a80c17e65a0683643a395f0a21d50e2eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/_traverse.py @@ -0,0 +1,167 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import ( + Callable, + cast, + Collection, + List, + Mapping, + MutableMapping, + Optional, + Tuple, + TypeVar, + Union, +) + +import torch +from torch.distributed._shard.sharded_tensor.api import ShardedTensor +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE + +PATH_ITEM = Union[str, int] +OBJ_PATH = Tuple[PATH_ITEM, ...] +T = TypeVar("T") + +STATE_DICT_ITEM = object +CONTAINER_TYPE = MutableMapping[PATH_ITEM, STATE_DICT_ITEM] + +__all__ = ["traverse_state_dict", "set_element", "get_element", "print_tensor"] + + +def _keep_visiting_tensors(value: STATE_DICT_ITEM) -> bool: + return isinstance(value, torch.Tensor) + + +# TODO: update docstring for traverse.py +def traverse_state_dict( + state_dict: STATE_DICT_TYPE, + visitor: Callable[[OBJ_PATH, STATE_DICT_ITEM], None], + keep_traversing: Callable[[STATE_DICT_ITEM], bool] = _keep_visiting_tensors, +) -> None: + """ + Invoke ``visitor`` for each value recursively in ``state_dict``. + + Traversal is short-circuited when if finds a collection for which ``keep_visiting_tensors`` evaluates + to false for all elements. + By default, all collections with at least one ``torch.Tensor`` element are traversed. + Visitor takes a path argument that is a tuple of the keys used to reach it. 
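+    Example (illustrative; assumes ``torch`` is imported)::
+
+        >>> def visitor(path, value):
+        ...     print(path, type(value).__name__)
+        >>> traverse_state_dict({"model": {"weight": torch.zeros(2)}}, visitor)
+        ('model', 'weight') Tensor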
+ """ + + # a value is terminal if it has no other containers values inside it + def _is_terminal(value: STATE_DICT_ITEM) -> bool: + values: Collection[STATE_DICT_ITEM] + if isinstance(value, Mapping): + values = value.values() + elif isinstance(value, list): + values = value + else: + return True + + for entry in values: + if isinstance(entry, (Mapping, list)) and not _is_terminal(entry): + return False + if keep_traversing is not None and keep_traversing(entry): + return False + return True + + def _traverse_obj(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None: + if _is_terminal(value): + visitor(path, value) + elif isinstance(value, Mapping): + for k, v in value.items(): + _traverse_obj(path + (str(k),), v) + elif isinstance(value, list): + for i, v in enumerate(value): + _traverse_obj(path + (i,), v) + + for key, value in state_dict.items(): + _traverse_obj((str(key),), value) + + +def set_element( + root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: STATE_DICT_ITEM +) -> None: + """Set ``value`` in ``root_dict`` along the ``path`` object path.""" + cur_container = cast(CONTAINER_TYPE, root_dict) + + def extend_list(lst: List[STATE_DICT_ITEM], idx: int) -> None: + while len(lst) <= idx: + lst.append(None) + + for i in range(1, len(path)): + prev_key = path[i - 1] + key = path[i] + def_val = cast(STATE_DICT_ITEM, {} if type(key) == str else []) + + if isinstance(cur_container, Mapping): + cur_container = cast( + CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val) + ) + else: + extend_list(cur_container, prev_key) + if cur_container[prev_key] is None: + cur_container[prev_key] = def_val + cur_container = cur_container[prev_key] + + key = path[-1] + if type(key) == int: + extend_list(cast(List[STATE_DICT_ITEM], cur_container), key) + + cur_container[key] = value + + +def get_element( + root_dict: STATE_DICT_TYPE, + path: OBJ_PATH, + default_value: Optional[T] = None, +) -> Optional[T]: + """Retrieve the value at ``path``from ``root_dict``, returning ``default_value`` if not found.""" + cur_value = cast(CONTAINER_TYPE, root_dict) + for part in path: + if type(part) is int: + if not isinstance(cur_value, list) or len(cur_value) < part: + return default_value + elif not isinstance(cur_value, Mapping) or part not in cur_value: + return default_value + + cur_value = cast(CONTAINER_TYPE, cur_value[part]) + return cast(Optional[T], cur_value) + + +def _print_nested( + value: STATE_DICT_ITEM, + prefix: str = "", + print_fun: Callable[[str], None] = print, +) -> None: + if type(value) is ShardedTensor: + print_fun(f"{prefix} ShardedTensor size: {value.size()}") + for shard in value.local_shards(): + _print_nested( + shard.tensor, + f"{shard.metadata.shard_offsets} ", + print_fun=print_fun, + ) + elif type(value) is (DTensor): + print_fun(f"{prefix} DistributedTensor size: {value.size()}") + # TODO: add local offset for _local_tensor in print_nested. + _print_nested( + value._local_tensor, + print_fun=print_fun, + ) + elif isinstance(value, torch.Tensor): + print_fun(f"{prefix} Tensor size: {value.size()}") + else: + print_fun(f"{prefix} Type: {type(value)}") + + +def print_tensor( + path: OBJ_PATH, + value: STATE_DICT_ITEM, + print_fun: Callable[[str], None] = print, +) -> None: + """ + Use this callback with traverse_state_dict to print its content. + + By default the content is printed using the builtin ``print`` but this can + be change by passing a different ``print_fun` callable. 
+ """ + _print_nested(value, prefix=str(path), print_fun=print_fun) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py new file mode 100644 index 0000000000000000000000000000000000000000..828685103261d82c6a84010da0260a062d743a2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py @@ -0,0 +1,41 @@ +import traceback as tb +from typing import Any, Dict, Tuple + +WRAPPED_EXCEPTION = Tuple[BaseException, tb.StackSummary] + +__all__ = ["CheckpointException"] + + +def _wrap_exception(exc: BaseException) -> WRAPPED_EXCEPTION: + return (exc, tb.extract_tb(exc.__traceback__)) + + +def _is_wrapped_exception(obj: Any) -> bool: + if not isinstance(obj, tuple): + return False + if len(obj) != 2: + return False + return isinstance(obj[0], BaseException) and isinstance(obj[1], tb.StackSummary) + + +class CheckpointException(BaseException): + """Exception raised if failure was detected as part of a checkpoint load or save.""" + + def __init__(self, msg: str, failures: Dict[int, WRAPPED_EXCEPTION]): + super().__init__(msg, failures) + self._failures = failures + + @property + def failures(self) -> Dict[int, WRAPPED_EXCEPTION]: + """Return a dictionary mapping node ranks to their associated exceptions in case of failure.""" + return self._failures + + def __str__(self): + str = f"CheckpointException ranks:{self._failures.keys()}\n" + for rank, exc_pair in self._failures.items(): + exc, trace = exc_pair + str += f"Traceback (most recent call last): (RANK {rank})\n" + if trace is not None: + str += "".join(tb.format_list(trace)) + str += "".join(tb.format_exception_only(type(exc), value=exc)) + return str diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py new file mode 100644 index 0000000000000000000000000000000000000000..22f9be1964f85577a597345ec91a017842caa539 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py @@ -0,0 +1,420 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import dataclasses +import io +import logging +import operator +from collections import ChainMap +from functools import reduce +from typing import Any, cast, Dict, List, Optional, Tuple, Union + +import torch +from torch.distributed._shard._utils import narrow_tensor_by_index +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint._dedup_save_plans import dedup_save_plans +from torch.distributed.checkpoint._nested_dict import ( + FLATTEN_MAPPING, + flatten_state_dict, +) +from torch.distributed.checkpoint._sharded_tensor_utils import _flatten_sharded_tensors +from torch.distributed.checkpoint._traverse import set_element +from torch.distributed.checkpoint.metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + Metadata, + MetadataIndex, + STATE_DICT_TYPE, + STORAGE_TYPES, + TensorStorageMetadata, +) +from torch.distributed.checkpoint.planner import ( + LoadPlan, + LoadPlanner, + ReadItem, + SavePlan, + SavePlanner, + WriteItem, + WriteItemType, +) +from torch.distributed.checkpoint.planner_helpers import ( + _create_default_metadata_only_plan, + _create_read_items, + _create_write_items, + _init_state_dict, +) +from torch.distributed.checkpoint.utils import find_state_dict_object + +logger: logging.Logger = logging.getLogger(__name__) + + +__all__ = [ + "DefaultSavePlanner", + "DefaultLoadPlanner", + "create_default_local_load_plan", + "create_default_global_load_plan", + "create_default_local_save_plan", + "create_default_global_save_plan", +] + + +# TODO: Update docstrings for default_planner.py +class DefaultSavePlanner(SavePlanner): + mappings: FLATTEN_MAPPING + + def __init__( + self, + flatten_state_dict: bool = True, + flatten_sharded_tensors: bool = True, + dedup_replicated_tensors: Optional[bool] = None, + ) -> None: + self.flatten_state_dict = flatten_state_dict + self.flatten_sharded_tensors = flatten_sharded_tensors + self.mappings = {} + + if dedup_replicated_tensors is not None: + logger.warning( + "DefaultSavePlanner's `dedup_replicated_tensors` argument is being " + "deprecated, and no longer has any effect. Please remove this argument " + "from your call." + ) + + def set_up_planner(self, state_dict: STATE_DICT_TYPE, is_coordinator: bool) -> None: + if self.flatten_state_dict: + state_dict, self.mappings = flatten_state_dict(state_dict) + if self.flatten_sharded_tensors: + state_dict = _flatten_sharded_tensors(state_dict) + self.state_dict = state_dict + self.is_coordinator = is_coordinator + + def create_local_plan(self) -> SavePlan: + plan = create_default_local_save_plan(self.state_dict, self.is_coordinator) + if self.flatten_state_dict: + plan = dataclasses.replace(plan, planner_data=self.mappings) + self.plan = plan + + return self.plan + + def create_global_plan( + self, all_plans: List[SavePlan] + ) -> Tuple[List[SavePlan], Metadata]: + all_plans = dedup_save_plans(all_plans) + + global_plan, metadata = create_default_global_save_plan(all_plans) + + if self.flatten_state_dict: + # | does not work for Python 3.8 or older version. 
+ # merged_mappings = reduce( + # lambda x, y: x | y, (p.planner_data for p in global_plan) + # ) + planner_data_dict = [p.planner_data for p in global_plan] + merged_mappings = dict(ChainMap(*planner_data_dict)) + metadata = dataclasses.replace(metadata, planner_data=merged_mappings) + + if not _validate_global_plan(global_plan, metadata): + raise ValueError("Failed to validate global plan") + + self.global_plan = global_plan + self.metadata = metadata + + return self.global_plan, self.metadata + + def finish_plan(self, new_plan: SavePlan) -> SavePlan: + self.plan = new_plan + return new_plan + + def resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]: + object = self.lookup_object(write_item.index) + return self.transform_object(write_item, object) + + def lookup_object(self, index: MetadataIndex) -> Any: + """Extension from the planner interface to make it easy to extend the default planner.""" + return find_state_dict_object(self.state_dict, index) + + def transform_object(self, write_item: WriteItem, object: Any): + """Extension from the planner interface to make it easy to extend the default planner.""" + if write_item.type == WriteItemType.BYTE_IO: + bytes = io.BytesIO() + torch.save(object, bytes) + object = bytes + return object + + +class DefaultLoadPlanner(LoadPlanner): + """ + DefaultLoadPlanner that adds multiple features on top of LoadPlanner. + + In particular it adds the following: + + flatten_state_dict: Handle state_dict with nested dicts + flatten_sharded_tensors: For FSDP in 2D parallel mode + """ + + original_state_dict: STATE_DICT_TYPE + mappings: FLATTEN_MAPPING + + def __init__( + self, + flatten_state_dict: bool = True, + flatten_sharded_tensors: bool = True, + ) -> None: + self.flatten_state_dict = flatten_state_dict + self.flatten_sharded_tensors = flatten_sharded_tensors + self.original_state_dict = {} + self.mappings = {} + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Metadata, + is_coordinator: bool, + ) -> None: + _init_state_dict(state_dict) + self.original_state_dict = state_dict + + if self.flatten_sharded_tensors: + state_dict = _flatten_sharded_tensors(state_dict) + + if self.flatten_state_dict: + state_dict, self.mappings = flatten_state_dict(state_dict) + + self.state_dict = state_dict + self.metadata = metadata + self.is_coordinator = is_coordinator + + def create_local_plan(self) -> LoadPlan: + return create_default_local_load_plan(self.state_dict, self.metadata) + + def create_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]: + return create_default_global_load_plan(global_plan) + + def finish_plan(self, new_plan: LoadPlan) -> LoadPlan: + return new_plan + + def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None: + if self.flatten_state_dict: + set_element( + self.original_state_dict, + self.mappings[read_item.dest_index.fqn], + torch.load(value), + ) + else: + self.state_dict[read_item.dest_index.fqn] = torch.load(value) + + def resolve_tensor(self, read_item: ReadItem): + tensor = self.lookup_tensor(read_item.dest_index) + return self.transform_tensor(read_item, tensor) + + def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None: + pass + + def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor: + """Extension from the planner interface to make it easy to extend the default planner.""" + return find_state_dict_object(self.state_dict, index) + + def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor): + """Extension from the 
planner interface to make it easy to extend the default planner.""" + return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths) + + +def create_default_local_load_plan( + state_dict: Dict[str, Any], + metadata: Metadata, +) -> LoadPlan: + requests = [] + """ + Create the ``LoadPlan`` used by DefaultLoadPlanner. + + It produces one read item per value in ``state_dict`` using the metadata in ``metadata``. + + The default behavior is to match key exactly between state_dict and metadata. + It handles resharding by issuing multiple read requests against storage in order to match + load requirements. + """ + + for fqn, obj in state_dict.items(): + md = metadata.state_dict_metadata[fqn] + # Since DTensor supports submesh, adding extra check to ensure _create_read_items() + # gets called only when the current rank is part of the mesh for the corresponding DTensor. + if isinstance(obj, DTensor): + if obj.device_mesh.get_coordinate() is not None: + requests += _create_read_items(fqn, md, obj) + else: + requests += _create_read_items(fqn, md, obj) + + return LoadPlan(requests) + + +def create_default_global_load_plan( + all_plans: List[LoadPlan], +) -> List[LoadPlan]: + """ + Create global load plan used by DefaultLoadPlanner. + + The default load behavior involved no global coordination and this function + currently doesn't change the local plans. + """ + return all_plans + + +def create_default_local_save_plan( + state_dict: Dict[str, Any], is_coordinator: bool +) -> SavePlan: + """ + Create the ``SavePlan`` used by DefaultSavePlanner. + + On non-coordinator ranks, this function ignores tensors and non-tensor objects, + only producing writes for ShardedTensor objects. + + On the coordinator rank, produce writes for all values. + """ + requests = [] + for fqn, obj in state_dict.items(): + # Since DTensor supports submesh, adding extra check to ensure _create_write_items() + # gets called only when the current rank is part of the mesh for the corresponding DTensor. + if isinstance(obj, DTensor): + if obj.device_mesh.get_coordinate() is not None: + requests += _create_write_items(fqn, obj) + elif isinstance(obj, (torch.Tensor)) or is_coordinator: + requests += _create_write_items(fqn, obj) + + return SavePlan(requests) + + +def create_default_global_save_plan( + all_plans: List[SavePlan], + rewrite_index_hints: bool = True, +) -> Tuple[List[SavePlan], Metadata]: + """ + Create the global plan and metadata used by DefaultSavePlanner. + + Metadata is produced by concatenating the metadata of all ``WriteItem`` from the supplied plans. + + The only global planning change is to update index hints in all ``MetadataIndex`` objects if + ``rewrite_index_hints`` is True. 
+ """ + md: Dict[str, STORAGE_TYPES] = {} + new_plans = [] + for plan in all_plans: + new_items = [] + for item in plan.items: + if not item.type == WriteItemType.SHARD: + assert item.index.fqn not in md + + if item.type == WriteItemType.BYTE_IO: + md[item.index.fqn] = BytesStorageMetadata() + new_items.append(item) + else: + assert item.tensor_data is not None + tensor_md = cast( + TensorStorageMetadata, + md.setdefault( + item.index.fqn, + TensorStorageMetadata( + properties=item.tensor_data.properties, + size=item.tensor_data.size, + chunks=[], + ), + ), + ) + new_item = item + if rewrite_index_hints: + new_index = dataclasses.replace( + item.index, index=len(tensor_md.chunks) + ) + new_item = dataclasses.replace(item, index=new_index) + new_items.append(new_item) + + assert ( + item.tensor_data.chunk is not None + ), f""" + Cannot create MD for tensor without bounds. + FQN: {item.index.fqn} + """ + tensor_md.chunks.append(item.tensor_data.chunk) + new_plans.append(dataclasses.replace(plan, items=new_items)) + return (new_plans, Metadata(md)) + + +def _create_default_local_metadata(state_dict: STATE_DICT_TYPE) -> Metadata: + """Return the ``Metadata`` if DefaultSavePlanner was used to checkpoint ``state_dict``.""" + plan = _create_default_metadata_only_plan(state_dict) + _, md = create_default_global_save_plan([plan]) + return md + + +def _check_box_overlap(box0: ChunkStorageMetadata, box1: ChunkStorageMetadata) -> bool: + """Check if two boxes overlap. Tuples are (offset, lengths).""" + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. + ndims = len(box0.offsets) + for i in range(ndims): + if box0.offsets[i] >= box1.offsets[i] + box1.sizes[i]: + return False + if box1.offsets[i] >= box0.offsets[i] + box0.sizes[i]: + return False + + return True + + +def _check_box_bounds( + outer_box_size: torch.Size, inner_box: ChunkStorageMetadata +) -> bool: + for i in range(len(outer_box_size)): + if inner_box.offsets[i] < 0: + return False + if inner_box.sizes[i] < 0: + return False + if inner_box.offsets[i] + inner_box.sizes[i] > outer_box_size[i]: + return False + + return True + + +def _validate_global_plan(global_plan: List[SavePlan], metadata: Metadata) -> bool: + all_good = True + for key, value in metadata.state_dict_metadata.items(): + if isinstance(value, BytesStorageMetadata): + continue + if len(value.size) == 0: + continue + chunks_volume = 0 + for chunk_idx, chunk0 in enumerate(value.chunks): + # Compute the volume + if not _check_box_bounds(value.size, chunk0): + logger.warning( + """ + key:%s has out of bounds chunk: + tensor-size:%s chunk: %s + """, + key, + value.size, + chunk0, + ) + all_good = False + chunks_volume += reduce(operator.mul, chunk0.sizes, 1) + + # Check for overlap + for chunk1 in value.chunks[chunk_idx + 1 :]: + if _check_box_overlap(chunk0, chunk1): + logger.warning( + "key:%s has overlapping chunks: %s %s", key, chunk0, chunk1 + ) + all_good = False + + # Check whether combined chunk cover the whole tensor + tensor_volume = reduce(operator.mul, value.size, 1) + if chunks_volume != tensor_volume: + logger.warning( + """ + key:%s invalid fill tensor-volume: + %s chunks-volume: %s + """, + key, + tensor_volume, + chunks_volume, + ) + all_good = False + + return all_good diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/filesystem.py 
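For orientation, here is a minimal usage sketch of the two default planners driven through the public checkpoint API, following the same pattern the docstrings elsewhere in this patch use. The toy Linear module, the /tmp/ckpt path, and an already-initialized process group are assumptions of the example only, and the exact entry-point names vary somewhat across torch releases:

import torch
import torch.distributed.checkpoint as dist_cp

model = torch.nn.Linear(8, 8)                      # placeholder module for the sketch
state_dict = {"model": model.state_dict()}

# Save: DefaultSavePlanner flattens nested dicts and dedups replicated tensors.
dist_cp.save_state_dict(
    state_dict=state_dict,
    storage_writer=dist_cp.FileSystemWriter("/tmp/ckpt"),
    planner=dist_cp.DefaultSavePlanner(),
)

# Load happens in place into a pre-populated state_dict.
to_load = {"model": model.state_dict()}
dist_cp.load_state_dict(
    state_dict=to_load,
    storage_reader=dist_cp.FileSystemReader("/tmp/ckpt"),
    planner=dist_cp.DefaultLoadPlanner(),
)

Both planners act as proxies over the supplied state_dict, which is why the extension hooks documented further down (set_up_planner, resolve_data, lookup_tensor) are the usual customization points.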
b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..d474afa39b2e5d37ec8c3189afc79ded102d4f97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/filesystem.py @@ -0,0 +1,618 @@ +import collections +import dataclasses +import io +import os +import pickle +import queue +import threading +from abc import ABC, abstractmethod +from contextlib import contextmanager +from dataclasses import dataclass +from pathlib import Path +from typing import ( + Callable, + cast, + Dict, + Generator, + IO, + Iterable, + Iterator, + List, + Optional, + Tuple, + Union, +) + +import torch +from torch import Tensor +from torch._utils import _get_available_device_type, _get_device_module +from torch.distributed._shard._utils import narrow_tensor_by_index +from torch.futures import Future + +from .metadata import Metadata, MetadataIndex +from .planner import ( + LoadItemType, + LoadPlan, + LoadPlanner, + ReadItem, + SavePlan, + SavePlanner, + WriteItem, + WriteItemType, +) +from .storage import StorageReader, StorageWriter, WriteResult +from .utils import _create_file_view + +__all__ = ["FileSystemWriter", "FileSystemReader"] + + +@dataclass +class _StorageInfo: + """This is the per entry storage info.""" + + relative_path: str + offset: int + length: int + + +@dataclass +class _StoragePrefix: + prefix: str + + +DEFAULT_SUFFIX = ".distcp" + + +class _TensorLoader(ABC): + @abstractmethod + def add(self, size: int, obj: object) -> None: + pass + + @abstractmethod + def start_loading(self) -> None: + pass + + @abstractmethod + def values(self) -> Iterator[Tuple[torch.Tensor, object]]: + pass + + +class _SerialCpuLoader(_TensorLoader): + def __init__(self, resolve_fun: Callable) -> None: + self.resolve_fun = resolve_fun + self.items: List[Tuple[int, object]] = [] + + def add(self, size: int, obj: object) -> None: + self.items.append((size, obj)) + + def start_loading(self) -> None: + pass + + def values(self) -> Iterator[Tuple[torch.Tensor, object]]: + for _, obj in self.items: + tensor = self.resolve_fun(obj).detach() + tensor = tensor.cpu() + if tensor.storage().size() != tensor.numel(): + tensor = tensor.clone() + yield ( + tensor, + obj, + ) + + +class _OverlappingCpuLoader(_TensorLoader): + def __init__( + self, + resolve_fun: Callable, + stream: Optional[torch.Stream] = None, + inflight_threshhold: int = 1_000_000, + ) -> None: + self.resolve_fun = resolve_fun + self.items: List[Tuple[int, object]] = [] + self.inflight_threshhold = inflight_threshhold + self.in_flight_data = 0 + self.current_items: collections.deque = collections.deque() + self.idx = 0 + self.started = False + self.device_type = ( + stream.device_type if stream else _get_available_device_type() + ) + self.device_module = _get_device_module(self.device_type) + self.stream = cast( + torch.cuda.Stream, stream or self.device_module.current_stream() + ) + if self.stream != self.device_module.current_stream(): + self.stream.wait_stream(self.device_module.current_stream()) + + @property + def _done(self) -> bool: + return self.idx >= len(self.items) + + def _drain(self) -> List[Tuple[torch.Tensor, object]]: + drained = [] + if self.in_flight_data >= self.inflight_threshhold: + self.stream.synchronize() + while self.in_flight_data >= self.inflight_threshhold: + val = self.current_items.popleft() + self.in_flight_data -= val[0].numel() * val[0].element_size() + drained.append(val) + return drained + + def _refill(self) -> None: + 
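# _refill resolves pending items and issues device-to-CPU copies on self.stream
# (non_blocking for device tensors) until the in-flight byte budget given by
# inflight_threshhold is reached; _drain()/values() later synchronize the stream
# and hand the copied tensors out.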
with self.device_module.stream(self.stream): + while not self._done and self.in_flight_data < self.inflight_threshhold: + _, obj = self.items[self.idx] + self.idx += 1 + tensor = self.resolve_fun(obj).detach() + if tensor.device.type == self.device_type: + tensor = tensor.to(device="cpu", non_blocking=True) + elif tensor.device == torch.device("cpu"): + if ( + tensor.untyped_storage().size() + != tensor.numel() * tensor.itemsize + ): + # this forces the tensor to be both contiguous and with minimal storage + tensor = tensor.clone() + + self.current_items.append( + ( + tensor, + obj, + ) + ) + self.in_flight_data += tensor.numel() * tensor.element_size() + + def _finish(self) -> Iterable[Tuple[torch.Tensor, object]]: + assert self._done + if len(self.current_items) > 0: + self.stream.synchronize() + return self.current_items + + def add(self, size: int, obj: object) -> None: + if self.started: + raise RuntimeError("cannot add items after loading started") + self.items.append((size, obj)) + + def start_loading(self) -> None: + if self.started: + return + self.started = True + self.items.sort(key=lambda x: x[0]) + self._refill() + + def values(self) -> Iterator[Tuple[torch.Tensor, object]]: + self.start_loading() + while not self._done: + drained = self._drain() + self._refill() + yield from drained + + yield from self._finish() + + +def _item_size(item: WriteItem) -> int: + size = 1 + assert item.tensor_data is not None + # can't use math.prod as PT needs to support older python + for s in item.tensor_data.size: + size *= s + + dtype = item.tensor_data.properties.dtype + return size * torch._utils._element_size(dtype) + + +def _split_by_size_and_type(bins: int, items: List[WriteItem]) -> List[List[WriteItem]]: + if bins == 1: + return [items] + + bytes_w = [wi for wi in items if wi.type == WriteItemType.BYTE_IO] + tensor_w = [wi for wi in items if wi.type != WriteItemType.BYTE_IO] + + buckets: List[List[WriteItem]] = [[] for _ in range(bins)] + bucket_sizes = [0 for _ in range(bins)] + + tensor_w.sort(key=_item_size, reverse=True) + + for i, wi in enumerate(bytes_w): + buckets[i % bins].append(wi) + + for wi in tensor_w: + # TODO replace with headq + idx = min(enumerate(bucket_sizes), key=lambda x: x[1])[0] + buckets[idx].append(wi) + bucket_sizes[idx] += _item_size(wi) + + return buckets + + +def _write_item( + stream: io.IOBase, + data: Union[io.BytesIO, torch.Tensor], + write_item: WriteItem, + storage_key: str, +) -> WriteResult: + offset = stream.tell() + + if write_item.type == WriteItemType.BYTE_IO: + assert isinstance(data, io.BytesIO) + stream.write(data.getbuffer()) + else: + assert isinstance(data, torch.Tensor) + assert data.device == torch.device("cpu") + torch.save(data, cast(IO[bytes], stream)) + length = stream.tell() - offset + + return WriteResult( + index=write_item.index, + size_in_bytes=length, + storage_data=_StorageInfo(storage_key, offset, length), + ) + + +def _write_files_from_queue( + create_stream: Callable, + file_queue: queue.Queue, + result_queue: queue.Queue, + planner: SavePlanner, + inflight_threshhold: int, + use_fsync: bool, + thread_count: int, +) -> None: + try: + while True: + file_name, storage_key, write_items = file_queue.get_nowait() + loader: _TensorLoader + + custom_backend_name = torch._C._get_privateuse1_backend_name() + custom_device_mod = getattr(torch, custom_backend_name, None) + + # TODO: Using the OverlappingCpuLoader with multiple threads creates significant + # performance degredation, observed as being related to cuda stream syncs. 
We + # should try to fix this and use _OverlappingCpuLoader for all threaded cases + if ( + thread_count == 1 + and ( + torch.cuda.is_available() + or (custom_device_mod and custom_device_mod.is_available()) + ) + and inflight_threshhold > 0 + ): + loader = _OverlappingCpuLoader( + planner.resolve_data, + inflight_threshhold=inflight_threshhold, + ) + else: + loader = _SerialCpuLoader( + planner.resolve_data, + ) + + tensor_w = [wi for wi in write_items if wi.type != WriteItemType.BYTE_IO] + for write_item in tensor_w: + loader.add(_item_size(write_item), write_item) + loader.start_loading() + + bytes_w = [wi for wi in write_items if wi.type == WriteItemType.BYTE_IO] + write_results = [] + + with create_stream(file_name, "wb") as stream: + for write_item in bytes_w: + data = planner.resolve_data(write_item) + write_results.append( + _write_item(stream, data, write_item, storage_key) + ) + + for tensor, write_item in loader.values(): + assert tensor.is_cpu + write_results.append( + _write_item(stream, tensor, write_item, storage_key) + ) + + if use_fsync: + try: + os.fsync(stream.fileno()) + except AttributeError: + os.sync() + result_queue.put(write_results) + except queue.Empty: + pass + + +class FileSystemBase(ABC): + @contextmanager + @abstractmethod + def create_stream( + self, path: Union[str, os.PathLike], mode: str + ) -> Generator[io.IOBase, None, None]: + ... + + @abstractmethod + def concat_path( + self, path: Union[str, os.PathLike], suffix: str + ) -> Union[str, os.PathLike]: + ... + + @abstractmethod + def rename( + self, path: Union[str, os.PathLike], new_path: Union[str, os.PathLike] + ) -> None: + ... + + @abstractmethod + def init_path(self, path: Union[str, os.PathLike]) -> Union[str, os.PathLike]: + ... + + @abstractmethod + def mkdir(self, path: Union[str, os.PathLike]) -> None: + ... + + @classmethod + @abstractmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + ... + + +class FileSystem(FileSystemBase): + @contextmanager + def create_stream( + self, path: Union[str, os.PathLike], mode: str + ) -> Generator[io.IOBase, None, None]: + with cast(Path, path).open(mode) as stream: + yield cast(io.IOBase, stream) + + def concat_path( + self, path: Union[str, os.PathLike], suffix: str + ) -> Union[str, os.PathLike]: + return cast(Path, path) / suffix + + def init_path(self, path: Union[str, os.PathLike]) -> Union[str, os.PathLike]: + if not isinstance(path, Path): + path = Path(path) + return path + + def rename( + self, path: Union[str, os.PathLike], new_path: Union[str, os.PathLike] + ) -> None: + cast(Path, path).rename(cast(Path, new_path)) + + def mkdir(self, path: Union[str, os.PathLike]) -> None: + cast(Path, path).mkdir(parents=True, exist_ok=True) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + if isinstance(checkpoint_id, Path): + return True + + if "://" in str(checkpoint_id): + return False + + for p in Path(checkpoint_id).parents: + if p.exists() and os.access(str(p), os.W_OK): + return True + + return False + + +class FileSystemWriter(StorageWriter): + """ + Basic implementation of StorageWriter using file IO. + + This implementation makes the following assumptions and simplifications: + + * The checkpoint path is an empty or non-existing directory. + * File creation is atomic + + The checkpoint consist of one file per write request plus + a `.metadata` file with the serialized metadata. 
+ + """ + + def __init__( + self, + path: Union[str, os.PathLike], + single_file_per_rank: bool = True, + sync_files: bool = True, + thread_count: int = 1, + per_thread_copy_ahead: int = 10_000_000, + ) -> None: + """ + Initialize the writer pointing to `path`. + + Args: + path: directory where the checkpoint will be written to. + single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True. + sync_files : force files to be synced to permanent storage. Default to True. + thread_count: Number of IO threads to use to write. Default to 1. + per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb. + + N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure. + """ + super().__init__() + self.fs = FileSystem() + self.path = self.fs.init_path(path) + self.single_file_per_rank = single_file_per_rank + self.sync_files = sync_files + self.thread_count = thread_count + self.per_thread_copy_ahead = per_thread_copy_ahead + + def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: + if checkpoint_id: + self.path = self.fs.init_path(checkpoint_id) + + def set_up_storage_writer(self, is_coordinator: bool) -> None: + pass + + def prepare_local_plan(self, plan: SavePlan) -> SavePlan: + self.fs.mkdir(self.path) + return plan + + def prepare_global_plan(self, global_plan: List[SavePlan]) -> List[SavePlan]: + new_plans = [ + dataclasses.replace(plan, storage_data=_StoragePrefix(f"__{i}_")) + for i, plan in enumerate(global_plan) + ] + return new_plans + + def write_data( + self, + plan: SavePlan, + planner: SavePlanner, + ) -> Future[List[WriteResult]]: + storage_plan: _StoragePrefix = plan.storage_data + file_count = 0 + + def gen_file(): + nonlocal file_count + file_name = f"{storage_plan.prefix}{file_count}{DEFAULT_SUFFIX}" + file_count += 1 + return file_name + + file_queue: queue.Queue = queue.Queue() + if self.single_file_per_rank: + for bucket in _split_by_size_and_type(self.thread_count, plan.items): + file_name = gen_file() + path = self.fs.concat_path(self.path, file_name) + file_queue.put((path, file_name, bucket)) + else: + for item in plan.items: + file_name = gen_file() + path = self.fs.concat_path(self.path, file_name) + file_queue.put((path, file_name, [item])) + + result_queue: queue.Queue = queue.Queue() + + threads = [] + for _ in range(1, self.thread_count): + t = threading.Thread( + target=_write_files_from_queue, + args=( + self.fs.create_stream, + file_queue, + result_queue, + planner, + self.per_thread_copy_ahead, + self.sync_files, + self.thread_count, + ), + ) + t.start() + threads.append(t) + + _write_files_from_queue( + create_stream=self.fs.create_stream, + file_queue=file_queue, + result_queue=result_queue, + planner=planner, + inflight_threshhold=self.per_thread_copy_ahead, + use_fsync=self.sync_files, + thread_count=self.thread_count, + ) + + for t in threads: + t.join() + + res = [] + try: + while True: + res += result_queue.get_nowait() + except queue.Empty: + pass + + fut: Future[List[WriteResult]] = Future() + fut.set_result(res) + return fut + + def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None: + storage_md = dict() + for wr_list in results: + storage_md.update({wr.index: wr.storage_data for wr in wr_list}) + metadata.storage_data = storage_md + tmp_path = cast(Path, self.fs.concat_path(self.path, ".metadata.tmp")) + meta_path = cast(Path, self.fs.concat_path(self.path, ".metadata")) + 
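# The metadata is pickled to ".metadata.tmp" first and renamed to ".metadata"
# only after the dump (and the optional fsync) completes, so a partially
# written metadata file is never left under the final name.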
with self.fs.create_stream(tmp_path, "wb") as metadata_file: + pickle.dump(metadata, metadata_file) + if self.sync_files: + try: + os.fsync(metadata_file.fileno()) + except AttributeError: + os.sync() + + self.fs.rename(tmp_path, meta_path) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + return FileSystem.validate_checkpoint_id(checkpoint_id) + + +class FileSystemReader(StorageReader): + def __init__(self, path: Union[str, os.PathLike]) -> None: + super().__init__() + self.fs = FileSystem() + self.path = self.fs.init_path(path) + self.storage_data: Dict[MetadataIndex, _StorageInfo] = dict() + + def _slice_file(self, file, sinfo: _StorageInfo) -> io.IOBase: + return _create_file_view(file, sinfo.offset, sinfo.length) + + def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: + self.storage_data = dict() + if checkpoint_id: + self.path = self.fs.init_path(checkpoint_id) + + def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]: + # group requests by file + per_file: Dict[str, List[ReadItem]] = dict() + for read_item in plan.items: + item_md = self.storage_data[read_item.storage_index] + path = item_md.relative_path + per_file.setdefault(path, []).append(read_item) + + for relative_path, reqs in per_file.items(): + new_path = self.fs.concat_path(self.path, relative_path) + with self.fs.create_stream(new_path, "rb") as stream: + # TODO sort by offset and cache the reading + for req in reqs: + item_md = self.storage_data[req.storage_index] + file_slice = self._slice_file(stream, item_md) + if req.type == LoadItemType.BYTE_IO: + read_bytes = io.BytesIO(file_slice.read(item_md.length)) + read_bytes.seek(0) + planner.load_bytes(req, read_bytes) + else: + tensor = cast( + Tensor, + torch.load(cast(IO[bytes], file_slice), map_location="cpu"), + ) + tensor = narrow_tensor_by_index( + tensor, req.storage_offsets, req.lengths + ) + target_tensor = planner.resolve_tensor(req).detach() + + assert ( + target_tensor.size() == tensor.size() + ), f"req {req.storage_index} mismatch sizes {target_tensor.size()} vs {tensor.size()}" + target_tensor.copy_(tensor) + planner.commit_tensor(req, target_tensor) + + fut: Future = Future() + fut.set_result(None) + return fut + + # Implementing the abstract function in StorageReader + def read_metadata(self) -> Metadata: + path = self.fs.concat_path(self.path, ".metadata") + with self.fs.create_stream(path, "rb") as metadata_file: + return pickle.load(metadata_file) + + def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: + self.storage_data = metadata.storage_data + assert self.storage_data is not None + + def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan: + return plan + + def prepare_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]: + return global_plan + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + return FileSystem.validate_checkpoint_id(checkpoint_id) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dae880c7ab6818ce8049d006982bfdd48fb97cf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py @@ -0,0 +1,310 @@ +import argparse +import os +from enum import Enum +from typing import cast, Dict, List, Optional, Union + +import torch 
+import torch.distributed as dist +from torch.distributed._shard._utils import narrow_tensor_by_index +from torch.distributed.checkpoint import FileSystemReader, FileSystemWriter +from torch.distributed.checkpoint._nested_dict import flatten_state_dict +from torch.distributed.checkpoint._traverse import set_element +from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner +from torch.distributed.checkpoint.metadata import ( + Metadata, + STATE_DICT_TYPE, + STORAGE_TYPES, + TensorProperties, + TensorStorageMetadata, +) +from torch.distributed.checkpoint.planner import LoadItemType, LoadPlan, LoadPlanner +from torch.distributed.checkpoint.planner_helpers import _create_chunk_list +from torch.distributed.checkpoint.state_dict_loader import _load_state_dict +from torch.distributed.checkpoint.state_dict_saver import _save_state_dict +from torch.distributed.checkpoint.storage import StorageReader +from torch.futures import Future + + +__all__ = [ + "dcp_to_torch_save", + "torch_save_to_dcp", + "BroadcastingTorchSaveReader", + "DynamicMetaLoadPlanner", +] + + +class _EmptyStateDictLoadPlanner(DefaultLoadPlanner): + """ + Extension of DefaultLoadPlanner, which rebuilds state_dict from the saved metadata. + Useful for loading in state_dict without first initializing a model, such as + when converting a DCP checkpoint into a Torch save file. + + . N.B. `state_dict` must be an empty dictionary when used with this LoadPlanner + + .. warning:: + Because the entire state dict is initialized, It's recommended to only utilize + this LoadPlanner on a single rank or process to avoid OOM. + + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Metadata, + is_coordinator: bool, + ) -> None: + assert not state_dict + + # rebuild the state dict from the metadata + for k, v in metadata.state_dict_metadata.items(): + if isinstance(v, TensorStorageMetadata): + v = torch.empty(v.size, dtype=v.properties.dtype) # type: ignore[assignment] + if k in metadata.planner_data: + set_element(state_dict, metadata.planner_data[k], v) + else: + state_dict[k] = v + + super().set_up_planner(state_dict, metadata, is_coordinator) + + +class BroadcastingTorchSaveReader(StorageReader): + """ + StorageReader for reading a Torch Save file. This reader will read the entire checkpoint + on the coordinator rank, and then broadcast and shard each tensor to all ranks. + + . N.B. Intended to be used with DynamicMetaLoadPlanner + + .. warning:: + Current implementation only supports loading Tensors. 
+ + >>> # xdoctest: +SKIP("undefined vars") + >>> sd = {"mode": model} + >>> dcp.load( + >>> sd, + >>> storage_reader=BroadcastingTorchSaveReader(), + >>> planner=DynamicMetaLoadPlanner(), + >>> checkpoint_id="path_to_model.pt" + >>> ) + """ + + def __init__( + self, + checkpoint_id: Optional[Union[str, os.PathLike]] = None, + coordinator_rank: int = 0, + ) -> None: + self.checkpoint_id = checkpoint_id + self.coordinator_rank = coordinator_rank + + def read_metadata(self) -> Metadata: + """Extends the default StorageReader to support building the metadata file""" + # Metadata is built in planner.set_up_planner, since we are not actually reading metadata from + # the disk + return Metadata(state_dict_metadata={}) + + def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]: + """ + Reads torch save data on the coordinator rank, and broadcast afterwards + this incurrs a communication cost, but avoids having to load + the entire checkpoint on each rank, hopefully preventing OOM issues + """ + planner = cast(DefaultLoadPlanner, planner) + + # data is read in on the coordinator rank, and broadcast afterwards + # this incurrs a communication cost, but it avoids having to load + # the entire checkpoint on each rank, hopefully preventing OOM issues + # TODO: read on each host, instead of only the coordinator + if self.is_coordinator: + assert self.checkpoint_id is not None + torch_state_dict = torch.load(self.checkpoint_id, map_location="cpu") + if planner.flatten_state_dict: + torch_state_dict, _ = flatten_state_dict(torch_state_dict) + else: + torch_state_dict = None + + for req in plan.items: + if req.type == LoadItemType.BYTE_IO: + raise RuntimeError( + f"Non-tensor value identified at {req.storage_index.fqn}. " + f"At this time {type(self).__name__} only supports loading Tensors." 
+ ) + + # Broadcast the tensor from the coordinator rank + if self.is_coordinator: + tensor = torch_state_dict[req.storage_index.fqn].cuda() + else: + tensor = torch.empty_like(planner.state_dict[req.storage_index.fqn]) + + dist.broadcast(tensor, src=self.coordinator_rank, async_op=False) + + tensor = narrow_tensor_by_index(tensor, req.storage_offsets, req.lengths) + target_tensor = planner.resolve_tensor(req).detach() + assert target_tensor.size() == tensor.size(), ( + f"req {req.storage_index} mismatch sizes, " + f"{target_tensor.size()} vs {tensor.size()}" + ) + target_tensor.copy_(tensor) + planner.commit_tensor(req, target_tensor) + + fut: Future = Future() + fut.set_result(None) + return fut + + def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: + """Implementation of the StorageReader method""" + self.is_coordinator = is_coordinator + if self.is_coordinator: + assert dist.get_rank() == self.coordinator_rank + + assert self.checkpoint_id is not None + + def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan: + """Implementation of the StorageReader method""" + return plan + + def prepare_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]: + """Implementation of the StorageReader method""" + return global_plan + + def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: + """Implementation of the StorageReader method""" + self.checkpoint_id = checkpoint_id + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + """Implementation of the StorageReader method""" + return os.path.isfile(checkpoint_id) + + +class DynamicMetaLoadPlanner(DefaultLoadPlanner): + """ + Extension of DefaultLoadPlanner, which creates a new Metadata object based on the passed in state dict, + avoiding the need to read metadata from disk. This is useful when reading formats which don't have a + metadata file, like Torch Save files. + + . N.B. Intended to be used with BroadcastingTorchSaveReader + + .. warning:: + Current implementation only supports loading Tensors. + + >>> # xdoctest: +SKIP("undefined vars") + >>> sd = {"mode": model} + >>> dcp.load( + >>> sd, + >>> storage_reader=BroadcastingTorchSaveReader(), + >>> planner=DynamicMetaLoadPlanner(), + >>> checkpoint_id="path_to_model.pt" + >>> ) + """ + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Metadata, + is_coordinator: bool, + ) -> None: + """Setups of the planner, extnding default behavior by creating the Metadata object from the state dict""" + super().set_up_planner(state_dict, metadata, is_coordinator) + + state_dict_metadata: Dict[str, STORAGE_TYPES] = {} + for key, tensor in self.state_dict.items(): + if not torch.is_tensor(tensor): + raise RuntimeError( + f"Non-tensor value identified at {key}. " + f"At this time {type(self).__name__} only supports loading Tensors." + ) + + state_dict_metadata[key] = TensorStorageMetadata( + TensorProperties(dtype=tensor.dtype), + tensor.size(), + _create_chunk_list(tensor), + ) + self.metadata = Metadata(state_dict_metadata=state_dict_metadata) + + +def dcp_to_torch_save( + dcp_checkpoint_dir: Union[str, os.PathLike], + torch_save_path: Union[str, os.PathLike], +): + """ + Given a directory containing a DCP checkpoint, this function will convert it into a + Torch save file. + + Args: + dcp_checkpoint_dir: Directory containing the DCP checkpoint. + torch_save_path: Filename to store the converted Torch save file. + + .. 
warning:: + To avoid OOM, it's recommended to only run this function on a single rank. + """ + sd: STATE_DICT_TYPE = {} + + _load_state_dict( + sd, + storage_reader=FileSystemReader(dcp_checkpoint_dir), + planner=_EmptyStateDictLoadPlanner(), + no_dist=True, + ) + torch.save(sd, torch_save_path) + + +def torch_save_to_dcp( + torch_save_path: Union[str, os.PathLike], + dcp_checkpoint_dir: Union[str, os.PathLike], +): + """ + Given the location of a torch save file, converts it into a DCP checkpoint. + + Args: + torch_save_path: Filename to store the converted Torch save file. + dcp_checkpoint_dir: Directory containing the DCP checkpoint. + + .. warning:: + To avoid OOM, it's recommended to only run this function on a single rank. + """ + + state_dict = torch.load(torch_save_path) + # we don't need stateful behavior here because the expectation is anything loaded by + # torch.load would not contain stateful objects. + _save_state_dict( + state_dict, storage_writer=FileSystemWriter(dcp_checkpoint_dir), no_dist=True + ) + + +if __name__ == "__main__": + + class FormatMode(Enum): + TORCH_TO_DCP = "torch_to_dcp" + DCP_TO_TORCH = "dcp_to_torch" + + # Parse command-line arguments + parser = argparse.ArgumentParser() + parser.add_argument( + "mode", + type=str, + help="Conversion mode", + choices=[m.value for m in FormatMode], + default=FormatMode.TORCH_TO_DCP, + ) + parser.add_argument("src", type=str, help="Path to the source model") + parser.add_argument("dst", type=str, help="Path to the destination model") + args = parser.parse_args() + + print( + f"Converting checkpoint from {args.src} to {args.dst} using method: '{args.mode}'" + ) + checkpoint_missing_warning = ( + f"No checkpoint found at {args.src}. Skipping conversion." + ) + if args.mode == FormatMode.TORCH_TO_DCP: + if os.path.isfile(args.src): + torch_save_to_dcp(args.src, args.dst) + else: + print(checkpoint_missing_warning) + elif args.mode == FormatMode.DCP_TO_TORCH: + if os.path.isdir(args.src): + dcp_to_torch_save(args.src, args.dst) + else: + print(checkpoint_missing_warning) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/fsspec.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/fsspec.py new file mode 100644 index 0000000000000000000000000000000000000000..ae97bdf8d53232353c76f8210f38cb4650b1657c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/fsspec.py @@ -0,0 +1,122 @@ +# Mypy will not try inferring the types of any 3rd party libraries installed. 
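A short round-trip sketch of the two conversion helpers defined above; the file and directory names are placeholders, and both calls are expected to run on a single rank to keep memory bounded:

from torch.distributed.checkpoint.format_utils import (
    dcp_to_torch_save,
    torch_save_to_dcp,
)

# DCP checkpoint directory -> single torch.save file.
dcp_to_torch_save("checkpoint/", "model_only.pt")

# ...and back: torch.save file -> DCP checkpoint directory.
torch_save_to_dcp("model_only.pt", "checkpoint_dcp/")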
+# mypy: ignore-errors + +import io +import os +from contextlib import contextmanager +from pathlib import Path +from typing import Generator, Optional, Union + +import fsspec +from fsspec import AbstractFileSystem +from fsspec.core import url_to_fs + +from torch.distributed.checkpoint.filesystem import ( + FileSystemBase, + FileSystemReader, + FileSystemWriter, +) + +__all__ = [ + "FsspecWriter", + "FsspecReader", +] + + +class FileSystem(FileSystemBase): + def __init__(self) -> None: + self.fs: Optional[AbstractFileSystem] = None + + @contextmanager + def create_stream( + self, path: Union[str, os.PathLike], mode: str + ) -> Generator[io.IOBase, None, None]: + assert self.fs is not None + with self.fs.transaction: + with fsspec.open(str(path), mode) as stream: + yield stream + + def concat_path( + self, path: Union[str, os.PathLike], suffix: str + ) -> Union[str, os.PathLike]: + return os.path.join(path, suffix) + + def init_path(self, path: Union[str, os.PathLike]) -> Union[str, os.PathLike]: + self.fs, _ = url_to_fs(path) + return path + + def rename( + self, path: Union[str, os.PathLike], new_path: Union[str, os.PathLike] + ) -> None: + self.fs.rename(path, new_path) + + def mkdir(self, path: [str, os.PathLike]) -> None: + self.fs.makedirs(path, exist_ok=True) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + if isinstance(checkpoint_id, Path): + return False + + try: + url_to_fs(checkpoint_id) + except ValueError as e: + return False + + return True + + +class FsspecWriter(FileSystemWriter): + """ + Basic implementation of StorageWriter using FFspec. + + This implementation makes the following assumptions and simplifications: + + * The checkpoint path is an empty or non-existing directory. + * File creation is atomic + + The checkpoint consist of one file per write request plus + a `.metadata` file with the serialized metadata. + + """ + + def __init__( + self, + path: Union[str, os.PathLike], + single_file_per_rank: bool = True, + sync_files: bool = True, + thread_count: int = 1, + per_thread_copy_ahead: int = 10_000_000, + ) -> None: + """ + Initialize the writer pointing to `path`. + + Args: + path: directory where the checkpoint will be written to. + single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True. + sync_files : force files to be synced to permanent storage. Default to True. + thread_count: Number of IO threads to use to write. Default to 1. + per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb. + + N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure. 
+ """ + super().__init__( + path, single_file_per_rank, sync_files, thread_count, per_thread_copy_ahead + ) + self.fs = FileSystem() + self.path = self.fs.init_path(path) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + return FileSystem.validate_checkpoint_id(checkpoint_id) + + +class FsspecReader(FileSystemReader): + def __init__(self, path: Union[str, os.PathLike]) -> None: + super().__init__(path) + self.fs = FileSystem() + self.path = self.fs.init_path(path) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + return FileSystem.check(checkpoint_id) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/metadata.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..bda0b12962fbc63bfc3260c5ad7bd99f6cfd6fed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/metadata.py @@ -0,0 +1,170 @@ +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Dict, List, Optional, Sequence, Union + +import torch +from torch.distributed.checkpoint.stateful import StatefulT + +__all__ = [ + "ChunkStorageMetadata", + "TensorStorageMetadata", + "BytesStorageMetadata", + "Metadata", + "MetadataIndex", + "TensorProperties", +] + + +@dataclass +class ChunkStorageMetadata: + """ + Each chunk is expected to have the same properties of the TensorStorageMetadata + that includes it. + """ + + offsets: torch.Size + sizes: torch.Size + + +class _MEM_FORMAT_ENCODING(Enum): + """Describe the memory format of a tensor.""" + + TORCH_CONTIGUOUS_FORMAT = 0 + TORCH_CHANNELS_LAST = 1 + TORCH_PRESERVE_FORMAT = 2 + + +@dataclass +class TensorProperties: + """Properties used to create :class:`Tensor`""" + + # Regular tensor fields + dtype: torch.dtype = field(default_factory=torch.get_default_dtype) + # This field is deprecated. + layout: torch.layout = field(default=torch.strided) + # This field is deprecated. + requires_grad: bool = False + # This field is deprecated. + memory_format: torch.memory_format = field(default=torch.contiguous_format) + # This field is deprecated. + pin_memory: bool = False + + def __getstate__(self): + # Since torch.memory_format cannot be pickled! 
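# It is therefore round-tripped through the small _MEM_FORMAT_ENCODING enum:
# encoded here and decoded back to a torch.memory_format in __setstate__.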
+ memory_format = self.memory_format + if memory_format == torch.contiguous_format: + mem_format_encoding = _MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT + elif memory_format == torch.channels_last: + mem_format_encoding = _MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST + elif memory_format == torch.preserve_format: + mem_format_encoding = _MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT + else: + raise RuntimeError(f"Invalid torch.memory_format: {memory_format}") + + return ( + self.dtype, + self.layout, + self.requires_grad, + mem_format_encoding, + self.pin_memory, + ) + + def __setstate__( + self, + state, + ): + ( + self.dtype, + self.layout, + self.requires_grad, + mem_format_encoding, + self.pin_memory, + ) = state + + if mem_format_encoding == _MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT: + memory_format = torch.contiguous_format + elif mem_format_encoding == _MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST: + memory_format = torch.channels_last + elif mem_format_encoding == _MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT: + memory_format = torch.preserve_format + else: + raise RuntimeError( + f"Invalid torch.memory_format encoding: {mem_format_encoding}" + ) + + self.memory_format = memory_format + + @staticmethod + def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties": + return TensorProperties( + dtype=tensor.dtype, + layout=tensor.layout, + requires_grad=tensor.requires_grad, + memory_format=torch.contiguous_format, + pin_memory=tensor.is_pinned(), + ) + + +@dataclass +class TensorStorageMetadata: + properties: TensorProperties + size: torch.Size + chunks: List[ChunkStorageMetadata] + + +@dataclass +class BytesStorageMetadata: + pass + + +STORAGE_TYPES = Union[TensorStorageMetadata, BytesStorageMetadata] +STATE_DICT_TYPE = Dict[str, Union[StatefulT, Any]] + + +@dataclass +class Metadata: + """This class represents the metadata of the checkpoint.""" + + # Keys are the same from the `state_dict` used. + state_dict_metadata: Dict[str, STORAGE_TYPES] + # It is the responsibility of the planner and storage plugins to ensure + # backward compatibility of the planner_data and storage_data. DCP will + # also ensure the backward compatibility of the metadata in this file and + # the metadata of the built-in planner and storage plugins. + planner_data: Any = None + storage_data: Any = None + + +@dataclass(frozen=True) +class MetadataIndex: + """This class represents a lookup key for items in a state dict or Metadata.""" + + fqn: str + """Fully Qualified Name of the object""" + + offset: Optional[torch.Size] = None + """If the object is a tensor, offset into the tensor we're looking for""" + + index: Optional[int] = field(hash=False, compare=False, default=None) + """ + Index hint when searching for tensor chunk to speedup lookups (optional) + + A common representation of a sharded tensor is as a list of chunks so to + find the index in such a list you need to linear search it. + + When constructing an instance of MetadataIndex that points to that list, + one can provide the index as a hint and it will be probed first before + the linear search and thus making it significantly faster. 
+ """ + + def __init__( + self, + fqn: str, + offset: Optional[Sequence[int]] = None, + index: Optional[int] = None, + ): + # We must use object.__setattr__ due to frozen=True + object.__setattr__(self, "fqn", fqn) + object.__setattr__(self, "index", index) + if offset is not None: + object.__setattr__(self, "offset", torch.Size(offset)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..26468d046f29a589d9d3d3c34babf86404fdbce0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py @@ -0,0 +1,348 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import dataclasses +from typing import cast, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.distributed as dist +from torch._utils import _get_device_module +from torch.distributed._shard.sharded_tensor.api import ShardedTensor +from torch.distributed._shard.sharded_tensor.metadata import ( + TensorProperties as ShardTensorProperties, +) +from torch.distributed._shard.sharded_tensor.shard import Shard +from torch.distributed._shard.sharding_spec.chunk_sharding_spec import ChunkShardingSpec +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint._nested_dict import unflatten_state_dict +from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner +from torch.distributed.checkpoint.metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + Metadata, + MetadataIndex, + STATE_DICT_TYPE, + TensorProperties, + TensorStorageMetadata, +) +from torch.distributed.checkpoint.planner import LoadPlan, LoadPlanner +from torch.distributed.checkpoint.planner_helpers import ( + _create_read_items, + create_read_items_for_chunk_list, +) +from torch.distributed.checkpoint.state_dict_loader import load_state_dict +from torch.distributed.checkpoint.storage import StorageReader +from torch.distributed.checkpoint.utils import ( + _element_wise_add, + _element_wise_sub, + _normalize_device_info, +) +from torch.distributed.distributed_c10d import _get_default_group +from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor +from torch.distributed.remote_device import _remote_device + +STATE_DICT_2D_LAYOUT = Dict[str, Tuple[Optional[Sequence[int]], Sequence[int]]] + + +# TODO: Update docstrings for optimizer.py +__all__ = [ + "load_sharded_optimizer_state_dict", +] + + +def _gen_rank_device(global_rank: int, device_type: str = "cuda") -> str: + if device_type == "cpu": + return "cpu" + device_module = _get_device_module(device_type) + if device_module.is_available(): + return _normalize_device_info( + device_type, global_rank % device_module.device_count() + ) + return "cpu" + + +def _create_colwise_spec( + pg: Optional[dist.ProcessGroup] = None, +) -> ChunkShardingSpec: + pg_device_type = dist.distributed_c10d._get_pg_default_device(pg).type + if pg is None: + placements = [ + f"rank:{idx}/{_gen_rank_device(idx, pg_device_type)}" + for idx in range(dist.get_world_size()) + ] + else: + placements = [ + f"rank:{idx}/{_gen_rank_device(dist.get_global_rank(pg, idx), pg_device_type)}" + for idx in range(pg.size()) + ] + return ChunkShardingSpec( + dim=0, + placements=cast(List[Union[_remote_device, str]], placements), + ) + + +def _is_nested_tensor(val: torch.Tensor) -> bool: + if type(val) is ShardedTensor: + if len(val.local_shards()) == 0: + return 
False + if type(val.local_shards()[0].tensor) is ShardedTensor: + return True + if type(val.local_shards()[0].tensor) is DTensor: + raise ValueError("Cannot handle DTensor nested insided ShardedTensor") + elif type(val) is DTensor and ( + type(val._local_tensor) is DTensor or type(val._local_tensor) is ShardedTensor + ): + raise ValueError("Cannot handle nested DTensor") + return False + + +def _alloc_tensor( + props: TensorProperties, size: Sequence[int], device_type: str = "cuda" +) -> torch.Tensor: + return torch.empty( + size=size, + dtype=props.dtype, + layout=props.layout, + requires_grad=props.requires_grad, + pin_memory=props.pin_memory, + device=cast(torch.device, _get_device_module(device_type).current_device()), + ) + + +def _get_state_dict_2d_layout( + state_dict: STATE_DICT_TYPE, +) -> Tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]: + """ + Load the right TP slice of the optimizer state. + + This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata. + We take advantage of the model state_dict producing a sliced ST to figure out what we need to load. + This is pretty fragile and it might be easier for FSDP to compute this info for us. + Returns a dictionary where keys are the same of the state_dict and the value is a tuple of + (offset, size) for the current rank TP slice. + N.B. The state_dict *MUST* come from FSDP.sharded_state_dict. + """ + specs: STATE_DICT_2D_LAYOUT = {} + dp_pg: Optional[dist.ProcessGroup] = None + for key, value in state_dict.items(): + specs[key] = (None, value.size()) + if _is_nested_tensor(value): + assert ( + len(value.local_shards()) == 1 + ), "Cannot handle ST with multiple shards" + assert isinstance( + value, ShardedTensor + ), "Can only handle nested ShardedTensor" + shard = value.local_shards()[0] + specs[key] = ( + shard.metadata.shard_offsets, + shard.metadata.shard_sizes, + ) + dp_pg = shard.tensor._process_group # type: ignore[attr-defined] + + return ( + specs, + dp_pg, + ) + + +class _ReaderWithOffset(DefaultLoadPlanner): + translation: Dict[MetadataIndex, MetadataIndex] + state_dict: STATE_DICT_TYPE + metadata: Metadata + + def __init__(self, fqn_to_offset: Dict[str, Sequence[int]]) -> None: + super().__init__() + self.fqn_to_offset = fqn_to_offset + self.metadata = Metadata({}) + self.state_dict = {} + self.translation = {} + + def create_local_plan(self) -> LoadPlan: + requests = [] + self.translation = {} + for fqn, obj in self.state_dict.items(): + md = self.metadata.state_dict_metadata[fqn] + if not isinstance(obj, ShardedTensor): + requests += _create_read_items(fqn, md, obj) + continue + + if fqn not in self.fqn_to_offset: + requests += _create_read_items(fqn, md, obj) + continue + + offset = self.fqn_to_offset[fqn] + + assert len(obj.local_shards()) == 1 + original_shard = obj.local_shards()[0] + local_chunks = [ + ChunkStorageMetadata( + offsets=torch.Size( + _element_wise_add(original_shard.metadata.shard_offsets, offset) + ), + sizes=torch.Size(original_shard.metadata.shard_sizes), + ) + ] + + reqs = create_read_items_for_chunk_list( + fqn, cast(TensorStorageMetadata, md), local_chunks + ) + # TODO: The ReadItems will have a displaced MetadataIndex, fix it. 
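# The requests above are built in checkpoint coordinates (local shard offset
# plus this FQN's TP offset); `translation` maps each displaced dest_index back
# to the shard-local index so lookup_tensor can resolve it against the local
# ShardedTensor.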
+ # TODO: we should change _create_sharded_read_items to have more ergonomic API + for ri in reqs: + assert ri.dest_index.offset is not None + original_offset = _element_wise_sub(ri.dest_index.offset, offset) + original_index = dataclasses.replace( + ri.dest_index, offset=torch.Size(original_offset) + ) + self.translation[ri.dest_index] = original_index + + requests += reqs + return LoadPlan(requests) + + def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor: + return super().lookup_tensor(self.translation.get(index, index)) + + +def load_sharded_optimizer_state_dict( + model_state_dict: STATE_DICT_TYPE, + optimizer_key: str, + storage_reader: StorageReader, + planner: Optional[LoadPlanner] = None, +) -> STATE_DICT_TYPE: + """ + Load a state_dict in conjunction with FSDP sharded optimizer state. + + This is the current recommended way to checkpoint FSDP. + >>> # xdoctest: +SKIP + >>> import torch.distributed.checkpoint as dist_cp + >>> # Save + >>> model: torch.nn.Model + >>> optim_params = model.parameters() + >>> optim = torch.optim.SGD(optim_params, lr=0.01) + >>> # Save + >>> with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT): + >>> state_dict = { + >>> "optimizer": FSDP.optim_state_dict(model, optim), + >>> "model": model.state_dict() + >>> } + >>> dist_cp.save_state_dict( + >>> state_dict=optim_state, + >>> storage_writer=dist_cp.FileSystemWriter("checkpoint"), + >>> planner=dist_cp.DefaultSavePlanner(), + >>> ) + >>> + >>> # Load + >>> with FSDP.state_dict_type(model_tp, StateDictType.SHARDED_STATE_DICT): + >>> model_state_dict = model_tp.state_dict() + >>> checkpoint = { + >>> "model": model_state_dict + >>> } + >>> dist_cp.load_state_dict( + >>> state_dict=checkpoint, + >>> storage_reader=dist_cp.FileSystemReader(checkpoint_file), + >>> planner=dist_cp.DefaultLoadPlanner(), + >>> ) + >>> model.load_state_dict(checkpoint["model_state"]) + >>> + >>> optim_state = dist_cp.load_sharded_optimizer_state_dict( + >>> model_state_dict, + >>> optimizer_key="optimizer", + >>> storage_reader=dist_cp.FileSystemReader("checkpoint"), + >>> ) + >>> + >>> flattened_osd = FSDP.optim_state_dict_to_load( + >>> model, optim, optim_state["optimizer"] + >>> ) + >>> + >>> optim.load_state_dict(flattened_osd) + """ + metadata = storage_reader.read_metadata() + + layout_specs, dp_pg = _get_state_dict_2d_layout(model_state_dict) + dp_pg_device_type = dist.distributed_c10d._get_pg_default_device(dp_pg).type + device_module = _get_device_module(dp_pg_device_type) + + if dp_pg is None: + placements = [] + for i in range(dist.get_world_size()): + device_info = _normalize_device_info( + dp_pg_device_type, i % device_module.device_count() + ) + placements.append(f"rank:{i}/{device_info}") + sharding_spec = ChunkShardingSpec(dim=0, placements=placements) # type: ignore[arg-type] + else: + sharding_spec = _create_colwise_spec(dp_pg) + + # Create a state_dict for optimizer state + state_dict: STATE_DICT_TYPE = {} + + fqn_to_offset: Dict[str, Sequence[int]] = {} + for key, value in metadata.state_dict_metadata.items(): + key_path = metadata.planner_data[key] + if key_path[0] != optimizer_key: + continue + + if isinstance(value, BytesStorageMetadata): + state_dict[key] = "" + continue + + # value: TensorStorageMetadata + if value.size.numel() == 1: + state_dict[key] = _alloc_tensor( + value.properties, value.size, dp_pg_device_type + ) + elif dp_pg is None: + state_dict[key] = _create_chunk_sharded_tensor( + _alloc_tensor(value.properties, value.size, dp_pg_device_type), + rank=dist.get_rank(), 
+ world_size=dist.get_world_size(), + num_devices_per_node=device_module.device_count(), + pg=_get_default_group(), + ) + else: + spec_key = key_path[2] + alloc_size = layout_specs.get(spec_key, (None, value.size))[1] + + properties = ShardTensorProperties( + dtype=value.properties.dtype, + layout=value.properties.layout, + requires_grad=value.properties.requires_grad, + memory_format=value.properties.memory_format, + pin_memory=value.properties.pin_memory, + ) + + st_md = sharding_spec.build_metadata(torch.Size(alloc_size), properties) + local_shards = [] + current_rank = dist.get_rank(dp_pg) + for shard_md in st_md.shards_metadata: + if cast(_remote_device, shard_md.placement).rank() != current_rank: + continue + local_shards.append( + Shard( + tensor=_alloc_tensor( + value.properties, shard_md.shard_sizes, dp_pg_device_type + ), + metadata=shard_md, + ) + ) + + st = ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards, st_md, process_group=dp_pg + ) + + if spec_key in layout_specs and layout_specs[spec_key][0] is not None: + fqn_to_offset[key] = cast(Sequence[int], layout_specs[spec_key][0]) + + state_dict[key] = st + + # Whether we unflatten before or after doesn't matter + load_state_dict( + state_dict=state_dict, + storage_reader=storage_reader, + # FIXME the type of planner is wrong in load_state_dict + planner=_ReaderWithOffset(fqn_to_offset) if dp_pg is not None else planner, + ) + + state_dict = unflatten_state_dict(state_dict, metadata.planner_data) + + return state_dict diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/planner.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/planner.py new file mode 100644 index 0000000000000000000000000000000000000000..e295354a912a657134c12919acbfb5182cef684c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/planner.py @@ -0,0 +1,403 @@ +import abc +import io +from dataclasses import dataclass +from enum import auto, Enum +from functools import reduce +from typing import Any, List, Optional, Tuple, Union + +import torch + +from .metadata import ( + ChunkStorageMetadata, + Metadata, + MetadataIndex, + STATE_DICT_TYPE, + TensorProperties, +) + + +__all__ = [ + "WriteItemType", + "LoadItemType", + "TensorWriteData", + "WriteItem", + "ReadItem", + "SavePlan", + "LoadPlan", + "SavePlanner", + "LoadPlanner", +] + + +class WriteItemType(Enum): + TENSOR = auto() + SHARD = auto() + BYTE_IO = auto() + + +class LoadItemType(Enum): + TENSOR = auto() + BYTE_IO = auto() + + +@dataclass(frozen=True) +class TensorWriteData: + chunk: ChunkStorageMetadata + properties: TensorProperties + size: torch.Size + + +@dataclass(frozen=True) +class WriteItem: + """Dataclass which holds information about what needs to be written to storage.""" + + index: MetadataIndex + type: WriteItemType + + # Value present if it's a tensor write + tensor_data: Optional[TensorWriteData] = None + + def tensor_storage_size(self) -> Optional[int]: + """ + Calculates the storage size of the underlying tensor, or None if this is not a tensor write. + + Returns: + Optional[int] storage size, in bytes of underlying tensor if any. 
+ """ + if self.tensor_data is None: + return None + + numels = reduce(lambda x, y: x * y, self.tensor_data.size, 1) + dtype_size = torch._utils._element_size(self.tensor_data.properties.dtype) + return numels * dtype_size + + +@dataclass(frozen=True) +class ReadItem: + # Read Item + type: LoadItemType + + # Index into the state_dict + dest_index: MetadataIndex + # Offsets into destination tensor + dest_offsets: torch.Size + + # Index into the checkpoint + storage_index: MetadataIndex + # Offset into the checkpoint data + storage_offsets: torch.Size + + # Size of the hypercube to copy + lengths: torch.Size + + +@dataclass(frozen=True) +class SavePlan: + items: List[WriteItem] + storage_data: Any = None + planner_data: Any = None + + +@dataclass +class LoadPlan: + items: List[ReadItem] + storage_data: Any = None + planner_data: Any = None + + +class SavePlanner(abc.ABC): + """ + Abstract class defining the protocol used by save_state_dict to plan the save process. + + SavePlanners are stateful objects that can be used to customize the whole save process. + + SavePlanner acts as an access proxy to the state_dict, so any transformation done to it + will be visible to the whole process. + + A planner subclass can expect the following sequence of calls during save_state_dict: + + 1) set_up_planner - called on all ranks. + Signals the start of a checkpoint save. + + 2) create_local_plan - called on all ranks. + Process the state_dict and produces a `SavePlan` that will be sent for global planning. + + 3) create_global_plan - called on the coordinator rank only. + Takes the SavePlan from all ranks and make any global decision. + + 4) finish_plan - called on all ranks. + This gives each rank a chance to adjust to global planning decisions. + + 5) resolve_data - called multiple times on each rank + Lookups a value on the `state_dict` for the storage layer to write. + + Users are recommended to extend DefaultSavePlanner instead of this interface directly as + most changes can be expressed by changes in a single method. + + There are 3 usual patterns of extension: + + Rewriting state_dict. This is the simplest way to extend the save process as it + doesn't requite understanding the intrincacies of how SavePlan works: + + >>> # xdoctest: +SKIP("undefined vars") + >>> class RenamePlanner(DefaultSavePlanner): + >>> def set_up_planner(self, state_dict, is_coordinator): + >>> # prefix all keys with `foo_`` + >>> super().set_up_planner({"foo_" + k: v for k, v in state_dict.items()}, is_coordinator) + + Modifying local plan and lookup in tandem. 
This is useful when fine control of how data is persisted + + >>> # xdoctest: +SKIP("undefined vars") + >>> class FP16Planner(DefaultSavePlanner): + >>> def create_local_plan(self): + >>> plan = super().create_local_plan() + >>> for p in plan: + >>> if p.tensor_data is not None: + >>> p.tensor_data.properties.dtype = torch.float16 + >>> return plan + >>> + >>> def resolve_data(self, write_item): + >>> item = super().resolve_data(write_item) + >>> return item if write_item.type == WriteItemType.BYTE_IO else item.to(torch.float16) + + Using the global planning step to make central decisions that can't be made individually by each rank + + >>> # xdoctest: +SKIP("undefined vars") + >>> from itertools import islice + >>> from dataclasses import replace + >>> class DDPLoadBalancingPlanner(DefaultSavePlanner): + >>> # This uses the default local plan behavior of having all non-sharded writes in rank 0 + >>> # This sample doesn't handle ShardedTensors + >>> def create_global_plan(self, all_plans): + >>> def chunk(it, size): + >>> it = iter(it) + >>> return list(iter(lambda: tuple(islice(it, size)), ())) + >>> all_plans = [ + >>> replace(plan, items=items) for plan, items in + >>> zip(all_plans, chunk(all_plans[0].items, len(all_plans))) + >>> ] + >>> return super().create_global_plan(all_plans) + + Finally, some planners need to save additional metadata in the checkpoint, this is + accomplished by having each rank contribute their data items in the local plan and + the global planner aggregate them: + + >>> # xdoctest: +SKIP("undefined vars") + >>> class SaveExtraDataPlanner(DefaultSavePlanner): + >>> def create_local_plan(self) -> SavePlan: + >>> plan = super().create_local_plan() + >>> return replace(plan, planner_data="per-rank-data") + >>> + >>> def create_global_plan(self, all_plans: List[SavePlan]) -> Tuple[List[SavePlan], Metadata]: + >>> global_plan, metadata = super().create_global_plan(all_plans) + >>> merged_data = [p.planner_data for p in global_plan] + >>> metadata = replace(metadata, planner_data=merged_data) + >>> return global_plan, metadata + """ + + @abc.abstractmethod + def set_up_planner(self, state_dict: STATE_DICT_TYPE, is_coordinator: bool) -> None: + """ + Initialize this planner to save ``state_dict``. + + Implementations should save those values as they won't be provided lated in the save process. + + This is called on all ranks. + """ + pass + + @abc.abstractmethod + def create_local_plan(self) -> SavePlan: + """ + Compute the save plan for the current rank. + + This will be aggregated and passed to create_global_plan. + Planner specific data can be passed through SavePlan::planner_data. + + This is called on all ranks. + """ + pass + + @abc.abstractmethod + def create_global_plan( + self, all_plans: List[SavePlan] + ) -> Tuple[List[SavePlan], Metadata]: + """ + Compute the global checkpoint plan and return the local plan of each rank. + + This is called on the coordinator rank only. + """ + pass + + @abc.abstractmethod + def finish_plan(self, new_plan: SavePlan) -> SavePlan: + """ + Merge the plan created by `create_local_plan` and the result of `create_global_plan`. + + This is called on all ranks. + """ + pass + + @abc.abstractmethod + def resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]: + """ + Transform and prepare ``write_item`` from ``state_dict`` for storage, ensuring idempotency and thread-safety. 
+ + Lookup the object associated with ``write_item`` in ``state_dict`` and apply any + transformation (such as serialization) prior to the storage layer consuming it. + + Called on each rank multiple times, at least once per WriteItem in the final SavePlan. + + This method should be idempotent and thread-save. StorageWriter implementations + are free to call it as frequently as they need. + + Any transformation that allocates memory should be lazily done when his method + is called in order to reduce peak memory required by checkpointing. + + When returning tensors, they can be on any device or format, they can be views too. + It's the storage layer responsibility to figure out how to save them. + """ + pass + + +class LoadPlanner: + """ + Abstract class defining the protocol used by load_state_dict to plan the load process. + + LoadPlanner are stateful objects that can be used to customize the whole load process. + + LoadPlanner acts as an access proxy to the state_dict, so any transformation done to it + will be visible to the whole process. + + A planner subclass can expect the following sequence of calls during load_state_dict: + + 1) set_up_planner - called on all ranks. + Signals the start of loading a checkpoint. + + 2) create_local_plan - called on all ranks. + Process the state_dict and produces a `LoadPlan` that will be sent for global planning. + + 3) create_global_plan - called on the coordinator rank only. + Takes the LoadPlan from all ranks and make any global decision. + + 4) load_bytes - called multiple times on each rank + This is called once per non-tensor value in state_dict. + + 5) resolve_tensor and commit_tensor - called multiple times on each rank + They are called in pair for each Tensor value in state_dict. + + Users are recommended to extend DefaultLoadPlanner instead of this interface directly as + most changes can be expressed by changes in a single method. + + There are two usual patterns of extension: + + Rewriting state_dict. This is the simplest way to extend the load process as it + doesn't requite understanding the intrincacies of how LoadPlan works. We need + to keep a reference to the original state_dict as load happens in place so + we need to be able to perform it in place + + >>> # xdoctest: +SKIP("undefined vars") + >>> class RenamePlanner(DefaultLoadPlanner): + >>> def set_up_planner(self, state_dict, metadata, is_coordinator): + >>> self.original_state_dict = state_dict + >>> state_dict = {"foo_" + k: v for k, v in state_dict.items()} + >>> + >>> if self.flatten_sharded_tensors: + >>> state_dict = _flatten_sharded_tensors(state_dict) + >>> + >>> if self.flatten_state_dict: + >>> state_dict, self.mappings = flatten_state_dict(state_dict) + >>> + >>> self.state_dict = state_dict + >>> self.metadata = metadata + >>> self.is_coordinator = is_coordinator + >>> + >>> def load_bytes(self, read_item, value): + >>> # Remove the "foo_" prefix + >>> self.original_state_dict[read_item.dest_index.fqn[4:]] = torch.load(value) + + + Modifying resolve_tensor and commit_tensor to handle load time transformation. 
+ + >>> # xdoctest: +SKIP("undefined vars") + >>> class MetaModelMaterialize(DefaultSavePlanner): + >>> def resolve_tensor(self, read_item): + >>> tensor = super().resolve_tensor(read_item) + >>> return torch.empty_like(tensor, device="cpu") + >>> + >>> def commit_tensor(self, read_item, tensor): + >>> self.state_dict[read_item.dest_index.fqn] = tensor + """ + + @abc.abstractmethod + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Metadata, + is_coordinator: bool, + ) -> None: + """ + Initialize this instance to load data into ``state_dict``. + + . N.B. This is called on every rank. + """ + pass + + @abc.abstractmethod + def create_local_plan(self) -> LoadPlan: + """ + Create a LoadPlan based on state_dict and metadata provided by set_up_planner. + + . N.B. This is called on every rank. + """ + pass + + @abc.abstractmethod + def create_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]: + """ + Compute the global load plan and return plans for each rank. + + . N.B. This is called on the coordinator rank only + """ + pass + + @abc.abstractmethod + def finish_plan(self, central_plan: LoadPlan) -> LoadPlan: + """Accept the plan from coordinator and return final LoadPlan.""" + pass + + @abc.abstractmethod + def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None: + """ + Load the item described by ``read_item``and ``value``. + + This method is expected to modify in-place the underlying state_dict. + + The contents of ``value`` are defined by the SavePlanner used to produce + the checkpoint being loaded. + """ + pass + + @abc.abstractmethod + def resolve_tensor(self, read_item: ReadItem) -> torch.Tensor: + """ + Return the tensor described by ``read_item`` to be used by the StorageReader to load `read_item`. + + The tensor should alias with one on the underlying state_dict as StorageReader will replace its contents. + If, for any reason, that's not possible, the planner can use the ``commit_tensor`` method to copy the data + back to the one in state_dict. + """ + pass + + @abc.abstractmethod + def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None: + """ + Call once the StorageReader finished loading data into ``tensor``. + + The provided tensor is the same one returned by the call to ``resolve_tensor``. + This method is only needed if this LoadPlanner needs to post process ``tensor`` prior to + copying it back to the one in the state_dict. + + The contents of tensor will follow its device synchronization model. 
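+
+        A minimal sketch (assuming a ``DefaultLoadPlanner`` subclass that stages
+        reads into a scratch buffer and copies the result back here):
+
+        >>> # xdoctest: +SKIP("undefined vars")
+        >>> class StagingLoadPlanner(DefaultLoadPlanner):
+        >>>     def resolve_tensor(self, read_item):
+        >>>         # Hand the storage layer a non-aliasing scratch tensor.
+        >>>         return torch.empty_like(super().resolve_tensor(read_item))
+        >>>
+        >>>     def commit_tensor(self, read_item, tensor):
+        >>>         # Copy the staged data back into the real state_dict entry.
+        >>>         self.state_dict[read_item.dest_index.fqn].copy_(tensor)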
+ """ + pass diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..5829ab6111e225201a0f14bece063246bb5bb7cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py @@ -0,0 +1,325 @@ +from typing import Any, cast, List + +import torch +import torch.distributed as dist +from torch._utils import _get_device_module + +from torch.distributed._shard.metadata import ShardMetadata +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._tensor import DTensor +from torch.distributed._tensor._utils import compute_local_shape_and_global_offset + +from torch.utils._pytree import tree_map_only + +from .metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + MetadataIndex, + STATE_DICT_TYPE, + STORAGE_TYPES, + TensorProperties, + TensorStorageMetadata, +) +from .planner import ( + LoadItemType, + ReadItem, + SavePlan, + TensorWriteData, + WriteItem, + WriteItemType, +) +from .resharding import ( + _check_shard_metadata_pair_overlap, + _shards_get_overlap_region_wrt_saved_tensor, +) + +__all__: List[str] = ["create_read_items_for_chunk_list"] + + +def _create_chunk_from_tensor(tensor: torch.Tensor) -> ChunkStorageMetadata: + return ChunkStorageMetadata( + offsets=torch.Size([0] * len(tensor.size())), sizes=tensor.size() + ) + + +def _chunk_for_shard(shard_md: ShardMetadata) -> ChunkStorageMetadata: + return ChunkStorageMetadata( + offsets=torch.Size(shard_md.shard_offsets), + sizes=torch.Size(shard_md.shard_sizes), + ) + + +def _sharded_tensor_metadata( + sharded_tensor: ShardedTensor, shard_md: ShardMetadata +) -> TensorWriteData: + shard_properties = sharded_tensor.metadata().tensor_properties + + properties = TensorProperties( + dtype=shard_properties.dtype, + layout=shard_properties.layout, + requires_grad=shard_properties.requires_grad, + memory_format=shard_properties.memory_format, + pin_memory=shard_properties.pin_memory, + ) + + return TensorWriteData( + chunk=_chunk_for_shard(shard_md), + properties=properties, + size=sharded_tensor.metadata().size, + ) + + +def _create_write_items_for_dtensor(fqn: str, tensor: DTensor) -> WriteItem: + sizes, offsets = compute_local_shape_and_global_offset( + tensor.shape, tensor.device_mesh, tensor.placements + ) + sizes, offsets = torch.Size(sizes), torch.Size(offsets) + + return WriteItem( + index=MetadataIndex(fqn, offsets), + type=WriteItemType.SHARD, + tensor_data=TensorWriteData( + chunk=ChunkStorageMetadata( + offsets=offsets, + sizes=sizes, + ), + properties=TensorProperties.create_from_tensor(tensor.to_local()), + size=tensor.size(), + ), + ) + + +def _create_write_item_for_shard( + fqn: str, sharded_tensor: ShardedTensor, shard_md: ShardMetadata +) -> WriteItem: + offsets = torch.Size(shard_md.shard_offsets) + return WriteItem( + index=MetadataIndex(fqn, offsets), + type=WriteItemType.SHARD, + tensor_data=_sharded_tensor_metadata(sharded_tensor, shard_md), + ) + + +def _create_write_item_for_tensor(fqn: str, tensor: torch.Tensor) -> WriteItem: + offsets = torch.Size([0] * len(tensor.size())) + return WriteItem( + index=MetadataIndex(fqn, offsets), + type=WriteItemType.TENSOR, + tensor_data=TensorWriteData( + chunk=ChunkStorageMetadata(offsets=offsets, sizes=tensor.size()), + properties=TensorProperties.create_from_tensor(tensor), + size=tensor.size(), + ), + ) + + +def 
_create_write_item_for_bytesio(fqn: str, bytes: Any): + return WriteItem( + index=MetadataIndex(fqn), + type=WriteItemType.BYTE_IO, + ) + + +def _create_read_item_for_byteio( + dest_index, dest_offset, storage_index, storage_offset, length +): + return ReadItem( + type=LoadItemType.BYTE_IO, + dest_index=dest_index, + dest_offsets=torch.Size((dest_offset,)), + storage_index=storage_index, + storage_offsets=torch.Size((storage_offset,)), + lengths=torch.Size((length,)), + ) + + +def _create_read_item_for_tensor( + dest_index, dest_offsets, storage_index, storage_offsets, lengths +): + return ReadItem( + type=LoadItemType.TENSOR, + dest_index=dest_index, + dest_offsets=torch.Size(dest_offsets), + storage_index=storage_index, + storage_offsets=torch.Size(storage_offsets), + lengths=torch.Size(lengths), + ) + + +def create_read_items_for_chunk_list( + fqn: str, + checkpoint_md: TensorStorageMetadata, + local_chunks: List[ChunkStorageMetadata], +) -> List[ReadItem]: + """ + Create a list of ``ReadItem`` based on the checkpoint and local chunks. + + This applies the resharding algorithm and computes the reads needed + to satisfy ``local_chunks`` with a checkpoint described by ``checkpoint_md``. + + Args: + fqn (str) : The state_dict FQN to pass to ``ReadItem``. + checkpoint_md (TensorStorageMetadata): metadata for a given tensor + from a checkpoint. + local_chunks (List[ChunkStorageMetadata]): Local chunks that needs to be + loaded. + + Returns: + A list of ``ReadItem`` that will satisfy all input chunks. + """ + read_items = [] + # this is a naive quadratic algo that can be optimized later + for idx, shard in enumerate(local_chunks): + for storage_idx, storage_md in enumerate(checkpoint_md.chunks): + if not _check_shard_metadata_pair_overlap(shard, storage_md): + continue + + storage_offsets = [] + dest_offsets = [] + lengths = [] + for ( + dim, + offset_for_saved_tensor, + offset_for_current_tensor, + length, + ) in _shards_get_overlap_region_wrt_saved_tensor( + saved_shard=storage_md, current_shard=shard + ): + storage_offsets.append(offset_for_saved_tensor) + dest_offsets.append(offset_for_current_tensor) + lengths.append(length) + + read_items.append( + _create_read_item_for_tensor( + dest_index=MetadataIndex(fqn, shard.offsets, idx), + dest_offsets=dest_offsets, + storage_index=MetadataIndex(fqn, storage_md.offsets, storage_idx), + storage_offsets=storage_offsets, + lengths=lengths, + ) + ) + return read_items + + +def _create_default_metadata_only_plan(state_dict: STATE_DICT_TYPE) -> SavePlan: + requests = [] + for fqn, obj in state_dict.items(): + if isinstance(obj, DTensor): + requests.append(_create_write_items_for_dtensor(fqn, obj)) + elif isinstance(obj, ShardedTensor): + for shard_md in obj.metadata().shards_metadata: + requests.append(_create_write_item_for_shard(fqn, obj, shard_md)) + elif isinstance(obj, torch.Tensor): + requests.append(_create_write_item_for_tensor(fqn, obj)) + else: + requests.append(_create_write_item_for_bytesio(fqn, obj)) + return SavePlan(requests) + + +def _create_write_items(fqn: str, object: Any) -> List[WriteItem]: + if isinstance(object, DTensor): + return [_create_write_items_for_dtensor(fqn, object)] + elif isinstance(object, ShardedTensor): + return [ + _create_write_item_for_shard(fqn, object, shard.metadata) + for shard in object.local_shards() + ] + elif isinstance(object, torch.Tensor): + return [_create_write_item_for_tensor(fqn, object)] + else: + return [_create_write_item_for_bytesio(fqn, object)] + + +def _create_chunk_from_dtensor(tensor: 
DTensor) -> ChunkStorageMetadata: + sizes, offsets = compute_local_shape_and_global_offset( + tensor.shape, tensor.device_mesh, tensor.placements + ) + sizes, offsets = torch.Size(sizes), torch.Size(offsets) + return ChunkStorageMetadata( + offsets=offsets, + sizes=sizes, + ) + + +def _create_chunk_list(tensor: torch.Tensor) -> List[ChunkStorageMetadata]: + if isinstance(tensor, DTensor): + local_chunks = [_create_chunk_from_dtensor(tensor)] + elif isinstance(tensor, ShardedTensor): + local_chunks = [ + _chunk_for_shard(shard.metadata) for shard in tensor.local_shards() + ] + elif isinstance(tensor, torch.Tensor): + local_chunks = [_create_chunk_from_tensor(tensor)] + else: + raise ValueError( + "Unsupported Type, expecting one of [Tensor, DTensor, ShardedTensor] " + f",but got {type(tensor)}" + ) + + return local_chunks + + +def _create_read_items(fqn: str, md: STORAGE_TYPES, obj: Any) -> List[ReadItem]: + if not isinstance(md, BytesStorageMetadata): + try: + local_chunks = _create_chunk_list(obj) + except ValueError as ex: + raise ValueError( + f"Invalid checkpoint metadata for {fqn}, " + + f"expected BytesStorageMetadata but found {type(md)}", + ) from ex + + return create_read_items_for_chunk_list(fqn, md, local_chunks) + else: + return [ + _create_read_item_for_byteio( + dest_index=MetadataIndex(fqn), + dest_offset=0, + storage_index=MetadataIndex(fqn), + storage_offset=0, + length=0, + ) + ] + + +def _init_state_dict(state_dict: STATE_DICT_TYPE) -> None: + state_dict_assigned_storage = tree_map_only( + torch.Tensor, lambda v: _init_meta_tensor(v), state_dict + ) + # The inplace version of tree_map_only, tree_map_only_ doesn't seem to work. + # So we need to temporariy update the each element in the state dict with meta tensor. + for k in state_dict.keys(): + state_dict[k] = state_dict_assigned_storage[k] + + +def _init_meta_tensor(value: Any) -> Any: + """ + Initializes tensor, moves it to device for torch.Tensor/DTensor on meta device. + """ + + device = getattr(value, "device", None) + # DCP does the initialization if it's meta tensor/DTensor. + if device == torch.device("meta"): + device_type = dist.distributed_c10d._get_pg_default_device().type + device = cast(torch.device, _get_device_module(device_type).current_device()) + if isinstance(value, DTensor): + new_local_tensor = torch.empty_like(value.to_local(), device=device) + # We need to pass shape and stride explicitly, since DTensor might be + # sharded unevenly. + dtensor = DTensor.from_local( + new_local_tensor, + device_mesh=value.device_mesh, + placements=value.placements, + shape=value.size(), + stride=value.stride(), + ) + return dtensor + elif isinstance(value, torch.Tensor): + tensor = torch.empty_like(value, device=device) + return tensor + else: + raise RuntimeError( + f"Found unsupported type {type(value)} for meta device loading." 
+ ) + else: + return value diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py new file mode 100644 index 0000000000000000000000000000000000000000..1ebb0ba57d739e3d43ad0605eab98262f899088b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py @@ -0,0 +1,70 @@ +from typing import List, Tuple + +from torch.distributed.checkpoint.metadata import ChunkStorageMetadata + +__all__: List[str] = [] + + +def _check_shard_metadata_pair_overlap( + shard1: ChunkStorageMetadata, shard2: ChunkStorageMetadata +): + """Check if two shards overlap.""" + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. + ndims = len(shard1.offsets) + for i in range(ndims): + if shard1.offsets[i] >= shard2.offsets[i] + shard2.sizes[i]: + return False + if shard2.offsets[i] >= shard1.offsets[i] + shard1.sizes[i]: + return False + + return True + + +def _shards_get_overlap_region_wrt_saved_tensor( + saved_shard: ChunkStorageMetadata, current_shard: ChunkStorageMetadata +) -> List[Tuple[int, int, int, int]]: + """ + Return the overlapping region between saved_shard and current_shard. + + There returned list has the same number of elements as the tensor's dimension. + For each element, we produce a tuple with the following contents: + (dimension, `saved_shard` offset, `current_shard` offset, length) + + Offsets are relative to each shard. + """ + narrows = [] + for dim, ( + saved_shard_offset, + current_shard_offset, + saved_shard_size, + current_shard_size, + ) in enumerate( + zip( + saved_shard.offsets, + current_shard.offsets, + saved_shard.sizes, + current_shard.sizes, + ) + ): + min_range_end = min( + saved_shard_offset + saved_shard_size, + current_shard_offset + current_shard_size, + ) + + length = min_range_end - max(current_shard_offset, saved_shard_offset) + + if saved_shard_offset > current_shard_offset: + offset_for_saved_tensor = 0 + offset_for_current_tensor = saved_shard_offset - current_shard_offset + else: + offset_for_saved_tensor = current_shard_offset - saved_shard_offset + offset_for_current_tensor = 0 + + narrows.append( + (dim, offset_for_saved_tensor, offset_for_current_tensor, length) + ) + + return narrows diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..2993e4a96ad830bdac7b4843370420078a940da7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict.py @@ -0,0 +1,1079 @@ +import contextlib +import functools +import gc +from dataclasses import asdict, dataclass, field +from itertools import chain +from typing import ( + Any, + Callable, + cast, + Dict, + Iterable, + List, + no_type_check, + Optional, + Set, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._state_dict_utils import ( + _gather_state_dict, + _offload_state_dict_to_cpu, +) +from torch.distributed._tensor import DTensor +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + _CHECKPOINT_PREFIX, +) +from torch.distributed.fsdp import 
( + FullOptimStateDictConfig, + FullStateDictConfig, + FullyShardedDataParallel as FSDP, + OptimStateDictConfig, + ShardedOptimStateDictConfig, + ShardedStateDictConfig, + StateDictConfig, + StateDictType, +) +from torch.distributed.fsdp._common_utils import ( + _get_module_fsdp_state_if_fully_sharded_module, + FSDP_WRAPPED_MODULE, +) +from torch.nn.modules.module import _IncompatibleKeys +from torch.nn.parallel import DistributedDataParallel as DDP + + +FLAT_PARAM = "_flat_param" +PG = "param_groups" +PG_PREFIX = f"{PG}." +STATE = "state" +STATE_PREFIX = f"{STATE}." +PARAMS = "params" +FQNS_T = Set[str] + +_patched_state_dict: Set[Callable] = set() + + +PrimitiveType = Union[DTensor, ShardedTensor, torch.Tensor, int, float, str] +ValueType = Union[ + PrimitiveType, List[PrimitiveType], Tuple[PrimitiveType], Dict[str, "ValueType"] +] +DictValueType = Dict[str, ValueType] +ListDictValueType = List[DictValueType] +OptimizerStateType = Dict[str, Union[DictValueType, ListDictValueType]] + + +@contextlib.contextmanager +def gc_context(): + is_enabled = gc.isenabled() + gc.disable() + try: + yield + finally: + # TODO: add logging for the gc details/time + gc.collect() + if is_enabled: + gc.enable() + + +@dataclass +class StateDictOptions: + """ + This dataclass specifies how get_state_dict/set_state_dict will work. + + - ``full_state_dict``: if this is set to True, all the tensors in the + returned state_dict will be gathered. No ShardedTensor and DTensor + will be in the returned state_dict. + + - ``cpu_offload``: offload all the tensors to cpu. To prevent CPU OOM, if + ``full_state_dict`` is also true, then only the rank0 will get the + state_dict and all other ranks will get empty state_dict. + + - ``ignore_frozen_params``: if the value is True, the returned state_dict + won't contain any frozen parameters -- the ``requires_grad`` is False. + The default value is False. + + - ``keep_submodule_prefixes``: when ``submodules`` is not None, this option + indicates whether to keep the submodule prefixes from the state_dict keys. + or example, if the submodule is ``module.pretrain`` and the full FQN of + the parameter is ``pretrain.layer1.weight`` of the param. When this option + is True, the parameter's key in the returned state_dict will be + ``pretrain.layer1.weight``. If the options is False, the key will be + ``layer1.weight``. + Note that if ``keep_submodule_prefixes`` is False, there may be conflicted + FQNs, hence there should be only one submodule in ``submodules``. + + - ``strict``: the ``strict`` option when ``set_state_dict`` calls + model.load_state_dict(). + The default value is False. + """ + + full_state_dict: bool = False + cpu_offload: bool = False + ignore_frozen_params: bool = False + keep_submodule_prefixes: bool = True + strict: bool = True + + +@dataclass +class _StateDictInfo(StateDictOptions): + fqn_param_mapping: Dict[ + Union[str, torch.Tensor], Union[FQNS_T, torch.Tensor] + ] = field(default_factory=dict) + all_fqns: Set[str] = field(default_factory=set) + submodule_prefixes: Set[str] = field(default_factory=set) + handle_model: bool = True + handle_optim: bool = True + fsdp_context: Callable = contextlib.nullcontext + fsdp_modules: List[nn.Module] = field(default_factory=list) + + +def _get_fqns( + model: nn.Module, + name: str, + skip_ddp_prefix: bool = True, + skip_compiler_prefix: bool = True, +) -> FQNS_T: + """ + This API is used to convert the name of a parameter to the FQNs. 
For FSDP + without `use_orig_params`, the name of FlatParameter can be mapped to + multiple original parameters. As a result, the return type of this function + is `Set[str]`. + + Args: + module (nn.Module): the root model. + name (str): the name + skip_ddp_prefix (bool): whether to skip DDP's `module` prefix + + Returns: + The canonical FQNs based on the model traversal. + """ + if "." not in name: + return {name.replace(_CHECKPOINT_PREFIX, "")} + + obj_names = name.split(".") + fqn_obj_names = [] + curr_obj = model + for i, curr_obj_name in enumerate(obj_names): + if isinstance(curr_obj, DDP): + assert curr_obj_name == "module" + curr_obj = curr_obj.module + if not skip_ddp_prefix: + fqn_obj_names.append(curr_obj_name) + elif isinstance(curr_obj, FSDP): + if i < len(obj_names) - 1 and obj_names[i + 1] == FLAT_PARAM: + prefix = ".".join(fqn_obj_names) + flat_param = getattr(curr_obj, FLAT_PARAM) + if prefix: + prefix = f"{prefix}." + # FSDP already handles removal of checkpoint prefix, so we can return + # directly + return {f"{prefix}{fqn}" for fqn in flat_param._fqns} + curr_obj = getattr(curr_obj, FSDP_WRAPPED_MODULE) + if curr_obj_name != FSDP_WRAPPED_MODULE: + fqn_obj_names.append(curr_obj_name) + curr_obj = getattr(curr_obj, curr_obj_name) + elif isinstance(curr_obj, torch._dynamo.eval_frame.OptimizedModule): + assert curr_obj_name == "_orig_mod" + curr_obj = curr_obj._orig_mod + if not skip_compiler_prefix: + fqn_obj_names.append(curr_obj_name) + else: + fqn_obj_names.append(curr_obj_name) + curr_obj = getattr(curr_obj, curr_obj_name) + + return {".".join(fqn_obj_names).replace(_CHECKPOINT_PREFIX, "")} + + +def _verify_options( + model: nn.Module, + optims: Tuple[torch.optim.Optimizer, ...], + optim_only: bool, + *, + submodules: Optional[Set[nn.Module]] = None, + options: Optional[StateDictOptions] = None, +) -> _StateDictInfo: + """ + Verify the model and options passed by the user and generates _StateDictInfo. + """ + if optim_only and not optims: + raise RuntimeError( + "Optimizers are not passed in but optim_only is set to True." + ) + + options = options or StateDictOptions() + + fqn_param_mapping: Dict[ + Union[str, torch.Tensor], Union[Set[str], torch.Tensor] + ] = {} + all_fqns = set() + for name, param in chain(model.named_parameters(), model.named_buffers()): + fqns = _get_fqns(model, name) + fqn_param_mapping[param] = fqns + for fqn in fqns: + fqn_param_mapping[fqn] = param + all_fqns.add(fqn) + + submodule_prefixes = set() + if submodules: + submodules = set(submodules) + for name, module in model.named_modules(): + if module not in submodules: + continue + fqns = _get_fqns(model, name) + assert len(fqns) == 1, "Submodule FQN should only have 1 instance" + for fqn in fqns: + submodule_prefixes.add(f"{fqn}.") + + fsdp_modules = FSDP.fsdp_modules(model) + state_dict_config: StateDictConfig + optim_state_dict_config: OptimStateDictConfig + fsdp_context: Callable + if fsdp_modules: + # FSDP API only work if at least one FSDP instance exists. 
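+        # The configs below are derived from ``options``:
+        #   - full_state_dict=True  -> FULL_STATE_DICT: tensors are gathered,
+        #     and with cpu_offload they are moved to CPU and kept on rank0 only.
+        #   - full_state_dict=False -> SHARDED_STATE_DICT: each rank keeps its
+        #     own shards, optionally offloaded to CPU.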
+ if options.full_state_dict: + state_dict_config = FullStateDictConfig( + offload_to_cpu=options.cpu_offload, rank0_only=options.cpu_offload + ) + optim_state_dict_config = FullOptimStateDictConfig( + offload_to_cpu=options.cpu_offload, rank0_only=options.cpu_offload + ) + state_dict_type = StateDictType.FULL_STATE_DICT + else: + state_dict_config = ShardedStateDictConfig( + offload_to_cpu=options.cpu_offload, + ) + optim_state_dict_config = ShardedOptimStateDictConfig( + offload_to_cpu=options.cpu_offload, + ) + state_dict_type = StateDictType.SHARDED_STATE_DICT + + fsdp_context = functools.partial( + FSDP.state_dict_type, + module=model, + state_dict_type=state_dict_type, + state_dict_config=state_dict_config, + optim_state_dict_config=optim_state_dict_config, + ) + else: + fsdp_context = contextlib.nullcontext + + return _StateDictInfo( + **asdict(options), + fqn_param_mapping=fqn_param_mapping, + all_fqns=all_fqns, + submodule_prefixes=submodule_prefixes, + fsdp_context=fsdp_context, + fsdp_modules=cast(List[nn.Module], fsdp_modules), + handle_model=not optim_only, + handle_optim=(len(optims) > 0), + ) + + +def _verify_state_dict( + model_state_dict: Dict[str, ValueType], + optim_state_dict: OptimizerStateType, + info: _StateDictInfo, +) -> None: + # FSDP root must exist otherwise FSDP state_dict will be incorrect. + has_fsdp_root = False + for module in info.fsdp_modules: + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + assert fsdp_state is not None, "Expected a fsdp_state with a fsdp module." + if fsdp_state._is_root: + has_fsdp_root = True + break + if info.fsdp_modules and not has_fsdp_root: + raise RuntimeError("The model has FSDP modules but no FSDP root module exists.") + + # Verify if the model_state_dict and optim_state_dict are valid. This API + # should give the users an explicit error message to debug or report. + if ( + info.handle_model + and not model_state_dict + and not info.submodule_prefixes + and not info.ignore_frozen_params + and not (info.cpu_offload and info.full_state_dict) + and info.strict + ): + raise RuntimeError( + "The option indicates that model state_dict is required to save " + "or load, but model state_dict is empty." + f"rank = {dist.get_rank()=}." + ) + + if info.handle_optim: + if not (optim_state_dict and optim_state_dict[STATE]) and not ( + info.cpu_offload and info.full_state_dict + ): + raise RuntimeError( + "The option indicates that model state_dict is required to save, " + f"or load but optim state_dict is empty. {optim_state_dict}" + ) + + for key in model_state_dict.keys(): + if FLAT_PARAM in key: + raise RuntimeError( + f"{key} contains {FLAT_PARAM}. This can happen if the model " + "is not the root module." + ) + + +def _state_dict_fn(obj: Union[nn.Module, torch.optim.Optimizer], api: str) -> Callable: + call = getattr(obj, api) + if call in _patched_state_dict: + call = functools.partial(getattr(obj.__class__, api), self=obj) + return call + + +def _get_model_state_dict( + model: nn.Module, info: _StateDictInfo +) -> Dict[str, ValueType]: + if not info.handle_model: + return {} + + with info.fsdp_context(): + state_dict = _state_dict_fn(model, "state_dict")() + + for key in list(state_dict.keys()): + fqns = _get_fqns(model, key) + assert len(fqns) == 1 + fqn = next(iter(fqns)) + if fqn != key: + # As we only support FSDP, DDP, and TP, the only cases are + # wrapper-based DDP and compiler. Verify if the assumption + # is correct. 
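+            # For example, a key such as "module._orig_mod.layer1.weight"
+            # (DDP wrapping a compiled module) is expected to map to the
+            # canonical FQN "layer1.weight"; ``verify`` below checks that the
+            # key and the FQN differ only by "module"/"_orig_mod" components.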
+ def verify(key, fqn) -> bool: + if len(fqn) >= len(key): + return False + fqn_split = fqn.split(".") + key_split = key.split(".") + fqn_idx = 0 + for key_idx, key_name in enumerate(key_split): + if key_name == fqn_split[fqn_idx]: + fqn_idx += 1 + if fqn_idx == len(fqn_split): + return key_idx == len(key_split) - 1 + elif key_name in ("module", "_orig_mod"): + continue + else: + return False + return True + + if not verify(key, fqn): + raise RuntimeError(f"An unexpected key, {key}, exists. FQN is {fqn}") + state_dict[fqn] = state_dict.pop(key) + + if info.submodule_prefixes: + new_state_dict: Dict[str, ValueType] = {} + # TODO: make this faster. + for fqn in state_dict.keys(): + for prefix in info.submodule_prefixes: + if not fqn.startswith(prefix): + continue + if info.keep_submodule_prefixes: + new_state_dict[fqn] = state_dict[fqn] + else: + new_fqn = fqn[len(prefix) :] + new_state_dict[new_fqn] = state_dict[fqn] + state_dict = new_state_dict + + if info.ignore_frozen_params: + for key, param in model.named_parameters(): + if param.requires_grad: + continue + fqns = _get_fqns(model, key) + for fqn in fqns: + state_dict.pop(fqn) + + for key, p in list(state_dict.items()): + if p.is_meta: + state_dict.pop(key) + + if info.full_state_dict: + ranks_only = tuple() if not info.cpu_offload else (0,) + return _gather_state_dict( + state_dict, cpu_offload=info.cpu_offload, ranks_only=ranks_only + ) + elif info.cpu_offload: + return _offload_state_dict_to_cpu(state_dict) + else: + return state_dict + + +def _load_model_state_dict( + model: nn.Module, + state_dict: Dict[str, ValueType], + info: _StateDictInfo, +) -> _IncompatibleKeys: + if not info.handle_model or not state_dict: + return _IncompatibleKeys({}, {}) + + for key, _ in chain(model.named_parameters(), model.named_buffers()): + fqns = _get_fqns(model, key) + fqns_with_prefix = _get_fqns( + model, key, skip_ddp_prefix=False, skip_compiler_prefix=False + ) + for fqn, fqn_with_prefix in zip(fqns, fqns_with_prefix): + if fqn != fqn_with_prefix: + state_dict[fqn_with_prefix] = state_dict.pop(fqn) + + with info.fsdp_context(): + return cast( + _IncompatibleKeys, + _state_dict_fn(model, "load_state_dict")( + state_dict=state_dict, strict=info.strict + ), + ) + + +def _init_optim_state(optim: torch.optim.Optimizer) -> None: + """ + Initialize optim states by calling the step() with zero grads. + """ + if optim.state: + # The optimizer state is initialized. + return + + for param_group in optim.param_groups: + for param in param_group[PARAMS]: + if param.grad is not None: + raise RuntimeError( + "state_dict can only be used if the optimizer " + "states are initialized (usually after one step() with " + "gradients) or gradients are None. For the later case, " + "state_dict will fake the gradients as zero " + "to initialize the optimizer states. However, the " + "gradients are not None." 
+ ) + if param.requires_grad: + param.grad = torch.zeros_like(param) + optim.step(closure=None) + optim.zero_grad(set_to_none=True) + + +def _get_optim_state_dict( + model: nn.Module, + optimizers: Tuple[torch.optim.Optimizer, ...], + info: _StateDictInfo, +) -> OptimizerStateType: + if not info.handle_optim: + return {} + + optim_state_dict: OptimizerStateType = {STATE: {}, PG: []} + for optim in optimizers: + _init_optim_state(optim) + osd = _state_dict_fn(optim, "state_dict")() + if info.fsdp_modules: + with info.fsdp_context(): + osd = FSDP.optim_state_dict(model, optim, osd) + + # We need to specially handle FlatParameter FSDP as + # FlatParameter FSDP converts the FQNs. + # There are no easy ways to do this conversion systematically. + # We can only use a string replacment without correctness check. + if not osd: + continue + for k in list(osd[STATE].keys()): + if "_orig_mod" in k: + osd[STATE][k.replace("_orig_mod.", "")] = osd[STATE].pop(k) + for g in osd[PG]: + params = [k.replace("_orig_mod.", "") for k in g[PARAMS]] + g[PARAMS] = params + else: + params = list(chain.from_iterable(g[PARAMS] for g in optim.param_groups)) + param_pid_mapping = dict(zip(params, range(len(params)))) + fqn_pid_mapping = {} + for key, param in model.named_parameters(): + fqns = _get_fqns(model, key) + assert len(fqns) == 1 + fqn = next(iter(fqns)) + if param not in param_pid_mapping: + continue + pid = param_pid_mapping[param] + fqn_pid_mapping[fqn] = pid + fqn_pid_mapping[pid] = fqn + + for key in list(osd[STATE].keys()): + fqn = fqn_pid_mapping[key] + osd[STATE][fqn] = osd[STATE].pop(key) + + for group in osd[PG]: + group[PARAMS] = [fqn_pid_mapping[pid] for pid in group[PARAMS]] + + if not osd: + continue + + cast(DictValueType, optim_state_dict[STATE]).update(osd[STATE]) + cast(ListDictValueType, optim_state_dict[PG]).extend(osd[PG]) + + if info.full_state_dict: + ranks_only = tuple() if not info.cpu_offload else (0,) + return _gather_state_dict( + optim_state_dict, cpu_offload=info.cpu_offload, ranks_only=ranks_only + ) + elif info.cpu_offload: + return _offload_state_dict_to_cpu(optim_state_dict) + else: + return optim_state_dict + + +def _split_optim_state_dict( + model: nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: OptimizerStateType, + info: _StateDictInfo, +) -> OptimizerStateType: + """ + Extract the corresponding optim state_dict from ``optim_state_dict`` for + ``optim`` and return the result optim state_dict. + + Args: + model (nn.Module): the root model. + optim (torch.optim.Optimizer): the optimizer. + optim_state_dict (Dict[str, ValueType]): the superset optim state_dict that + contains the optim state_dict of ``optim``. + info (_StateDictInfo): state dict information. + + Returns: + The optim state_dict of ``optim``. 
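+
+    A rough sketch with hypothetical FQNs: if ``optim`` only owns the parameter
+    ``"layer1.weight"``, a combined state_dict covering several optimizers is
+    narrowed down to the entries that belong to ``optim``:
+
+    >>> # xdoctest: +SKIP("undefined vars")
+    >>> combined = {
+    >>>     "state": {"layer1.weight": s1, "layer2.weight": s2},
+    >>>     "param_groups": [
+    >>>         {"params": ["layer1.weight"]},
+    >>>         {"params": ["layer2.weight"]},
+    >>>     ],
+    >>> }
+    >>> _split_optim_state_dict(model, optim, combined, info)["state"].keys()
+    dict_keys(['layer1.weight'])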
+ """ + + state: DictValueType = {} + pg_state: ListDictValueType = [] + return_osd: OptimizerStateType = {STATE: state, PG: pg_state} + pg_mapping: Dict[int, int] = {} + + for param_group in optim.param_groups: + pg_state.append({PARAMS: []}) + for param in param_group[PARAMS]: + for fqn in info.fqn_param_mapping[param]: + params = pg_state[-1][PARAMS] + assert isinstance(params, list) + params.append(fqn) + if param.requires_grad: + state[fqn] = cast(DictValueType, optim_state_dict[STATE])[fqn] + for loaded_param_group in cast(ListDictValueType, optim_state_dict[PG]): + params = loaded_param_group[PARAMS] + assert isinstance(params, list) + if fqn in params: + pg_mapping[id(loaded_param_group)] = len(return_osd[PG]) - 1 + + for param_group in cast(ListDictValueType, optim_state_dict[PG]): + idx = pg_mapping.get(id(param_group), -1) + if idx == -1: + continue + for key, value in param_group.items(): + if key == PARAMS: + continue + # TODO: check if value is the same if exists. + pg_state[idx][key] = value + + return return_osd + + +def _load_optim_state_dict( + model: nn.Module, + optimizers: Tuple[torch.optim.Optimizer, ...], + state_dict: OptimizerStateType, + info: _StateDictInfo, +) -> None: + if not info.handle_optim: + return + + for optim in optimizers: + optim_state_dict = _split_optim_state_dict(model, optim, state_dict, info) + if info.fsdp_modules: + # We need to specially handle FlatParameter FSDP as + # FlatParameter FSDP converts the FQNs. + for original_fqn, _ in model.named_parameters(): + fqns = _get_fqns(model, original_fqn) + fqns_with_compiler = _get_fqns( + model, original_fqn, skip_compiler_prefix=False + ) + if fqns == fqns_with_compiler: + continue + + assert len(fqns) == 1 + fqn = fqns.pop() + fqn_with_compiler = fqns_with_compiler.pop() + for g in optim_state_dict[PG]: + val = cast(Dict[str, Any], g) + params = [ + key.replace(fqn, fqn_with_compiler) for key in val[PARAMS] + ] + val[PARAMS] = params + osd_state = cast(DictValueType, optim_state_dict[STATE]) + for k in list(osd_state.keys()): + if fqn in k: + osd_state[k.replace(fqn, fqn_with_compiler)] = osd_state.pop(k) + + with info.fsdp_context(): + optim_state_dict = FSDP.optim_state_dict_to_load( + model, optim, optim_state_dict + ) + + # Note that we do not have to convert the FQN back to param id here if + # order in optim.param_groups[idx][PARAMS] is the same as the one in + # optim_state_dict[PG][idx][PARAMS]. + _init_optim_state(optim) + _state_dict_fn(optim, "load_state_dict")(state_dict=optim_state_dict) + + +def get_model_state_dict( + model: nn.Module, + *, + submodules: Optional[Set[nn.Module]] = None, + options: Optional[StateDictOptions] = None, +) -> Dict[str, ValueType]: + """ + Return the model state_dict of ``model``. + + See ``get_state_dict`` for the detail usage. + + Args: + model (nn.Module): the nn.Module to the model. + submodules: Optional[Set[nn.Module]]: only return the model parameters + that belong to the submodules. + options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be returned. See + `StateDictOptions` for the details. + + Returns: + The state_dict for ``model``. 
+ + :rtype: typing.Dict[str, ValueType] + """ + with gc_context(): + info = _verify_options( + model, + tuple(), + optim_only=False, + submodules=submodules, + options=options, + ) + model_state_dict = _get_model_state_dict(model, info) + _verify_state_dict(model_state_dict, {}, info) + return model_state_dict + + +def get_optimizer_state_dict( + model: nn.Module, + optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], + *, + submodules: Optional[Set[nn.Module]] = None, + options: Optional[StateDictOptions] = None, +) -> OptimizerStateType: + """ + Return the combined state_dict for optimizers. + + See ``get_state_dict`` for the detail usage. + + Args: + model (nn.Module): the nn.Module to the model. + optimizers (Union[None, Optimizer, Iterable[Optimizer]]): + The optimizers that are used to optimize ``model``. + submodules: Optional[Set[nn.Module]]: only return the model parameters + that belong to the submodules. + options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be returned. See + `StateDictOptions` for the details. + + Returns: + The state_dict for ``optimizers``. + + :rtype: OptimizerStateType + """ + with gc_context(): + optimizers = ( + (optimizers,) + if isinstance(optimizers, torch.optim.Optimizer) + else tuple(optimizers) + ) + info = _verify_options( + model, + optimizers, + optim_only=True, + submodules=submodules, + options=options, + ) + optim_state_dict = _get_optim_state_dict(model, optimizers, info) + _verify_state_dict({}, optim_state_dict, info) + return optim_state_dict + + +def get_state_dict( + model: nn.Module, + optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], + *, + submodules: Optional[Set[nn.Module]] = None, + options: Optional[StateDictOptions] = None, +) -> Tuple[Dict[str, ValueType], OptimizerStateType]: + """ + Return the model state_dict and optimizers state_dict. + + ``get_state_dict`` can process any module that is parallelized by PyTorch + FSDP/fully_shard, DDP/replicate, tensor_parallel/parallelize_module, and any + combination of these parallelisms. The main functions of ``get_state_dict`` + are: 1.) returning a model and optimizer state_dict that can be resharded + with a different number of trainers and/or different parallelisms. + 2.) hiding the parallelism-specific state_dict APIs. Users don't have to call + these APIs. + 3.) sanity checking the result state_dict. + + The keys of the result state dictionary are the canonical FQNs (Fully + Qualified Names). A canonical FQN refers to the FQN based on a parameter's + position in an nn.Module hierarchy. More specifically, a canonical FQN to a + parameter is the FQN returned by ``module.named_parameters()`` or + ``module.named_buffers()`` when the module is not distributed by any + parallelisms. Since the optimizer internally uses parameter IDs to represent + a parameter, there will be a conversion from the parameter IDs to the + canonical FQNs when calling this API. + + ``get_state_dict`` can also process a module that is not parallelized. In + such a case, ``get_state_dict`` only performs one function -- converting the + optimizer parameter IDs to the canonical FQNs. 
+ + Example: + >>> # xdoctest: +SKIP + >>> import torch + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> from torch.distributed.checkpoint.state_dict import get_state_dict + + >>> fsdp_model = FSDP(copy.deepcopy(model)) + >>> fsdp_optim = torch.optim.Adam(model.parameters(), lr=1e-3) + >>> ddp_model = DDP(copy.deepcopy(model)) + >>> ddp_optim = torch.optim.Adam(model.parameters(), lr=1e-3) + + + >>> ddp_state_dict, ddp_optim_state_dict = get_state_dict(ddp_model, ddp_optim) + >>> fsdp_state_dict, fsdp_optim_state_dict = get_state_dict(fsdp_model, fsdp_optim) + + >>> # if we simply call ddp_model.state_dict() and fsdp_model.state_dict(), + >>> # the asserts will fail. + >>> assert ddp_state_dict == fsdp_state_dict + >>> assert ddp_optim_state == fsdp_optim_state_dict + + + Args: + model (nn.Module): the nn.Module to the model. + optimizers (Union[None, Optimizer, Iterable[Optimizer]]): + The optimizers that are used to optimize ``model``. + submodules: Optional[Set[nn.Module]]: only return the model parameters + that belong to the submodules. + options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be returned. See + `StateDictOptions` for the details. + + Returns: + ``Tuple`` that contain model state_dict and optimizer state_dict. + + :rtype: typing.Tuple[typing.Dict[str, ValueType], OptimizerStateType] + """ + + with gc_context(): + optimizers = ( + (optimizers,) + if isinstance(optimizers, torch.optim.Optimizer) + else tuple(optimizers) + ) + info = _verify_options( + model, + optimizers, + optim_only=False, + submodules=submodules, + options=options, + ) + model_state_dict = _get_model_state_dict(model, info) + optim_state_dict = _get_optim_state_dict(model, optimizers, info) + _verify_state_dict(model_state_dict, optim_state_dict, info) + return model_state_dict, optim_state_dict + + +def _unflatten_model_state_dict( + model: nn.Module, + state_dict: Union[Dict[nn.Module, Dict[str, ValueType]], Dict[str, ValueType]], +) -> Dict[str, ValueType]: + if not state_dict: + return {} + + if isinstance(next(iter(state_dict.keys())), nn.Module): + cast_state_dict = cast(Dict[nn.Module, Dict[str, ValueType]], state_dict) + new_state_dict: Dict[str, ValueType] = {} + for submodule, sub_state_dict in cast_state_dict.items(): + for name, m in model.named_modules(): + if m != submodule: + continue + + fqns = _get_fqns(model, name) + assert len(fqns) == 1, "FQNs for a submodule should only have 1 element" + prefix = f"{next(iter(fqns))}." + new_state_dict.update( + {prefix + subfqn: value for subfqn, value in sub_state_dict.items()} + ) + return new_state_dict + else: + return cast(Dict[str, ValueType], state_dict) + + +def set_model_state_dict( + model: nn.Module, + model_state_dict: Dict[str, ValueType], + *, + options: Optional[StateDictOptions] = None, +) -> _IncompatibleKeys: + """Load the model state_dict. + + The counterpart of ``get_model_state_dict`` to set the state_dict to the + model. See ``set_state_dict`` for the detail usage. + + Args: + model (nn.Module): the nn.Module to the model. + model_state_dict: (Dict[str, ValueType]): + the model state_dict to load. If the key of the ``model_state_dict`` + is nn.Module, the key is a submodule of ``model`` and the value should + be the state_dict of the submodule. When loading the state_dict, + the prefix of the submodule will be append to the state_dict. 
+ options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be loaded. See + `StateDictOptions` for the details. + + Returns: + ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: + * **missing_keys** is a list of str containing the missing keys + * **unexpected_keys** is a list of str containing the unexpected keys + + :type model_state_dict: typing.Dict[str, ValueType] + """ + model_state_dict: Dict[str, ValueType] = _unflatten_model_state_dict( + model, model_state_dict + ) + with gc_context(): + info = _verify_options(model, tuple(), optim_only=False, options=options) + + _verify_state_dict(model_state_dict, {}, info) + return _load_model_state_dict(model, model_state_dict, info) + + +def set_optimizer_state_dict( + model: nn.Module, + optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], + *, + optim_state_dict: OptimizerStateType, + options: Optional[StateDictOptions] = None, +) -> None: + """Load the optimizers state_dict. + + The counterpart of ``get_optimizer_state_dict`` to set the state_dict to the + optimizers. See ``set_state_dict`` for the detail usage. + + Args: + model (nn.Module): the nn.Module to the model. + optimizers (Union[Optimizer, Iterable[Optimizer]]): + The optimizers that are used to optimize ``model``. + optim_state_dict: OptimizerStateType: + the optimizer state_dict to load. + options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be loaded. See + `StateDictOptions` for the details. + + Returns: + None + + :type optim_state_dict: typing.OptimizerStateType + """ + with gc_context(): + optimizers = ( + (optimizers,) + if isinstance(optimizers, torch.optim.Optimizer) + else tuple(optimizers) + ) + info = _verify_options(model, optimizers, optim_only=True, options=options) + + _verify_state_dict({}, optim_state_dict, info) + _load_optim_state_dict(model, optimizers, optim_state_dict, info) + + +def set_state_dict( + model: nn.Module, + optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], + *, + model_state_dict: Dict[str, ValueType], + optim_state_dict: OptimizerStateType, + options: Optional[StateDictOptions] = None, +) -> _IncompatibleKeys: + """Load the model state_dict and optimizers state_dict. + + The counterpart of ``get_state_dict`` to set the state_dict to the model and + optimizers. The given ``model_state_dict`` and ``optim_state_dict`` do not + have to be returned by ``get_state_dict`` but must meet the following + requirements: 1) all FQNs are canonical FQNs as defined in ``get_state_dict``, + 2) if a tensor is sharded, it must be either a ShardedTensor or DTensor, + 3) optimizer state_dict cannot contain the parameter IDs; the keys should be + the canonical FQNs. + + Args: + model (nn.Module): the nn.Module to the model. + optimizers (Union[Optimizer, Iterable[Optimizer]]): + The optimizers that are used to optimize ``model``. + model_state_dict: (Union[Dict[nn.Module, Dict[str, ValueType]], Dict[str, ValueType]]): + the model state_dict to load. If the key of the ``model_state_dict`` + is nn.Module, the key is a submodule of ``model`` and the value should + be the state_dict of the submodule. When loading the state_dict, + the prefix of the submodule will be append to the state_dict. + optim_state_dict: OptimizerStateType: + the optimizer state_dict to load. + options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be loaded. 
See + `StateDictOptions` for the details. + + Returns: + ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: + * **missing_keys** is a list of str containing the missing keys of the model state_dict. + * **unexpected_keys** is a list of str containing the unexpected keys of the model state_dict. + + :type model_state_dict: typing.Dict[str, ValueType] + :type optim_state_dict: typing.OptimizerStateType + """ + + model_state_dict: Dict[str, ValueType] = _unflatten_model_state_dict( + model, model_state_dict + ) + with gc_context(): + optimizers = ( + (optimizers,) + if isinstance(optimizers, torch.optim.Optimizer) + else tuple(optimizers) + ) + info = _verify_options( + model, optimizers, optim_only=not model_state_dict, options=options + ) + + _verify_state_dict(model_state_dict, optim_state_dict, info) + _load_optim_state_dict(model, optimizers, optim_state_dict, info) + return _load_model_state_dict(model, model_state_dict, info) + + +# TODO: correct the state_dict function signature. +# TODO: this API is not yet fully tested. Make it private +@no_type_check +def _patch_model_state_dict( + model: nn.Module, + *, + options: Optional[StateDictOptions] = None, +) -> None: + """Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model``. + + Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model`` to + be a partial function to call ``get_state_dict`` and ``set_state_dict``. + + Example: + from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + from torch.distributed.checkpoint.state_dict import patch_model_state_dict + + model = fsdp(model) + patch_model_state_dict(model) + + Args: + model (nn.Module): the nn.Module to the model. + options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be loaded. See + `StateDictOptions` for the details. + Returns: + None + """ + + _state_dict_call = functools.partial( + get_model_state_dict, + model=model, + options=options, + ) + + def state_dict_call(): + return _state_dict_call() + + model.state_dict = state_dict_call + + _load_state_dict_call = functools.partial( + set_model_state_dict, + model=model, + options=options, + ) + + def load_state_dict_call(state_dict: Dict[str, Any]): + _load_state_dict_call(model_state_dict=state_dict) + + model.load_state_dict = load_state_dict_call + + _patched_state_dict.add(state_dict_call) + _patched_state_dict.add(load_state_dict_call) + + +# TODO: correct the load_state_dict function signature. +# TODO: this API is not yet fully tested. Make it private +@no_type_check +def _patch_optimizer_state_dict( + model: nn.Module, + *, + optimizers: Tuple[torch.optim.Optimizer, ...], + options: Optional[StateDictOptions] = None, +) -> None: + """Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers``. + + Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers`` to + be a partial function to call ``get_state_dict`` and ``set_state_dict``. + + Note that if there are multiple optimizers, all of the optimizers will be patched. + So users only need to call one of the state_dict() to get the full result. + + Example: + from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + from torch.distributed.checkpoint.state_dict import patch_model_state_dict + + model = fsdp(model) + patch_model_state_dict(model) + + Args: + model (nn.Module): the nn.Module to the model. 
+ options (StateDictOptions): the options to control how + model state_dict and optimizer state_dict should be loaded. See + `StateDictOptions` for the details. + Returns: + None + """ + + _state_dict_call = functools.partial( + get_optimizer_state_dict, + model=model, + optimizers=optimizers, + options=options, + ) + + def state_dict_call(): + return _state_dict_call() + + _load_state_dict_call = functools.partial( + set_optimizer_state_dict, + model=model, + optimizers=optimizers, + options=options, + ) + + def load_state_dict_call(state_dict: Dict[str, Any]): + _load_state_dict_call(optim_state_dict=state_dict) + + _patched_state_dict.add(state_dict_call) + _patched_state_dict.add(load_state_dict_call) + optimizers = ( + (optimizers,) + if isinstance(optimizers, torch.optim.Optimizer) + else tuple(optimizers) + ) + for optim in optimizers: + optim.state_dict = state_dict_call + optim.load_state_dict = load_state_dict_call diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..fdc90961115ac2656690a705672f1846f9ef25fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py @@ -0,0 +1,218 @@ +import os +import warnings +from typing import Any, cast, Dict, Optional, Union + +import torch +import torch.distributed as dist +from torch.distributed.checkpoint.stateful import Stateful + +from ._storage_utils import _storage_setup +from .default_planner import DefaultLoadPlanner +from .planner import LoadPlanner +from .storage import StorageReader +from .utils import _all_gather_keys, _api_bc_check, _DistWrapper, _profile + +__all__ = ["load_state_dict", "load"] + + +def load_state_dict( + state_dict: Dict[str, Any], + storage_reader: StorageReader, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[LoadPlanner] = None, +) -> None: + """This method is deprecated. Please switch to 'load'.""" + warnings.warn( + "'load_state_dict' is deprecated and will be removed in future versions. " + "Please use 'load' instead." + ) + storage_reader.reset() + with _profile(): + # TODO: test returning `load` here instead. + return _load_state_dict( + state_dict, + storage_reader, + process_group, + coordinator_rank, + no_dist, + planner, + ) + + +@_api_bc_check +def load( + state_dict: Dict[str, Any], + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_reader: Optional[StorageReader] = None, + planner: Optional[LoadPlanner] = None, + process_group: Optional[dist.ProcessGroup] = None, +) -> None: + """ + Load a distributed ``state_dict`` in SPMD style. + + Each rank will try to read the least amount of data necessary + to fullfill the requested `state_dict`. When loading :class:`ShardedTensor` + or :class:`DTensor` instances, each rank only reads data for their local shards. + + For each ``Stateful`` object (having both a ``state_dict`` and a ``load_state_dict``), + load will first call ``state_dict`` before attempting deserialization, followed by + ``load_state_dict`` once the deserialization is complete. + + .. warning:: + All tensors in ``state_dict`` must be allocated on their + destination device *prior to* calling this function. + + All non-tensor data is loaded using `torch.load()` and modified in place + on state_dict. + + .. 
warning:: + Users must call `load_state_dict` on the root module to ensure load + post-processing and non-tensor data properly propagate. + + .. note:: + If no process group is initialized, this function assumes the intent + is to load a checkpoint into the local process. This can be useful in the + case of local inference, and when using regular Tensors (as opposed to DTensor + or ShardedTensor). + + .. note:: + Rank 0 is assumed to be the coordinator rank. + + Args: + state_dict (Dict[str, Any]): The state_dict to load the checkpoint into. + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_reader (Optional[StorageReader]): + Instance of StorageReader used to perform reads. If this is not + specified, DCP will automatically infer the reader based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + planner (Optional[LoadPlanner]): + Instance of LoadPlanner. If this is not specified, the default + planner will be used. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + None. + + Example: + >>> # xdoctest: +SKIP + >>> my_model = MyModule() + >>> optimizer = Adagrad(my_model.parameters()) + >>> model_state_dict = my_model.state_dict() + >>> fs_storage_reader = torch.distributed.checkpoint.FileSystemReader("/checkpoint/1") + + >>> torch.distributed.checkpoint.load_state_dict( + >>> state_dict=model_state_dict, + >>> storage_reader=fs_storage_reader, + >>> ) + + >>> # module.load_state_dict() function might have customized steps + >>> # to flush the state_dict, must call it to + >>> # ensure correct behavior. + >>> my_model.load_state_dict(model_state_dict) + + .. note:: + load_state_dict uses collectives to coordinate reads across ranks. + For NCCL-based process groups, internal tensor representations of + objects must be moved to the GPU device before communication takes place. + In this case, the device used is given by ``torch.cuda.current_device()`` + and it is the user's responsibility to ensure that this is set so that each + rank has an individual GPU, via ``torch.cuda.set_device()``. + """ + + no_dist = not (dist.is_available() and dist.is_initialized()) + if no_dist: + warnings.warn( + "torch.distributed is unavailable or uninitialized, assuming the intent is to load in a single process." + ) + + with _profile(): + storage_reader = cast( + StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True) + ) + + if no_dist: + keys = list(state_dict.keys()) + else: + keys = _all_gather_keys(state_dict, process_group) + if keys != sorted(state_dict.keys()): + warnings.warn( + "Detected mismatched keys in state dict after all gather!" + " This behavior is unsupported and may cause errors."
+ ) + + statetful_sd = {} + for key in keys: + if key not in state_dict: + continue + elem = state_dict[key] + statetful_sd[key] = ( + elem.state_dict() if isinstance(elem, Stateful) else elem + ) + + _load_state_dict( + state_dict=statetful_sd, + storage_reader=storage_reader, + process_group=process_group, + no_dist=no_dist, + planner=planner, + ) + for key in keys: + if key not in state_dict: + continue + elem = state_dict[key] + if isinstance(elem, Stateful): + elem.load_state_dict(statetful_sd[key]) + state_dict[key] = elem + + +def _load_state_dict( + state_dict: Dict[str, Any], + storage_reader: StorageReader, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[LoadPlanner] = None, +) -> None: + torch._C._log_api_usage_once("torch.distributed.checkpoint.load_state_dict") + + distW = _DistWrapper(process_group, not no_dist, coordinator_rank) + if planner is None: + planner = DefaultLoadPlanner() + + def local_step(): + assert planner is not None + metadata = storage_reader.read_metadata() + planner.set_up_planner(state_dict, metadata, distW.is_coordinator) + storage_reader.set_up_storage_reader(metadata, distW.is_coordinator) + + local_plan = planner.create_local_plan() + local_plan = storage_reader.prepare_local_plan(local_plan) + return local_plan + + def global_step(all_local_plans): + assert planner is not None + all_local_plans = planner.create_global_plan(all_local_plans) + all_local_plans = storage_reader.prepare_global_plan(all_local_plans) + return all_local_plans + + central_plan = distW.reduce_scatter("plan", local_step, global_step) + + def read_data(): + assert planner is not None + final_local_plan = planner.finish_plan(central_plan) + all_reads = storage_reader.read_data(final_local_plan, planner) + + all_reads.wait() + return None + + _ = distW.all_gather("read", read_data) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py new file mode 100644 index 0000000000000000000000000000000000000000..80f13bc8d39cc576ecf3ee785608e596c4148f75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py @@ -0,0 +1,288 @@ +import os +import warnings +from concurrent.futures import Future, ThreadPoolExecutor +from typing import cast, Optional, Union + +import torch +import torch.distributed as dist +from torch.distributed._state_dict_utils import _offload_state_dict_to_cpu +from torch.distributed.checkpoint.stateful import Stateful +from torch.distributed.distributed_c10d import _get_default_group + +from ._storage_utils import _storage_setup +from .default_planner import DefaultSavePlanner +from .metadata import Metadata, STATE_DICT_TYPE +from .planner import SavePlanner +from .storage import StorageWriter +from .utils import _api_bc_check, _DistWrapper, _profile + + +__all__ = ["save_state_dict", "save", "async_save"] + + +def save_state_dict( + state_dict: STATE_DICT_TYPE, + storage_writer: StorageWriter, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[SavePlanner] = None, +) -> Metadata: + """This method is deprecated. Please switch to 'save'.""" + warnings.warn( + "'save_state_dict' is deprecated and will be removed in future versions." + "Please use 'save' instead." + ) + + storage_writer.reset() + + # TODO: test returning `save` here instead. 
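# --- Editor's note: illustrative sketch, not part of the vendored file. -------
# `load` (above) resolves every value in `state_dict` that implements the
# `Stateful` protocol by calling its `state_dict()` first, loading into that
# dict in place, and then handing the result back through `load_state_dict()`.
# A minimal, hypothetical usage of the `checkpoint_id` form is sketched below;
# the path is a placeholder assumed to hold a checkpoint written by `save`, and
# `dcp.load` assumes the package re-exports `load` at the top level.
import torch
import torch.distributed.checkpoint as dcp

model = torch.nn.Linear(4, 4)
state = {"model": model.state_dict()}
dcp.load(state, checkpoint_id="/tmp/ckpt")  # storage reader inferred from the path
model.load_state_dict(state["model"])       # flush the loaded values into the module
# ------------------------------------------------------------------------------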
+ with _profile(): + return _save_state_dict( + state_dict, + storage_writer, + process_group, + coordinator_rank, + no_dist, + planner, + ) + + +@_api_bc_check +def save( + state_dict: STATE_DICT_TYPE, + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_writer: Optional[StorageWriter] = None, + planner: Optional[SavePlanner] = None, + process_group: Optional[dist.ProcessGroup] = None, +) -> Metadata: + """ + Save a distributed model in SPMD style. + + This function is different from ``torch.save()`` as it handles + ``ShardedTensor`` , and ``DTensor`` by having each rank only save their local shards. + + For each ``Stateful`` object (having both a ``state_dict`` and a ``load_state_dict``), + save will call ``state_dict`` before serialization. + + .. warning:: + There is no guarantees of Backwards Compatibility across PyTorch versions + for saved state_dicts. + + .. warning:: + If using the `process_group` argument, make sure that only its ranks + call `save_state_dict` and that all data in state_dict belong to it. + + .. note:: + When saving checkpoint for FSDP's `ShardingStrategy.HYBRID_SHARD`, only one of + the shard_group should be calling `save_state_dict` and the corresponding process + group needs to be passed in. + + .. note:: + If no process group is available, this function assumes the intention is to save the + state_dict in the local process. + + .. note: + Rank 0 is assumed to be the coordinator rank. + + + Args: + state_dict (Dict[str, Any]): The state_dict to save. + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_writer (Optional[StorageWriter]): + Instance of StorageWriter used to perform writes. If this is not + specified, DCP will automatically infer the writer based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + planner (Optional[SavePlanner]): + Instance of SavePlanner. If this is not specificed, the default + planner will be used. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + Metadata: Metadata object for the saved checkpoint. + + Example: + >>> # xdoctest: +SKIP + >>> my_model = MyModule() + + >>> state_dict = {"model": my_model} + + >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1") + >>> torch.distributed.checkpoint.save( + >>> state_dict=state_dict, + >>> storage_writer=fs_storage_writer, + >>> ) + + .. note:: + save_state_dict uses collectives to coordinate writes across ranks. + For NCCL-based process groups, internal tensor representations of + objects must be moved to the GPU device before communication takes place. + In this case, the device used is given by ``torch.cuda.current_device()`` + and it is the user's responsibility to ensure that this is set so that + each rank has an individual GPU, via ``torch.cuda.set_device()``. + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint.save") + + no_dist = not (dist.is_available() and dist.is_initialized()) + if no_dist: + warnings.warn( + "torch.distributed is unavailable or uninitialized, assuming the intent is to save in a single process." 
+ ) + + with _profile(): + storage_writer = cast( + StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False) + ) + + return _save_state_dict( + state_dict=_stateful_to_state_dict(state_dict), + storage_writer=storage_writer, + process_group=process_group, + no_dist=no_dist, + planner=planner, + ) + + +def async_save( + state_dict: STATE_DICT_TYPE, + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_writer: Optional[StorageWriter] = None, + planner: Optional[SavePlanner] = None, + process_group: Optional[dist.ProcessGroup] = None, +) -> Future: + """Asynchronous version of ``save_state_dict``. This code first de-stages the state_dict on CPU, and then calls + `save` in a separate thread. + + .. warning:: + This feature is experimental and subject to change. + + Args: + state_dict (Dict[str, Any]): The state_dict to save. + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_writer (Optional[StorageWriter]): + Instance of StorageWriter used to perform writes. If this is not + specified, DCP will automatically infer the writer based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + planner (Optional[SavePlanner]): + Instance of SavePlanner. If this is not specificed, the default + planner will be used. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + Future: A future holding the resultant Metadata object from `save`. + + Example: + >>> # xdoctest: +SKIP + >>> my_model = MyModule() + + >>> state_dict = {"model": my_model} + + >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1") + >>> checkpoint_future = torch.distributed.checkpoint.async_save( + >>> state_dict=state_dict, + >>> storage_writer=fs_storage_writer, + >>> ) + >>> + >>> # ... do some work ... 
+ >>> + >>> checkpoint_future.result() + + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint.async_save") + + pg = process_group or _get_default_group() + assert ( + torch.device("cpu") in pg._device_types # type: ignore[attr-defined] + ), "A CPU backend must be enabled for async save; try initializing process group with 'cpu:gloo,cuda:ncc'" + + cpu_state_dict = _offload_state_dict_to_cpu(_stateful_to_state_dict(state_dict)) + + executor = ThreadPoolExecutor(max_workers=1) + f = executor.submit( + save, + cpu_state_dict, + checkpoint_id=checkpoint_id, + storage_writer=storage_writer, + planner=planner, + process_group=process_group, + ) + f.add_done_callback(lambda f: executor.shutdown(wait=False)) + + return f + + +def _stateful_to_state_dict(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE: + """Creates a shallow copy of `state_dict` where `state_dict` is called for each Stateful object.""" + stateful_state_dict = {} + for key, elem in state_dict.items(): + stateful_state_dict[key] = ( + elem.state_dict() if isinstance(elem, Stateful) else elem + ) + return stateful_state_dict + + +def _save_state_dict( + state_dict: STATE_DICT_TYPE, + storage_writer: StorageWriter, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[SavePlanner] = None, +) -> Metadata: + torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict") + + distW = _DistWrapper(process_group, not no_dist, coordinator_rank) + if planner is None: + planner = DefaultSavePlanner() + assert planner is not None + + global_metatadata = None + + def local_step(): + assert planner is not None + planner.set_up_planner(state_dict, distW.is_coordinator) + storage_writer.set_up_storage_writer(distW.is_coordinator) + local_plan = planner.create_local_plan() + local_plan = storage_writer.prepare_local_plan(local_plan) + return local_plan + + def global_step(all_local_plans): + nonlocal global_metatadata + + assert planner is not None + all_local_plans, global_metatadata = planner.create_global_plan(all_local_plans) + all_local_plans = storage_writer.prepare_global_plan(all_local_plans) + return all_local_plans + + central_plan = distW.reduce_scatter("plan", local_step, global_step) + + def write_data(): + assert planner is not None + final_local_plan = planner.finish_plan(central_plan) + all_writes = storage_writer.write_data(final_local_plan, planner) + + all_writes.wait() + return all_writes.value() + + def finish_checkpoint(all_results): + assert global_metatadata is not None + storage_writer.finish(metadata=global_metatadata, results=all_results) + return global_metatadata + + return distW.all_reduce("write", write_data, finish_checkpoint) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py new file mode 100644 index 0000000000000000000000000000000000000000..577af4197600d1a22d454d342389dd3027b64db5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py @@ -0,0 +1,43 @@ +from typing import Any, Dict, runtime_checkable, TypeVar + +from typing_extensions import Protocol + + +__all__ = ["Stateful", "StatefulT"] + + +@runtime_checkable +class Stateful(Protocol): + """ + Stateful protocol for objects that can be checkpointed and restored. + """ + + def state_dict(self) -> Dict[str, Any]: + """ + Objects should return their state_dict representation as a dictionary. 
+ The output of this function will be checkpointed, and later restored in + `load_state_dict()`. + + .. warning:: + Because of the inplace nature of restoring a checkpoint, this function + is also called during `torch.distributed.checkpoint.load`. + + + Returns: + Dict: The objects state dict + """ + + ... + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + """ + Restore the object's state from the provided state_dict. + + Args: + state_dict: The state dict to restore from + """ + + ... + + +StatefulT = TypeVar("StatefulT", bound=Stateful) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..1a0e87aca7abc787ac220d6d1cf590163614e1ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py @@ -0,0 +1,279 @@ +import abc +import os +from dataclasses import dataclass +from typing import Any, List, Union + +from torch.futures import Future + +from .metadata import Metadata, MetadataIndex +from .planner import LoadPlan, LoadPlanner, SavePlan, SavePlanner + +__all__ = ["WriteResult", "StorageWriter", "StorageReader"] + + +@dataclass(frozen=True) +class WriteResult: + index: MetadataIndex + + size_in_bytes: int + storage_data: Any + + +class StorageWriter(abc.ABC): + """ + Interface used by ``save_state_dict`` to write to storage. + + One StorageWriter instance acts as both the coordinator and the follower + in a distributed checkpoint. As part of initialization, each instance + is told its role. + + A subclass should expect the following sequence of calls. + + 0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id. + 1) (all ranks) set_up_storage_writer() + 2) (all ranks) prepare_local_plan() + 3) (coordinator) prepare_global_plan() + 4) (all ranks) write_data() + 5) (coordinator) finish() + """ + + @abc.abstractmethod + def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: + """ + Calls to indicates a brand new checkpoint write is going to happen. + A checkpoint_id may be present if users set the checkpoint_id for + this checkpoint write. The meaning of the checkpiont_id is + storage-dependent. It can be a path to a folder/file or a key for + a key-value storage. + + Args: + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + """ + ... + + @abc.abstractmethod + def set_up_storage_writer(self, is_coordinator: bool) -> None: + """ + Initialize this instance. + + Args: + is_coordinator (bool): Whether this instance is responsible for coordinating + the checkpoint. + """ + pass + + @abc.abstractmethod + def prepare_local_plan(self, plan: SavePlan) -> SavePlan: + """ + Perform storage-specific local planning. + + While this method can produce a completely different plan, the recommended + way is to store storage specific data in SavePlan::storage_data. + + Args: + plan (SavePlan): The local plan from the ``SavePlanner`` in use. + + Returns: + A transformed ``SavePlan`` after storage local planning + """ + pass + + @abc.abstractmethod + def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]: + """ + Perform centralized planning of storage. + + This method is only called on the coordinator instance. 
+ + While this method can produce a completely different plan, the preferred + way is to store storage specific data in SavePlan::storage_data. + + Args: + plans: A list of ``SavePlan`` instances, one for each rank. + + Returns: + A list of transformed ``SavePlan`` after storage global planning + """ + pass + + @abc.abstractmethod + def write_data( + self, plan: SavePlan, planner: SavePlanner + ) -> Future[List[WriteResult]]: + """ + Write all items from ``plan`` using ``planner`` to resolve the data. + + A subclass should call ``SavePlanner::resolve_data`` on each item + from the plan to get access to the underlying object to write. + + Subclasses should lazily call `resolve_data` as it can allocate memory. + In case of tensors, make following assumptions: + + - They might be on any device, including not matching the one on ``WriteItem::tensor_data`` + - They might be views or not contiguous. Only the projection needs to be saved. + + Args: + plan (SavePlan): The save plan to execute. + planner (SavePlanner): Planner object to be used to resolve items to data. + + Returns: + A future that completes to a list of WriteResult + """ + pass + + @abc.abstractmethod + def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None: + """ + Write the metadata and marks the current checkpoint as successful. + + The actual format/schema used for serializing `metadata` is an + implementation detail. The only requirement is that it's recoverable + in to the same object graph. + + Args: + metadata (Metadata): metadata for the new checkpoint + results: A list of WriteResults from all ranks. + + Returns: + None + """ + pass + + @classmethod + @abc.abstractmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + """ + Check if the given checkpoint_id is supported by the stroage. This allow + us to enable automatic storage selection. + """ + ... + + +class StorageReader(abc.ABC): + """ + Interface used by ``load_state_dict`` to read from storage. + + One StorageReader instance acts as both the coordinator and the follower + in a distributed checkpoint. As part of initialization, each instance + is told its role. + + A subclass should expected the following sequence of calls by ``load_state_dict``: + + 0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id. + 1) (all ranks) read_metadata() + 2) (all ranks) set_up_storage_reader() + 3) (all ranks) prepare_local_plan() + 4) (coordinator) prepare_global_plan() + 5) (all ranks) read_data() + """ + + @abc.abstractmethod + def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: + """ + Calls to indicates a brand new checkpoint read is going to happen. + A checkpoint_id may be present if users set the checkpoint_id for + this checkpoint read. The meaning of the checkpiont_id is + storage-dependent. It can be a path to a folder/file or a key for + a key-value storage. + + Args: + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is more like a key-value store. + (Default: ``None``) + """ + ... + + @abc.abstractmethod + def read_metadata(self) -> Metadata: + """ + Read the checkpoint metadata. + + Returns: + The metadata object associated with the checkpoint being loaded. 
+ + """ + pass + + @abc.abstractmethod + def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: + """ + Initialize this instance. + + Args: + metadata (Metadata): The metadata schema to use. + is_coordinator (bool): Whether this instance is responsible for coordinating + the checkpoint. + """ + pass + + @abc.abstractmethod + def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan: + """ + Perform storage-specific local planning. + + While this method can produce a completely different plan, the recommended + way is to store storage specific data in LoadPlan::storage_data. + + Args: + plan (LoadPlan): The local plan from the ``LoadPlan`` in use. + + Returns: + A transformed ``LoadPlan`` after storage local planning + """ + pass + + @abc.abstractmethod + def prepare_global_plan(self, plans: List[LoadPlan]) -> List[LoadPlan]: + """ + Perform centralized planning of storage loading. + + This method is only called on the coordinator instance. + + While this method can produce a completely different plan, the preferred + way is to store storage specific data in LoadPlan::storage_data. + + Args: + plans: A list of ``LoadPlan`` instances, one for each rank. + + Returns: + A list of transformed ``LoadPlan`` after storage global planning + """ + pass + + @abc.abstractmethod + def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]: + """ + Read all items from ``plan`` using ``planner`` to resolve the data. + + A subclass should call ``LoadPlanner::load_bytes`` to deserialize a BytesIO + object into the right place. + + A subclass should call ``LoadPlanner::resolve_tensor`` to get access to the + tensors that in should load data into. + + It's the StorageLayer responsibility to properly schedule any cross device copies + required. + + Args: + plan (LoadPlan): The local plan to execute on + planner (LoadPlanner): The planner object to use to resolve items. + + Returns: + A future that completes once all reads are finished. + """ + pass + + @classmethod + @abc.abstractmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + """ + Check if the given checkpoint_id is supported by the stroage. This allow + us to enable automatic storage selection. + """ + ... 
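# --- Editor's note: illustrative sketch, not part of the vendored file. -------
# A toy, in-memory StorageWriter following the call sequence documented above
# (reset -> set_up_storage_writer -> prepare_local_plan -> prepare_global_plan
# -> write_data -> finish). The class name and its behavior are hypothetical;
# a real writer would serialize each item via `planner.resolve_data(...)` and
# persist the resulting bytes durably.
import os
from typing import Any, Dict, List, Union

from torch.futures import Future
from torch.distributed.checkpoint.metadata import Metadata
from torch.distributed.checkpoint.planner import SavePlan, SavePlanner
from torch.distributed.checkpoint.storage import StorageWriter, WriteResult


class InMemoryWriter(StorageWriter):
    def __init__(self) -> None:
        self.blobs: Dict[Any, Any] = {}   # payloads a real writer would persist
        self.metadata: Union[Metadata, None] = None

    def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
        self.blobs.clear()                # a brand-new checkpoint write is starting

    def set_up_storage_writer(self, is_coordinator: bool) -> None:
        self.is_coordinator = is_coordinator

    def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
        return plan                       # no storage-specific rewriting

    def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]:
        return plans                      # coordinator-only hook; keep plans as-is

    def write_data(self, plan: SavePlan, planner: SavePlanner) -> Future[List[WriteResult]]:
        # A real writer would iterate the plan's items, call planner.resolve_data(item)
        # for each one, and record a WriteResult per item; this toy writes nothing.
        fut: Future[List[WriteResult]] = Future()
        fut.set_result([])
        return fut

    def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
        self.metadata = metadata          # "commit" the checkpoint

    @classmethod
    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
        return True                       # accept any id for this toy writer
# ------------------------------------------------------------------------------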
diff --git a/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d781d9839beaa713f36281a54a591cb061681a3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py @@ -0,0 +1,429 @@ +import cProfile +import inspect +import io +import itertools +import os +import warnings +from contextlib import contextmanager +from functools import wraps +from pstats import Stats +from typing import Any, Callable, cast, Dict, List, Optional, Sequence, TypeVar, Union + +import torch +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharded_tensor.shard import Shard +from torch.distributed._tensor import DTensor + +from .api import ( + _is_wrapped_exception, + _wrap_exception, + CheckpointException, + WRAPPED_EXCEPTION, +) +from .metadata import MetadataIndex, STATE_DICT_TYPE + +__all__ = ["find_tensor_shard", "find_state_dict_object"] + +T = TypeVar("T") +R = TypeVar("R") + + +def _get_failure_dict( + results: List[Union[T, WRAPPED_EXCEPTION]] +) -> Dict[int, WRAPPED_EXCEPTION]: + return cast( + Dict[int, WRAPPED_EXCEPTION], + {i: err for i, err in enumerate(results) if _is_wrapped_exception(err)}, + ) + + +def _all_gather_keys( + local_dict: Dict[Any, Any], group: Optional[dist.ProcessGroup] = None +) -> List[Any]: + """Gathers all keys, and returns them sorted.""" + keys = list(local_dict.keys()) + gathered_keys: List[List[Any]] = [None] * dist.get_world_size() # type: ignore[list-item] + + dist.all_gather_object(gathered_keys, keys, group=group) + return sorted(set(itertools.chain.from_iterable(gathered_keys))) + + +class _DistWrapper: + """ + This is a wrapper around PG that provides a series of features around object collectives. + + It works without distributed initialized, where most collectives turns into nops. + + All variants that take functions are exception robust, meaning that if one or more + ranks raise errors, all ranks will observe those. 
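    Example (editor's illustrative sketch, not from the upstream docstring)::

        # With use_dist=False every collective degrades to a local no-op, so the
        # same planning code can run in a single, non-distributed process.
        wrapper = _DistWrapper(group=None, use_dist=False, coordinator_rank=0)
        plans = wrapper.all_gather("plan", lambda: {"rank": wrapper.get_rank()})
        obj = wrapper.broadcast_object({"status": "ok"})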
+ """ + + def __init__( + self, + group: Optional[dist.ProcessGroup], + use_dist: bool, + coordinator_rank: int, + ): + self.group = group + self.use_dist = use_dist + self.coordinator_rank = coordinator_rank + if self.use_dist: + self.rank = dist.get_rank(group) + self.is_coordinator = self.rank == coordinator_rank + else: + self.rank = 0 + self.is_coordinator = True + + def get_rank(self) -> int: + return self.rank + + def get_world_size(self) -> int: + if self.use_dist: + return dist.get_world_size(self.group) + return 1 + + def broadcast_object(self, object: Optional[T]) -> T: + """Implement functionality similar to c10d::broadcast_object_list but without distributed enabled.""" + object_list = [object] + if self.use_dist: + dist.broadcast_object_list( + object_list=object_list, + group=self.group, + src=self.coordinator_rank, + ) + return cast(T, object_list[0]) + + def gather_object(self, object: T) -> Optional[List[T]]: + """Implement functionality similar to c10d::gather_object but without distributed enabled.""" + if self.use_dist: + gather_objs = ( + cast(List[T], [None] * dist.get_world_size(self.group)) + if self.is_coordinator + else None + ) + + dist.gather_object( + obj=object, + object_gather_list=gather_objs if self.is_coordinator else None, + dst=self.coordinator_rank, + group=self.group, + ) + result = gather_objs + else: + result = [object] + return result + + def all_gather_object(self, object: T) -> List[T]: + """Implement functionality similar to c10d::all_gather_object but without distributed enabled.""" + if self.use_dist: + gather_objs = cast(List[T], [None] * dist.get_world_size(self.group)) + + dist.all_gather_object( + object_list=gather_objs, obj=object, group=self.group + ) + else: + gather_objs = [object] + return gather_objs + + def scatter_object(self, object_list: Optional[List[T]]) -> T: + """Implement functionality similar to c10d::scatter_object but without distributed enabled.""" + if self.use_dist: + gather_result = cast(List[T], [None]) + dist.scatter_object_list( + scatter_object_output_list=gather_result, + scatter_object_input_list=object_list if self.is_coordinator else None, + src=self.coordinator_rank, + group=self.group, + ) + + local_reply = gather_result[0] + else: + assert object_list is not None + local_reply = object_list[0] + return local_reply + + def reduce_scatter( + self, + step: str, + map_fun: Callable[[], T], + reduce_fun: Callable[[List[T]], List[R]], + ) -> R: + """ + Compute a value on each rank, then do centralized reduce on a single rank, followed by a scatter. + + This method operates in the following way: + Run ``map_fun`` on all ranks + Gather results on rank 0 + Call ``reduce_fun`` on all those values + Scatter to each rank part of the result. + """ + local_data: Union[WRAPPED_EXCEPTION, T] + try: + local_data = map_fun() + except BaseException as e: + local_data = _wrap_exception(e) + + all_data = self.gather_object(local_data) + all_results: Optional[List[Union[R, CheckpointException]]] = None + if self.is_coordinator: + assert all_data is not None + node_failures = _get_failure_dict(all_data) + + if len(node_failures) == 0: + try: + # N.B. why can't mypy cast List[R] to List[Union[R, WRAPPED_EXCEPTION]]? 
+ all_results = cast( + List[Union[R, CheckpointException]], + reduce_fun(cast(List[T], all_data)), + ) + except BaseException as e: + node_failures[self.rank] = _wrap_exception(e) + + if len(node_failures) > 0: + all_results = [ + CheckpointException(step, node_failures) + ] * self.get_world_size() + + result = self.scatter_object(all_results) + if isinstance(result, CheckpointException): + raise result + return result + + def all_reduce( + self, + step: str, + map_fun: Callable[[], T], + reduce_fun: Callable[[List[T]], R], + ) -> R: + """ + Compute a value on each rank, then do centralized reduce on a single rank, followed by a broadcast. + + This method operates in the following way: + Run ``map_fun`` on all ranks + Gather results on rank 0 + Call ``reduce_fun`` on all those values + Broadcast the reduced value to all ranks. + """ + local_data: Union[T, WRAPPED_EXCEPTION] + try: + local_data = map_fun() + except BaseException as e: + local_data = _wrap_exception(e) + + all_data = self.gather_object(local_data) + result: Optional[Union[R, CheckpointException]] = None + if self.is_coordinator: + assert all_data is not None + node_failures = _get_failure_dict(all_data) + if len(node_failures) == 0: + try: + result = reduce_fun(cast(List[T], all_data)) + except BaseException as e: + node_failures[self.rank] = _wrap_exception(e) + + if len(node_failures) > 0: + result = CheckpointException(step, node_failures) + + final_result = self.broadcast_object(result) + if isinstance(final_result, CheckpointException): + raise final_result + return cast(R, final_result) + + def all_gather( + self, + step: str, + map_fun: Callable[[], T], + ) -> List[T]: + """ + Compute a value on each rank, then all_gather them. + + This method operates in the following way: + Run ``map_cp`` on all ranks + all_gather the values to all ranks + """ + result: Union[T, WRAPPED_EXCEPTION] + try: + result = map_fun() + except BaseException as e: + result = _wrap_exception(e) + + all_results = self.all_gather_object(result) + + node_failures = _get_failure_dict(all_results) + if len(node_failures) > 0: + raise CheckpointException(step, node_failures) + return cast(List[T], all_results) + + def broadcast( + self, + step: str, + map_fun: Callable[[], T], + ) -> T: + """ + Compute a value on rank 0 and broadcast it. 
+ + This method operates in the following way: + Run ``map_cp`` on rank 0 + broadcast the value + """ + result: Optional[Union[T, CheckpointException]] = None + if self.is_coordinator: + try: + result = map_fun() + except BaseException as e: + result = CheckpointException(step, {self.rank: _wrap_exception(e)}) + final_result = self.broadcast_object(result) + if isinstance(final_result, CheckpointException): + raise final_result + return cast(T, final_result) + + +def _find_shard(tensor: ShardedTensor, index: MetadataIndex) -> Shard: + if index.offset is None: + raise ValueError( + f"Cannot lookup {index.fqn} since its a ShardedTensor and no offset was provided" + ) + + shards = tensor.local_shards() + # index fast path + if index.index is not None: + if ( + len(shards) > index.index + and torch.Size(shards[index.index].metadata.shard_offsets) == index.offset + ): + return shards[index.index] + + for shard in shards: + if torch.Size(shard.metadata.shard_offsets) == index.offset: + return shard + raise ValueError(f"Could not find shard at '{index.offset}' for FQN: '{index.fqn}'") + + +def find_tensor_shard(tensor: torch.Tensor, index: MetadataIndex) -> torch.Tensor: + if isinstance(tensor, DTensor): + return tensor.to_local() + if isinstance(tensor, ShardedTensor): + return _find_shard(tensor, index).tensor + if index.offset is not None: + # special case looking up a tensor by origin + if index.offset == torch.Size([0] * len(tensor.size())): + return tensor + raise ValueError( + f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset: '{index.offset}'" + ) + return tensor + + +def find_state_dict_object(state_dict: STATE_DICT_TYPE, index: MetadataIndex) -> Any: + if index.fqn not in state_dict: + raise ValueError(f"Could not find FQN: '{index.fqn}'") + obj = state_dict[index.fqn] + + if isinstance(obj, torch.Tensor): + return find_tensor_shard(obj, index) + elif index.offset is not None: + raise ValueError( + f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset: '{index.offset}'" + ) + return obj + + +def _element_wise_add(a: Sequence[int], b: Sequence[int]) -> List[int]: + return [i_a + i_b for i_a, i_b in zip(a, b)] + + +def _element_wise_sub(a: Sequence[int], b: Sequence[int]) -> List[int]: + return [i_a - i_b for i_a, i_b in zip(a, b)] + + +class _ReaderView(io.IOBase): + def __init__(self, base_stream: io.IOBase, offset: int, len: int): + super().__init__() + self.offset = offset + self.len = len + self.base_stream = base_stream + self.seek(0) + + def seek(self, __offset: int, __whence: int = os.SEEK_SET) -> int: + if __whence == os.SEEK_SET: + __offset = self.offset + __offset + elif __whence == os.SEEK_END: + __whence = os.SEEK_SET + __offset = (self.offset + self.len) - __offset + return self.base_stream.seek(__offset, __whence) + + def tell(self) -> int: + return self.base_stream.tell() - self.offset + + def readable(self) -> bool: + return self.base_stream.readable() + + def seekable(self) -> bool: + return self.base_stream.seekable() + + def readinto(self, b): + return self.base_stream.readinto(b) # type: ignore[attr-defined] + + def read(self, size=-1): + return self.base_stream.read(size) + + +def _create_file_view(file: io.IOBase, offset: int, length: int) -> io.IOBase: + # FIXME (kumpera) torch.load fails if we wrap with io.BufferedReader + return _ReaderView(file, offset, length) + + +def _normalize_device_info(device_type: str, device_id: int) -> str: + """Device info normalization.""" + if device_type == "cpu": + return "cpu" + return 
f"{device_type}:{device_id}" + + +# TODO: integrate with distributed logging flag +ENABLE_PROFILE = False + + +@contextmanager +def _profile(): + # Only log the profiling when it is enable and is on rank0 or dist is not + # avaiable. + if ENABLE_PROFILE and (not dist.is_available() or dist.get_rank() == 0): + profiler = cProfile.Profile() + profiler.enable() + try: + yield + finally: + profiler.disable() + stats = Stats(profiler) + stats.sort_stats("time").print_stats(10) + else: + yield + + +def _api_bc_check(func): + @wraps(func) + def inner_func(*args, **kwargs) -> Any: + if len(args) == 2: + warnings.warn( + f"The argument order of {func.__name__} has been changed. " + "Please check the document to avoid future breakages." + ) + sig = inspect.signature(func) + kwonlyargs = [ + p.name for p in sig.parameters.values() if p.kind == p.KEYWORD_ONLY + ] + if "storage_writer" in kwonlyargs: + assert "storage_writer" not in kwargs, (args, kwargs) + kwargs["storage_writer"] = args[1] + elif "storage_reader" in kwonlyargs: + assert "storage_reader" not in kwargs, (args, kwargs) + kwargs["storage_reader"] = args[1] + else: + raise RuntimeError(f"Unexpected kwonlyargs = {kwonlyargs}") + return func(args[0], **kwargs) + else: + return func(*args, **kwargs) + + return inner_func diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82886d3c774b78de0526b2bce2bfc06db6f23a92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/__init__.py @@ -0,0 +1,34 @@ +""" +:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list +of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the +optimizer locally on the workers where the parameters live. The distributed +optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to +apply the gradients on each worker. +""" +import torch +from torch import optim + +from .apply_optimizer_in_backward import ( + _apply_optimizer_in_backward, + _get_in_backward_optimizers, +) +from .functional_adadelta import _FunctionalAdadelta + +from .functional_adagrad import _FunctionalAdagrad +from .functional_adam import _FunctionalAdam +from .functional_adamax import _FunctionalAdamax +from .functional_adamw import _FunctionalAdamW +from .functional_rmsprop import _FunctionalRMSprop +from .functional_rprop import _FunctionalRprop +from .functional_sgd import _FunctionalSGD +from .named_optimizer import _NamedOptimizer +from .utils import as_functional_optim + + +# DistributedOptimizer imports torch.distributed.rpc names, so gate availability +# based on RPC being available. 
+if hasattr(torch._C, "_rpc_init"): + from .optimizer import DistributedOptimizer + +from .post_localSGD_optimizer import PostLocalSGDOptimizer +from .zero_redundancy_optimizer import ZeroRedundancyOptimizer diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py new file mode 100644 index 0000000000000000000000000000000000000000..6bd182cca5736fbd9df7373e984e92235ef12617 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py @@ -0,0 +1,118 @@ +from typing import Any, Dict, Iterable, List, no_type_check, Type + +import torch + +__all__: List[str] = [] + +# WeakTensorKeyDictionary to store relevant meta-data for the Tensor/Parameter +# without changing it's life-time. +# NOTE: Alternative is to add the meta-data as an attribute to the tensor, +# but that will serialize the meta-data if Tensor is serialized. +param_to_optim_hook_handle_map = torch.utils.weak.WeakTensorKeyDictionary() +param_to_acc_grad_map = torch.utils.weak.WeakTensorKeyDictionary() + +@no_type_check +def _apply_optimizer_in_backward( + optimizer_class: Type[torch.optim.Optimizer], + params: Iterable[torch.nn.Parameter], + optimizer_kwargs: Dict[str, Any], + register_hook: bool = True, +) -> None: + """ + Upon ``backward()``, the optimizer specified for each parameter will fire after + the gradient has been accumulated into the parameter. + + Note - gradients for these parameters will be set to None after ``backward()``. + This means that any other optimizer not specified via `_apply_optimizer_in_backward` + over this parameter will be a no-op. + + Args: + optimizer_class: (Type[torch.optim.Optimizer]): Optimizer to apply to parameter + params: (Iterator[nn.Parameter]): parameters to apply optimizer state to + optimizer_kwargs: (Dict[str, Any]): kwargs to pass to optimizer constructor + register_hook: (bool): whether to register a hook that runs the optimizer + after gradient for this parameter is accumulated. This is the default + way that optimizer in backward is implemented, but specific use cases + (such as DDP) may wish to override this to implement custom behavior. + (Default = True) + + Example:: + params_generator = model.parameters() + param_1 = next(params_generator) + remainder_params = list(params_generator) + + apply_optimizer_in_backward(torch.optim.SGD, [param_1], {"lr": .02}) + apply_optimizer_in_backward(torch.optim.Adam, remainder_params, {"lr": .04}) + + model(...).sum().backward() # after backward, parameters will already + # have their registered optimizer(s) applied. + + """ + torch._C._log_api_usage_once( + "torch.distributed.optim.apply_optimizer_in_backward" + ) + + @no_type_check + def _apply_optimizer_in_backward_to_param(param: torch.nn.Parameter) -> None: + # view_as creates a node in autograd graph that allows us access to the + # parameter's AccumulateGrad autograd function object. We register a + # hook on this object to fire the optimizer when the gradient for + # this parameter is ready (has been accumulated into .grad field) + + # Don't create a new acc_grad if we already have one + # i.e. for shared parameters or attaching multiple optimizers to a param. 
+ if param not in param_to_acc_grad_map: + param_to_acc_grad_map[param] = param.view_as(param).grad_fn.next_functions[0][0] + + optimizer = optimizer_class([param], **optimizer_kwargs) + + if not hasattr(param, "_in_backward_optimizers"): + param._in_backward_optimizers = [] # type: ignore[attr-defined] + # TODO: Remove these attributes once we have a better way of accessing + # optimizer classes and kwargs for a parameter. + param._optimizer_classes = [] # type: ignore[attr-defined] + param._optimizer_kwargs = [] # type: ignore[attr-defined] + + param._in_backward_optimizers.append(optimizer) # type: ignore[attr-defined] + param._optimizer_classes.append(optimizer_class) # type: ignore[attr-defined] + param._optimizer_kwargs.append(optimizer_kwargs) # type: ignore[attr-defined] + + if not register_hook: + return + + def optimizer_hook(*_unused) -> None: + for opt in param._in_backward_optimizers: # type: ignore[attr-defined] + opt.step() + + param.grad = None + + handle = param_to_acc_grad_map[param].register_hook(optimizer_hook) # type: ignore[attr-defined] + if param not in param_to_optim_hook_handle_map: + param_to_optim_hook_handle_map[param] = [] + param_to_optim_hook_handle_map[param].append(handle) + + for param in params: + _apply_optimizer_in_backward_to_param(param) + + +def _get_in_backward_optimizers(module: torch.nn.Module) -> List[torch.optim.Optimizer]: + """ + Return a list of in-backward optimizers applied to ``module``'s parameters. Note that these + optimizers are not intended to directly have their ``step`` or ``zero_grad`` methods called + by the user and are intended to be used for things like checkpointing. + + Args: + module: (torch.nn.Module): model to retrieve in-backward optimizers for + + Returns: + List[torch.optim.Optimizer]: the in-backward optimizers. + + Example:: + _apply_optimizer_in_backward(torch.optim.SGD, model.parameters(), {'lr': 0.01}) + optims = _get_optimizers_in_backward(model) + """ + optims: List[torch.optim.Optimizer] = [] + for param in module.parameters(): + optims.extend(getattr(param, "_in_backward_optimizers", [])) + + return optims diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_adam.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..5335df17e089041d942fdf7462f7c38bdfc7fc5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_adam.py @@ -0,0 +1,196 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Adam Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. 
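# --- Editor's note: illustrative sketch, not part of the vendored file. -------
# As the comment above explains, these functional optimizers never read
# `param.grad`; the caller hands gradients to `step` / `step_param` explicitly.
# A hypothetical eager-mode use of the `_FunctionalAdam` class defined below
# (normally it is only driven by distributed optimizer internals):
import torch
from torch.distributed.optim.functional_adam import _FunctionalAdam

w = torch.nn.Parameter(torch.randn(4))
g = torch.randn(4)                 # gradient computed elsewhere, e.g. by dist autograd
opt = _FunctionalAdam([w], lr=1e-3)
opt.step_param(w, g)               # update a single parameter from an explicit gradient
opt.step([g])                      # or update the whole (single-entry) param group
# ------------------------------------------------------------------------------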
+@torch.jit.script +class _FunctionalAdam: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0.0, + amsgrad: bool = False, + maximize: bool = False, + foreach: bool = False, + fused: bool = False, + _allow_empty_param_list: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + self.defaults = { + "lr": lr, + "eps": eps, + "beta1": betas[0], + "beta2": betas[1], + "weight_decay": weight_decay, + } + self.amsgrad = amsgrad + self.maximize = maximize + self.foreach = foreach + self.fused = fused + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + def step_param(self, param: Tensor, grad: Optional[Tensor]): + """ + Similar to step, but operates on a single parameter and optionally a + gradient tensor. + """ + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps: List[Tensor] = [] + has_complex = torch.is_complex(param) + if grad is not None: + params_with_grad.append(param) + grads.append(grad) + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + state["exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.amsgrad: + state["max_exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if self.amsgrad: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + + state_steps.append(state["step"]) + with torch.no_grad(): + F.adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=self.amsgrad, + has_complex=has_complex, + maximize=self.maximize, + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + eps=self.defaults["eps"], + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps: List[Tensor] = [] + has_complex = False + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + + for param, gradient in zip(self.param_group["params"], gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if self.amsgrad: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + + state_steps.append(state["step"]) + + with torch.no_grad(): + F.adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=self.amsgrad, + has_complex=has_complex, + maximize=self.maximize, + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + eps=self.defaults["eps"], + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_adamax.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_adamax.py new file mode 100644 index 0000000000000000000000000000000000000000..f3acd4d271ef3e044956818c942fba5d21ddc50e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_adamax.py @@ -0,0 +1,117 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Adamax Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. 
+@torch.jit.script +class _FunctionalAdamax: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0.0, + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + self.defaults = { + "lr": lr, + "eps": eps, + "beta1": betas[0], + "beta2": betas[1], + "weight_decay": weight_decay, + } + self.foreach = foreach + self.maximize = maximize + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_infs = [] + state_steps: List[Tensor] = [] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. " + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(self.param_group["params"], gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_inf"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + + exp_avgs.append(state["exp_avg"]) + exp_infs.append(state["exp_inf"]) + state_steps.append(state["step"]) + + with torch.no_grad(): + F.adamax( + params_with_grad, + grads, + exp_avgs, + exp_infs, + state_steps, + eps=self.defaults["eps"], + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_rmsprop.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_rmsprop.py new file mode 100644 index 0000000000000000000000000000000000000000..4324760df8d533f6f38e89b9183983b7742133c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_rmsprop.py @@ -0,0 +1,122 @@ +from typing import Dict, List, Optional + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional RMSprop Optimizer +# where we use these optimizer in a functional way. 
+# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalRMSprop: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + alpha: float = 0.99, + eps: float = 1e-8, + weight_decay: float = 0.0, + momentum: float = 0.0, + centered: bool = False, + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + "alpha": alpha, + "eps": eps, + "weight_decay": weight_decay, + "momentum": momentum, + } + self.centered = centered + self.foreach = foreach + self.maximize = maximize + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + square_avgs = [] + grad_avgs = [] + momentum_buffer_list = [] + lr = self.defaults["lr"] + alpha = self.defaults["alpha"] + eps = self.defaults["eps"] + momentum = self.defaults["momentum"] + weight_decay = self.defaults["weight_decay"] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(params, gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["square_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if momentum > 0: + state["momentum_buffer"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.centered: + state["grad_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + square_avgs.append(state["square_avg"]) + if momentum > 0: + momentum_buffer_list.append(state["momentum_buffer"]) + if self.centered: + grad_avgs.append(state["grad_avg"]) + + state["step"] += 1 + + with torch.no_grad(): + F.rmsprop( + params_with_grad, + grads, + square_avgs, + grad_avgs, + momentum_buffer_list, + lr=lr, + alpha=alpha, + eps=eps, + weight_decay=weight_decay, + momentum=momentum, + centered=self.centered, + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_rprop.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_rprop.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac6487b0acb3747e63b0ca1a9168d130a809531 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_rprop.py @@ -0,0 +1,100 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Rprop Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalRprop: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + etas: Tuple[float, float] = (0.5, 1.2), + step_sizes: Tuple[float, float] = (1e-6, 50), + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + } + self.etas = etas + self.step_sizes = step_sizes + self.foreach = foreach + self.maximize = maximize + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. 
+ self.param_group = {"params": params} + + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + prevs = [] + step_sizes = [] + lr = self.defaults["lr"] + etaminus, etaplus = self.etas + step_size_min, step_size_max = self.step_sizes + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. " + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(params, gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["prev"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + state["step_size"] = torch.full_like(gradient, lr) + + state = self.state[param] + prevs.append(state["prev"]) + step_sizes.append(state["step_size"]) + + state["step"] += 1 + + with torch.no_grad(): + F.rprop( + params_with_grad, + grads, + prevs, + step_sizes, + step_size_min=step_size_min, + step_size_max=step_size_max, + etaminus=etaminus, + etaplus=etaplus, + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_sgd.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..4a807a605571979fd50d96a70a3b25f1ee507a99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/functional_sgd.py @@ -0,0 +1,160 @@ +from typing import Dict, List, Optional + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional SGD Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalSGD: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + momentum: float = 0.0, + dampening: float = 0.0, + weight_decay: float = 0.0, + nesterov: bool = False, + maximize: bool = False, + foreach: bool = False, + fused: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + "momentum": momentum, + "dampening": dampening, + "weight_decay": weight_decay, + } + self.nesterov = nesterov + self.maximize = maximize + self.foreach = foreach + self.fused = fused + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. 
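+        # A minimal usage sketch (hypothetical tensors): gradients are handed
+        # to ``step`` explicitly, one entry per parameter, instead of being
+        # read from ``param.grad``; ``step_param`` below updates one parameter.
+        #
+        #     params = [torch.randn(4, requires_grad=True)]
+        #     opt = _FunctionalSGD(params, lr=0.1, momentum=0.9)
+        #     opt.step([torch.ones(4)])                 # one gradient per param
+        #     opt.step_param(params[0], torch.ones(4))  # single-parameter update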
+ self.param_group = {"params": params} + + def step_param(self, param: Tensor, grad: Optional[Tensor]): + """Similar to self.step, but operates on a single parameter and + its gradient. + """ + # TODO: Once step_param interface is robust, refactor step to call + # step param on each param. + weight_decay = self.defaults["weight_decay"] + momentum = self.defaults["momentum"] + dampening = self.defaults["dampening"] + lr = self.defaults["lr"] + params = [param] + momentum_buffer_list: List[Optional[Tensor]] = [] + grads = [] + + has_sparse_grad = False + if grad is not None: + grads.append(grad) + if grad.is_sparse: + has_sparse_grad = True + if param not in self.state: + self.state[param] = {} + state = self.state[param] + if "momentum_buffer" not in state: + momentum_buffer_list.append(None) + else: + momentum_buffer_list.append(state["momentum_buffer"]) + + with torch.no_grad(): + F.sgd( + params, + grads, + momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=self.nesterov, + maximize=self.maximize, + has_sparse_grad=has_sparse_grad, + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) + # update momentum_buffer in state + state = self.state[param] + momentum_buffer = momentum_buffer_list[0] + if momentum_buffer is not None: + state["momentum_buffer"] = momentum_buffer + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + momentum_buffer_list: List[Optional[Tensor]] = [] + lr = self.defaults["lr"] + weight_decay = self.defaults["weight_decay"] + momentum = self.defaults["momentum"] + dampening = self.defaults["dampening"] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + + has_sparse_grad = False + for param, gradient in zip(params, gradients): + if gradient is not None: + params_with_grad.append(param) + grads.append(gradient) + if gradient.is_sparse: + has_sparse_grad = True + + if param not in self.state: + self.state[param] = {} + + state = self.state[param] + if "momentum_buffer" not in state: + momentum_buffer_list.append(None) + else: + momentum_buffer_list.append(state["momentum_buffer"]) + + with torch.no_grad(): + F.sgd( + params_with_grad, + grads, + momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=self.nesterov, + maximize=self.maximize, + has_sparse_grad=has_sparse_grad, + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) + + # update momentum_buffers in state + for i, p in enumerate(params_with_grad): + state = self.state[p] + momentum_buffer = momentum_buffer_list[i] + if momentum_buffer is not None: + state["momentum_buffer"] = momentum_buffer diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/named_optimizer.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/named_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..28edbe39d80e62bffd0773b3ad7a6e9776f61717 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/named_optimizer.py @@ -0,0 +1,331 @@ +import logging +import warnings + +from copy import deepcopy +from typing import Any, Callable, Collection, Dict, List, Mapping, Optional, Union, overload + +import torch +import torch.nn as nn +from torch import optim +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + + +__all__: List[str] = [] + +logger = logging.getLogger(__name__) + + +class _NamedOptimizer(optim.Optimizer): + """ + ``_NamedOptimizer`` takes a dict of parameters and exposes ``state_dict`` by parameter key. + + We replace the original key (number) in an optim to the + fully qualified name (FQN) string. User can initialize the optim as they + initialize a PyTorch optim, the only difference is that they also need to + pass in the FQN of each parameters. + + Args: + named_parameters (Mapping[str, Union[torch.Tensor, ShardedTensor]]): + Mapping from FQN to parameter. + optimizer_class (optim.Optimizer): + The class of optimizer to instantiate. + param_groups (Collection[Mapping[str, Any]]): + `param_groups` to pass to optimizer if specified. + The key of the inner map needs to be FQNs. + Default: None + module (nn.Module): the module whose parameters to updated + by the optimizer. + args: arguments to pass to the optimizer constructor. + kwargs: arguments to pass to the optimizer constructor. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> from torch import optim + >>> from torch.distributed.optim import _NamedOptimizer + >>> + >>> # Define the named optimizer. + >>> m = Model(...) + >>> named_optim = _NamedOptimizer(m.named_parameters(), optim.SGD) + >>> # Forward pass + backward pass. + >>> named_optim.step() + >>> ... + >>> # Call state_dict for the named optimizer returns a FQN state_dict. + >>> named_optim.state_dict() + + Warning: This API is still in development and subject to change. + + TODO: Add tutorial for _NamedOptimizer. + TODO: Add documentation in the docstring for the public attributes + like self.param_groups and self.named_parameters. 
+ """ + + def __init__( + self, + named_parameters: Mapping[str, Union[torch.Tensor, ShardedTensor]], + optimizer_class: optim.Optimizer, + param_groups: Optional[Collection[Mapping[str, Any]]] = None, + module: Optional[nn.Module] = None, + *args, + **kwargs, + ) -> None: + torch._C._log_api_usage_once("torch.distributed.optim._NamedOptimizer") + self.param_groups: Collection[Mapping[str, Any]] = param_groups # type: ignore[assignment] + self._param_groups_check() + self.named_parameters = dict(named_parameters) + params_for_optimizer = ( + self.named_parameters.values() if param_groups is None else param_groups + ) + self._optimizer = optimizer_class( # type: ignore[operator] + params_for_optimizer, + *args, + **kwargs, + ) + self.module = module + if param_groups is None: + self.ordered_param_keys = list(self.named_parameters.keys()) + else: + warnings.warn( + "Since we pass in param_groups, we will use param_groups to " + "initialize the optimizer, not all parameters of the module." + ) + param_to_key = {param: key for key, param in self.named_parameters.items()} # type: ignore[misc, has-type] + ordered_param_keys = [] + for group in param_groups: + for param in group["params"]: + if param not in param_to_key: + raise ValueError( + f"Expect param name {param} found in param group but is missing." + ) + ordered_param_keys.append(param_to_key[param]) + self.ordered_param_keys = ordered_param_keys + # Update param_groups from optimizer. + self.param_groups = self._optimizer.param_groups + + def _param_groups_check(self): + if self.param_groups is not None: + for param_group in self.param_groups: + assert isinstance(param_group, dict), "param group must be a dict" + assert "params" in param_group, "param group must contain key params" + params = param_group["params"] + if isinstance(params, torch.Tensor): + params = [params] + params = list(params) + for param in params: + if not isinstance(param, torch.Tensor): + raise TypeError( + "optimizer can only optimize Tensors, " + "but one of the params is " + torch.typename(param) + ) + param_group["params"] = params + + def state_dict(self) -> Dict[str, Any]: + """ + Return the ``state_dict`` of the optimizer. + + Instead of using number to index + parameters, we will use module fully qualified name (FQN) as the key. + """ + state_dict = self._optimizer.state_dict() + param_groups = state_dict["param_groups"] + + ret_state = { + self.ordered_param_keys[st_key]: state_val + for st_key, state_val in state_dict["state"].items() + } + + ret_groups = [] + for group in param_groups: + param_keys = [] + for param in group["params"]: + param_keys.append(self.ordered_param_keys[param]) + ret_group = {"params": sorted(param_keys)} + for k, v in group.items(): + if k != "params": + ret_group[k] = deepcopy(v) + ret_groups.append(ret_group) + + return self._post_state_dict({"state": ret_state, "param_groups": ret_groups}) + + @overload + def step(self, closure: None = ...) -> None: + ... + + @overload + def step(self, closure: Callable[[], float]) -> float: + ... + + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """ + Perform a single optimization step. + + This will call :meth:`torch.optim.Optimizer.step` on the wrapped + optimizer. 
+ """ + return self._optimizer.step(closure=closure) + + @property + def state(self) -> Mapping[torch.Tensor, Any]: # type: ignore[override] + return self._optimizer.state + + def load_state_dict(self, state_dict: Mapping[str, Any]) -> None: + """ + Define the default behavior to load a state_dict for ``_NamedOptimizer``. + + Sample Code + ``` + my_model = MyModule() + optimizer = _NamedOptimizer(my_model.named_parameters(), Adagrad) + ... + + optim_state_dict = optimizer.state_dict() + ... + ... + + optimizer.load_state_dict(optim_state_dict) + ... + ``` + Args: + state_dict (Dict[str, Any]) : A ``state_dict`` to load into the optimizer. + Note that this state dict update is performed in place. + + .. note:: PyTorch is using lazy init to initialize the optim states. + So it is possible that there is no optim state when user call + ``load_state_dict`` and for ``_NamedOptimizer`` we make it stricter + that users can only call ``load_state_dict`` after the state is initialized. + By doing this, we can validate the optim ``state_dict`` to be loaded. + """ + new_state_dict = self._optimizer.state_dict() + state_dict = self._pre_load_state_dict(state_dict) + state = state_dict["state"] + new_state = new_state_dict["state"] + if len(new_state) == 0: + raise ValueError( + "Expects the optim to be initialized before load but found not initialized." + ) + + for idx, param_key in enumerate(self.ordered_param_keys): + # When the conditional training is performed, not all parameters are updated in the optim. + if param_key not in state.keys(): + continue + if len(state[param_key]) != len(new_state[idx]): + raise ValueError( + f"Expects equal length as {len(new_state[idx])} for parameter {param_key} but found: {len(state[param_key])}" + ) + # Iterate through all optimizer states. + for state_key, state_val in new_state[idx].items(): + if state_key not in state[param_key]: + raise ValueError( + f"Expects state {state_key} for parameter {param_key} but not found." + ) + + src_state_val = state[param_key][state_key] + if isinstance(state_val, ShardedTensor): + assert isinstance(src_state_val, ShardedTensor) + num_shards = len(state_val.local_shards()) + num_new_shards = len(src_state_val.local_shards()) + if num_shards != num_new_shards: + raise ValueError( + f"Expects equal number of shards as {num_new_shards} but found {num_shards} for {param_key}/{state_key}" + ) + for shard, src_shard in zip( + state_val.local_shards(), src_state_val.local_shards() + ): + shard.tensor.detach().copy_(src_shard.tensor) + elif isinstance(state_val, torch.Tensor): + assert isinstance(src_state_val, torch.Tensor) + state_val.detach().copy_(src_state_val) + else: + new_state[idx][state_key] = deepcopy(src_state_val) + + # Load param_groups of state_dict + src_param_groups = state_dict["param_groups"] + new_param_groups = new_state_dict["param_groups"] + + src_group_map = {} + for group in src_param_groups: + param_keys = list(group["params"]) + src_group_map[_gen_param_group_key(param_keys)] = group + new_group_map = {} + for new_group in new_param_groups: + param_keys = [] + for param_key in new_group["params"]: + param_keys.append(self.ordered_param_keys[param_key]) # type: ignore[call-overload] + new_group_map[_gen_param_group_key(param_keys)] = new_group + for group_key, new_group in new_group_map.items(): + # When not all parameters are used in training or receive gradient, aka., not all parameters + # would be in the param_group. Thus we skip the group_key here. 
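+            # (`_gen_param_group_key` joins the sorted parameter FQNs with "/",
+            # so a group containing the hypothetical params ["net.weight",
+            # "net.bias"] maps to the key "net.bias/net.weight".)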
+ if group_key not in src_group_map: + continue + src_group = src_group_map[group_key] + if len(src_group) != len(new_group): + raise ValueError( + f"Expects equal param_group size as {len(new_group)} for group {group_key} but found {len(src_group)}." + ) + for k in src_group: + if k not in new_group: + raise ValueError( + f"Expects group key {k} to be in group {group_key} in `state_dict` but is missing." + ) + if k != "params": + new_group[k] = deepcopy(src_group[k]) + + self._optimizer.load_state_dict(new_state_dict) + + def add_param_group(self, param_group: Mapping[str, Any]) -> None: + """ + Add a param group to the :class:`_NamedOptimizer` s `param_groups`. + + Warning: This API is still in development and subject to change. + """ + assert isinstance(param_group, dict), "param group must be a dict" + + params = param_group["params"] + if isinstance(params, torch.Tensor): + param_group["params"] = [params] + else: + param_group["params"] = list(params) + + param_to_key = {param: key for key, param in self.named_parameters.items()} # type: ignore[misc, has-type] + for param in param_group["params"]: + if param not in param_to_key: + raise ValueError("some parameters are not in the module") + self.ordered_param_keys.append(param_to_key[param]) + + self._optimizer.add_param_group(param_group) + # Update param_groups from optimizer. + self.param_groups = self._optimizer.param_groups + + def init_state(self) -> None: + """ + Run a dummy optimizer step, which allows to initialize optimizer state because we do lazy init for most optimizers. + + This allows doing in-place loading of optimizer state from a checkpoint. + """ + for param in self.named_parameters.values(): + if param.requires_grad: + t = torch.zeros_like(param) + param.grad = torch.autograd.Variable(t) + # Calling ``step`` will load the initial state for optimizer states. + self.step(closure=None) + + def _pre_load_state_dict(self, state_dict) -> Dict[str, Any]: + # TODO(chienchin): This API should be FSDP agnostic and should support + # general user hooks. + if isinstance(self.module, FSDP): + return FSDP.optim_state_dict_to_load( + self.module, self._optimizer, state_dict, is_named_optimizer=True + ) + return state_dict + + def _post_state_dict(self, state_dict) -> Dict[str, Any]: + # TODO(chienchin): This API should be FSDP agnostic and should support + # general user hooks. 
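+        # When the module is FSDP-wrapped, delegate to ``FSDP.optim_state_dict``
+        # so the optimizer state is post-processed into FSDP's FQN-keyed form
+        # before being returned to the caller.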
+ if isinstance(self.module, FSDP): + FSDP.optim_state_dict(self.module, self._optimizer, state_dict) + return state_dict + + +def _gen_param_group_key(param_keys: List[str]) -> str: + """Concatenate all param keys as a unique indentifier for one param group.""" + return "/".join(sorted(param_keys)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/optimizer.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8246c667509d9e40c99ca89ddd26aec735254763 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/optimizer.py @@ -0,0 +1,254 @@ +import logging + +from collections import defaultdict +from threading import Lock +from typing import List, Optional + +import torch +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +import torch.jit as jit +import torch.nn as nn +from torch import Tensor +from torch.distributed.rpc import RRef +from .utils import functional_optim_map + +__all__ = ["DistributedOptimizer"] + +logger = logging.getLogger(__name__) + + +# XXX: we define a _ScriptModuleOptimizer here to explicitly +# compile the FunctionalOptimizer class into TorchScript +# This is because ScriptClass instance still lives in +# python unless you explicitly compile it as an attribute +# in ScriptModule or pass it to a ScriptFunction +# _ScriptLocalOptimizerInterface serves as a common +# interface type for Optimizer ScriptModules. +# +# TODO (wanchaol): remove this once we added TorchScript +# class reference semantics +@jit.interface +class _ScriptLocalOptimizerInterface: + def step(self, autograd_ctx_id: int) -> None: + pass + + +class _ScriptLocalOptimizer(nn.Module): + # TorchScript does not support multithread concurrent compiling. + # request_callback might invoke concurrent compiling, so we + # serialize the compiling with a lock + compile_lock = Lock() + + def __init__(self, optim_cls, local_params_rref, *args, **kwargs): + super().__init__() + self._local_params = [rref.local_value() for rref in local_params_rref] + self.optim = optim_cls(self._local_params, *args, **kwargs) + + @jit.export + def step(self, autograd_ctx_id: int): + all_local_grads = dist_autograd.get_gradients(autograd_ctx_id) + # apply functional optimizer step with a list of gradients + grads: List[Optional[Tensor]] = [ + all_local_grads[p] if p in all_local_grads else None + for p in self._local_params + ] + + self.optim.step(grads) + + +# TODO (wanchaol): remove/merge this with ScriptLocalOptimizer once +# we have converted all to functional optimizer in distributed.optim +class _LocalOptimizer: + # Ideally we would only need to share a lock for instances of + # _LocalOptimizer that deal with the same parameters. We are + # making a simplifying assumption here that if there is more + # than one instance of _LocalOptimizer per worker, they will + # be optimizing the same parameters (e.g. 
each data parallel + # trainer will create its own instance of _LocalOptimizer but + # they will all optimize the same parameters on each worker) + global_lock = Lock() + + def __init__(self, optim_cls, local_params_rref, *args, **kwargs): + self._local_params = [rref.local_value() for rref in local_params_rref] + self.optim = optim_cls(self._local_params, *args, **kwargs) + + def step(self, autograd_ctx_id): + all_local_grads = dist_autograd.get_gradients(autograd_ctx_id) + + with _LocalOptimizer.global_lock: + for param, grad in all_local_grads.items(): + param.grad = grad + self.optim.step() + + +def _new_local_optimizer(optim_cls, local_params_rref, *args, **kwargs): + return rpc.RRef(_LocalOptimizer(optim_cls, local_params_rref, *args, **kwargs)) + + +def _local_optimizer_step(local_optim_rref, autograd_ctx_id): + local_optim = local_optim_rref.local_value() + local_optim.step(autograd_ctx_id) + + +# new/step functions combined with _ScriptLocalOptimizer to provide GIL-free optimizer +def _new_script_local_optimizer(optim_cls, local_params_rref, *args, **kwargs): + optim = _ScriptLocalOptimizer(optim_cls, local_params_rref, *args, **kwargs) + + with _ScriptLocalOptimizer.compile_lock: + script_optim = jit.script(optim) + return rpc.RRef(script_optim, _ScriptLocalOptimizerInterface) + + +@jit.script +def _script_local_optimizer_step( + local_optim_rref: RRef[_ScriptLocalOptimizerInterface], autograd_ctx_id: int +) -> None: + local_optim = local_optim_rref.local_value() + local_optim.step(autograd_ctx_id) + + +def _wait_for_all(rpc_futs): + # TODO: improve error propagation + exception = None + results = [] + for fut in rpc_futs: + try: + results.append(fut.wait()) + except Exception as e: + results.append(e) + exception = e + if exception is not None: + raise exception + return results + + +class DistributedOptimizer: + """ + DistributedOptimizer takes remote references to parameters scattered + across workers and applies the given optimizer locally for each parameter. + + This class uses :meth:`~torch.distributed.autograd.get_gradients` in order + to retrieve the gradients for specific parameters. + + Concurrent calls to + :meth:`~torch.distributed.optim.DistributedOptimizer.step`, + either from the same or different clients, will + be serialized on each worker -- as each worker's optimizer can only work + on one set of gradients at a time. However, there is no guarantee that + the full forward-backward-optimizer sequence will execute for one client + at a time. This means that the gradients being applied may not correspond + to the latest forward pass executed on a given worker. Also, there is no + guaranteed ordering across workers. + + `DistributedOptimizer` creates the local optimizer with TorchScript enabled + by default, so that optimizer updates are not blocked by the Python Global + Interpreter Lock (GIL) in the case of multithreaded training (e.g. Distributed + Model Parallel). This feature is currently enabled for most optimizers. You + can also follow `the recipe`__ in PyTorch tutorials to enable TorchScript support + for your own custom optimizers. + + Args: + optimizer_class (optim.Optimizer): the class of optimizer to + instantiate on each worker. + params_rref (list[RRef]): list of RRefs to local or remote parameters + to optimize. + args: arguments to pass to the optimizer constructor on each worker. + kwargs: arguments to pass to the optimizer constructor on each worker. 
+ + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> import torch.distributed.autograd as dist_autograd + >>> import torch.distributed.rpc as rpc + >>> from torch import optim + >>> from torch.distributed.optim import DistributedOptimizer + >>> + >>> with dist_autograd.context() as context_id: + >>> # Forward pass. + >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) + >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) + >>> loss = rref1.to_here() + rref2.to_here() + >>> + >>> # Backward pass. + >>> dist_autograd.backward(context_id, [loss.sum()]) + >>> + >>> # Optimizer. + >>> dist_optim = DistributedOptimizer( + >>> optim.SGD, + >>> [rref1, rref2], + >>> lr=0.05, + >>> ) + >>> dist_optim.step(context_id) + + __ https://github.com/pytorch/tutorials/pull/1465 + """ + + def __init__(self, optimizer_class, params_rref, *args, **kwargs): + torch._C._log_api_usage_once("torch.distributed.optim.DistributedOptimizer") + per_worker_params_rref = defaultdict(list) + for param in params_rref: + per_worker_params_rref[param.owner()].append(param) + + if optimizer_class in functional_optim_map and jit._state._enabled: + optim_ctor = functional_optim_map.get(optimizer_class) + else: + optim_ctor = optimizer_class + self.is_functional_optim = optim_ctor != optimizer_class + + if self.is_functional_optim: + optimizer_new_func = _new_script_local_optimizer + else: + logger.warning( + "Creating the optimizer %s without TorchScript support, " + "this might result in slow computation time in multithreading environment" + "(i.e. Distributed Model Parallel training on CPU) due to the Python's " + "Global Interpreter Lock (GIL). Please file an issue if you need this " + "optimizer in TorchScript. ", + optimizer_class + ) + optimizer_new_func = _new_local_optimizer + + remote_optim_futs = [] + for worker, param_rrefs in per_worker_params_rref.items(): + remote_optim_rref_fut = rpc.rpc_async( + worker, + optimizer_new_func, + args=(optim_ctor, param_rrefs) + args, + kwargs=kwargs, + ) + remote_optim_futs.append(remote_optim_rref_fut) + + self.remote_optimizers = _wait_for_all(remote_optim_futs) + + def step(self, context_id): + """ + Performs a single optimization step. + + This will call :meth:`torch.optim.Optimizer.step` on each worker + containing parameters to be optimized, and will block until all workers + return. The provided ``context_id`` will be used to retrieve the + corresponding :class:`~torch.distributed.autograd.context` that + contains the gradients that should be applied to the parameters. + + Args: + context_id: the autograd context id for which we should run the + optimizer step. 
+ """ + dist_autograd._is_valid_context(context_id) + + optimizer_step_func = ( + _script_local_optimizer_step + if self.is_functional_optim + else _local_optimizer_step + ) + + rpc_futs = [] + for optimizer in self.remote_optimizers: + rpc_futs.append( + rpc.rpc_async( + optimizer.owner(), + optimizer_step_func, + args=(optimizer, context_id), + ) + ) + _wait_for_all(rpc_futs) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/post_localSGD_optimizer.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/post_localSGD_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..f1717685966ad94dbdf9e0ac084f755d11ceca24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/post_localSGD_optimizer.py @@ -0,0 +1,109 @@ +import warnings + +import torch +import torch.distributed.algorithms.model_averaging.averagers as averagers + + +class PostLocalSGDOptimizer(torch.optim.Optimizer): + r""" + Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD `_, + This optimizer runs local optimizer at every step. + After the warm-up stage, it averages parameters periodically afer the local optimizer is applied. + + Args: + optim: The local optimizer. + averager: A model averager instance to run post-localSGD algorithm. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch + >>> import torch.distributed as dist + >>> import torch.distributed.algorithms.model_averaging.averagers as averagers + >>> import torch.nn as nn + >>> from torch.distributed.optim import PostLocalSGDOptimizer + >>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import ( + >>> PostLocalSGDState, + >>> post_localSGD_hook, + >>> ) + >>> + >>> model = nn.parallel.DistributedDataParallel( + >>> module, device_ids=[rank], output_device=rank + >>> ) + >>> + >>> # Register a post-localSGD communication hook. + >>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100) + >>> model.register_comm_hook(state, post_localSGD_hook) + >>> + >>> # Create a post-localSGD optimizer that wraps a local optimizer. + >>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as + >>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``. + >>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01) + >>> opt = PostLocalSGDOptimizer( + >>> optim=local_optim, + >>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100) + >>> ) + >>> + >>> # In the first 100 steps, DDP runs global gradient averaging at every step. + >>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default), + >>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer. + >>> for step in range(0, 200): + >>> opt.zero_grad() + >>> loss = loss_fn(output, labels) + >>> loss.backward() + >>> opt.step() + """ + + def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager): + self.optim = optim + self.param_groups = self.optim.param_groups + self.averager = averager + + @property + def state(self): + return self.optim.state + + def __repr__(self): + return self.optim.__repr__() + + def state_dict(self): + r""" + This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`, + but adds an extra entry to record model averager's step to the checkpoint + to ensure reload does not cause unnecessary warm up again. 
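+        For example, the returned dict is just the wrapped optimizer's
+        ``state_dict`` with one extra ``"step"`` entry holding the averager's
+        current step count.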
+ """ + optim_state_dict = self.optim.state_dict() + optim_state_dict["step"] = self.averager.step + return optim_state_dict + + def load_state_dict(self, state_dict): + r""" + This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`, + but also restores model averager's step value to the one + saved in the provided ``state_dict``. + + If there is no ``"step"`` entry in ``state_dict``, + it will raise a warning and initialize the model averager's step to 0. + """ + self.optim.load_state_dict(state_dict) + if "step" in state_dict: + self.averager.step = state_dict["step"] + else: + warnings.warn( + "Loaded state dict does not contain a step counter for an averager. " + "Setting step counter to 0." + ) + self.averager.step = 0 + + def step(self): + r""" + Performs a single optimization step (parameter update). + """ + self.optim.step() + self.averager.average_parameters(params=self.param_groups) + + def zero_grad(self, set_to_none: bool = True): # type: ignore[override] + self.optim.zero_grad(set_to_none=set_to_none) + + def add_param_group(self, param_group): + self.optim.add_param_group(param_group) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/optim/zero_redundancy_optimizer.py b/venv/lib/python3.10/site-packages/torch/distributed/optim/zero_redundancy_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3be3b0181536e32da5c172aac5e6c0de906e3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/optim/zero_redundancy_optimizer.py @@ -0,0 +1,1651 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +r"""Zero Redundancy Optimizer.""" +import collections +import copy +import enum +import inspect +import io +import logging +from itertools import chain +from typing import Any, Callable, Dict, List, Optional, Set, Type, Union + +import torch +import torch.distributed as dist +from torch.distributed.algorithms.join import Join, Joinable, JoinHook +from torch.distributed.optim.utils import functional_optim_map +from torch.optim import Optimizer + + +logger = logging.getLogger(__name__) + +__all__ = ["ZeroRedundancyOptimizer"] + + +# Credits: classy_vision/generic/distributed_util.py +def _recursive_copy_to_device( + value: Any, + non_blocking: bool, + device: torch.device, +) -> Any: + r""" + Recursively searches lists, tuples, dicts and copies tensors to device if possible. + + Non-tensor values are passed as-is in the result. + + .. note: These are all copies, so if there are two objects that reference + the same object, then after this call, there will be two different objects + referenced on the device. 
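+
+    Example (hypothetical values)::
+
+        >>> # xdoctest: +SKIP
+        >>> state = {"step": torch.tensor(3.0), "bufs": [torch.ones(2)]}
+        >>> cpu_state = _recursive_copy_to_device(
+        >>>     state, non_blocking=False, device=torch.device("cpu")
+        >>> )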
+ """ + if isinstance(value, torch.Tensor): + return value.to(device, non_blocking=non_blocking) + + if isinstance(value, (list, tuple)): + values = [ + _recursive_copy_to_device(val, non_blocking=non_blocking, device=device) + for val in value + ] + return values if isinstance(value, list) else tuple(values) + + if isinstance(value, collections.abc.Mapping): + return { + key: _recursive_copy_to_device( + val, non_blocking=non_blocking, device=device + ) + for key, val in value.items() + } + + return value + + +def _is_trainable(param: torch.Tensor) -> bool: + r"""Return if a parameter is trainable, where trainability is equivalent to requiring a gradient.""" + return param.requires_grad + + +def _broadcast_object( + obj: Any, + src_rank: int, + group: object = dist.group.WORLD, + device: torch.device = torch.device("cpu"), +) -> Any: + r""" + Broadcasts an object to the given group. + + It will be sending the object if called from the source rank and receiving + the object otherwise. + + Arguments: + obj: object to broadcast; only used if called on the source rank. + src_rank (int): source rank. + group (``ProcessGroup``, optional): group used for the broadcast + (default: ``dist.group.WORLD``). + device (``torch.device``, optional): device to send from or receive + to (default: ``torch.device("cpu")``). + + Returns: + The broadcasted object. + """ + if dist.get_rank() == src_rank: + # Send the object + buffer = io.BytesIO() + torch.save(obj, buffer) + data = bytearray(buffer.getbuffer()) + length_tensor = torch.LongTensor([len(data)]).to(device) + data_send_tensor = torch.ByteTensor(data).to(device) + dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False) + dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False) + else: + # Receive the object + length_tensor = torch.LongTensor([0]).to(device) + dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False) + data_recv_tensor = torch.empty( + [int(length_tensor.item())], dtype=torch.uint8, device=device + ) + dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False) + buffer = io.BytesIO(data_recv_tensor.cpu().numpy()) + obj = torch.load(buffer, map_location=device) + return obj + + +class _ZeROJoinHook(JoinHook): + def __init__(self, zero): + assert isinstance(zero, ZeroRedundancyOptimizer), ( + "ZeRO join hook requires passing in a ZeroRedundancyOptimizer " + "instance as the state" + ) + self.zero = zero + super().__init__() + + def main_hook(self): + """ + Perform an optimizer step. + + This step updates the joined process's shard of + the parameters and broadcasts those parameters. + """ + self.zero.step() + + +class _DDPBucketAssignment: + r""" + Represent a :class:`DistributedDataParallel` bucket assignment. + + This means that a (possibly non-strict) subset of the parameters corresponding to + a DDP bucket assigned to a rank to update. + + Attributes: + bucket_index (int): index of the bucket determined by the DDP gradient + bucket all-reduce order. + parameters (List[torch.Tensor]): model parameters in the bucket + assigned to this rank. + offset (int): offset into the :class:`GradBucket` 's :meth:`parameters` + giving the index of the first element in the passed-in + ``parameters``; this equivalently indexes into the + :class:`GradBucket` 's :meth:`gradients`. + device (torch.device): device on which the parameters are stored. + tensor (torch.Tensor): flattened tensor giving the data of the + parameter subset assigned to the rank. 
+ """ + + def __init__( + self, + bucket_index: int, + parameters: List[torch.Tensor], + offset: int, + ): + self.bucket_index = bucket_index + self.parameters = parameters + self.offset = offset + if len(self.parameters) == 0: + raise ValueError("Empty bucket assignment") + # DDP guarantees all parameters in the bucket have the same device + self.device: torch.device = self.parameters[0].device + self.tensor: Optional[torch.Tensor] = None + + +class _OverlapStatus(enum.IntEnum): + r""" + Define possible statuses that :class:`ZeroRedundancyOptimizer` can be in when overlapping with :class:`DistributedDataParallel`. + + Attributes: + ``UNINITIALIZED``: The ZeRO instance is effectively uninitialized and + is waiting for DDP to finalize its bucketing. + ``DDP_HAS_REBUILT_BUCKETS``: DDP has rebuilt its buckets, meaning that + its bucketing is finalized. The ZeRO instance can now collect the + necessary information about the DDP bucketing. + ``INITIALIZED``: The ZeRO instance is fully initialized and can now + optimize parameters. + """ + + UNINITIALIZED = 0 + DDP_HAS_REBUILT_BUCKETS = 1 + INITIALIZED = 2 + + +class _OverlapInfo: + r""" + Information needed by :class:`ZeroRedundancyOptimizer` to overlap with :class:`DistributedDataParallel`. + + Arguments: + world_size (int): world size of the process group being used. + + Attributes: + shard_buckets (bool): if ``True``, then the assignment of each + :class:`DistributedDataParallel` bucket is partitioned across + possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e. + across possibly multiple ranks) to approximate uniformity following + a threshold given by the total parameter size divided by the world + size; if ``False``, then each bucket is wholly assigned to a single + :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank); + this should be set to the value passed into the hook constructor. + status (_OverlapStatus): current status; see :class:`_OverlapStatus` + for more information. + params_per_bucket (List[List[torch.Tensor]]): ``params_per_bucket[i]`` + gives the model parameters in the ``i``th bucket. + params_per_rank (List[List[torch.Tensor]]): ``params_per_rank[i]`` + gives the model parameters assigned to the ``i``th rank, where the + parameters are grouped by increasing bucket indices. + offsets (Dict[int, int]): maps from bucket index to the offset in + ``self.params_per_rank[rank]`` giving the index of the first + parameter in that bucket, where ``rank`` is this process's own + rank; the keys of this :class:`dict` are the bucket indices + assigned to this rank. + num_bucket_assignments (int): total number of bucket assignments across + all ranks; this is equal to the number of + :class:`DistributedDataParallel` gradient buckets if + ``shard_buckets=False`` and possibly greater otherwise. + total_size (int, optional): total size of all buckets (i.e. sum of + ``param.numel()`` for all ``param`` across all buckets) if + ``shard_buckets=True``; otherwise, ``None``. + broadcast_handles (List[Work]): :class:`list` of async work handles for + the parameter broadcasts. + bucket_index_to_future (Dict[int, torch.futures.Future]): + :class:`dict` mapping bucket index to the corresponding all-reduce + future. + bucket_index_to_bucket (Dict[int, dist.GradBucket]): :class:`dict` + mapping bucket index to the corresponding bucket. + bucket_indices_seen (List[int]): :class:`list` of the bucket indices + seen on this iteration. 
+ """ + + def __init__(self, world_size) -> None: + self.status: _OverlapStatus = _OverlapStatus.UNINITIALIZED + self.shard_buckets: bool = False + + # Modified per bucket reconstruction + self.params_per_bucket: List[List[torch.Tensor]] = [] + self.params_per_rank: List[List[torch.Tensor]] = [[] for _ in range(world_size)] + self.offsets: Dict[int, int] = {} + # Group Ranks + self.assigned_ranks_per_bucket: List[Set[int]] = [] + self.num_bucket_assignments: int = 0 + self.total_size: Optional[int] = None + + # Modified per iteration + self.broadcast_handles: List[Any] = [] + self.bucket_indices_seen: List[int] = [] + # Used by `hook_with_zero_step()` + self.bucket_index_to_future: Dict[int, torch.futures.Future] = {} + self.bucket_index_to_bucket: Dict[int, dist.GradBucket] = {} + + def wait_for_broadcasts(self) -> None: + r""" + Wait for all parameter broadcasts. + + This function should be called once all broadcasts have been scheduled, + meaning ``self.broadcast_handles`` is filled. This clears ``self.broadcast_handles`` + in preparation for the next iteration. + """ + assert ( + len(self.broadcast_handles) == self.num_bucket_assignments + ), f"Missing at least one broadcast handle on rank {dist.get_rank()}" + _ = [x.wait() for x in self.broadcast_handles] + self.broadcast_handles.clear() + + def clear_per_iter_info(self) -> None: + r""" + Clear the data structures that are modified per-iteration. + + This function should be called at the end of an iteration. + """ + self.bucket_indices_seen.clear() + self.bucket_index_to_future.clear() + self.bucket_index_to_bucket.clear() + + +class ZeroRedundancyOptimizer(Optimizer, Joinable): + r""" + Wrap an arbitrary :class:`optim.Optimizer ` and shards its states across ranks in the group. + + The sharing is done as described by ZeRO_. + + The local optimizer instance in each rank is only + responsible for updating approximately ``1 / world_size`` parameters and + hence only needs to keep ``1 / world_size`` optimizer states. After + parameters are updated locally, each rank will broadcast its parameters to + all other peers to keep all model replicas in the same state. + ``ZeroRedundancyOptimizer`` can be used in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel` to reduce per-rank peak + memory consumption. + + ``ZeroRedundancyOptimizer`` uses a sorted-greedy algorithm to pack a number + of parameters at each rank. Each parameter belongs to a single rank and is + not divided among ranks. The partition is arbitrary and might not match the + the parameter registration or usage order. + + Arguments: + params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s + or :class:`dict` s giving all parameters, which will be sharded + across ranks. + + Keyword Args: + optimizer_class (:class:`torch.nn.Optimizer`): the class of the local + optimizer. + process_group (``ProcessGroup``, optional): ``torch.distributed`` + ``ProcessGroup`` (default: ``dist.group.WORLD`` initialized by + :meth:`torch.distributed.init_process_group`). + parameters_as_bucket_view (bool, optional): if ``True``, parameters are + packed into buckets to speed up communication, and ``param.data`` + fields point to bucket views at different offsets; if ``False``, + each individual parameter is communicated separately, and each + ``params.data`` stays intact (default: ``False``). 
+ overlap_with_ddp (bool, optional): if ``True``, :meth:`step` is + overlapped with :class:`DistributedDataParallel` 's gradient + synchronization; this requires (1) either a functional optimizer + for the ``optimizer_class`` argument or one with a functional + equivalent and (2) registering a DDP communication hook + constructed from one of the functions in ``ddp_zero_hook.py``; + parameters are packed into buckets matching those in + :class:`DistributedDataParallel`, meaning that the + ``parameters_as_bucket_view`` argument is ignored. + If ``False``, :meth:`step` runs disjointly after the backward pass + (per normal). + (default: ``False``) + **defaults: any trailing arguments, which are forwarded to the local + optimizer. + + Example:: + + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> from torch.distributed.optim import ZeroRedundancyOptimizer + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> model = nn.Sequential(*[nn.Linear(2000, 2000).to(rank) for _ in range(20)]) + >>> ddp = DDP(model, device_ids=[rank]) + >>> opt = ZeroRedundancyOptimizer( + >>> ddp.parameters(), + >>> optimizer_class=torch.optim.Adam, + >>> lr=0.01 + >>> ) + >>> ddp(inputs).sum().backward() + >>> opt.step() + + .. warning:: + Currently, ``ZeroRedundancyOptimizer`` requires that all of the + passed-in parameters are the same dense type. + + .. warning:: + If you pass ``overlap_with_ddp=True``, be wary of the following: Given + the way that overlapping :class:`DistributedDataParallel` with + :class:`ZeroRedundancyOptimizer` is currently implemented, the first + two or three training iterations do not perform parameter updates in + the optimizer step, depending on if ``static_graph=False`` or + ``static_graph=True``, respectively. This is because it needs + information about the gradient bucketing strategy used by + :class:`DistributedDataParallel`, which is not finalized until the + second forward pass if ``static_graph=False`` or until the third + forward pass if ``static_graph=True``. To adjust for this, one option + is to prepend dummy inputs. + + .. warning:: ZeroRedundancyOptimizer is experimental and subject to change. + + .. _ZeRO: https://arxiv.org/abs/1910.02054 + + """ + + def __init__( + self, + params, + optimizer_class: Type[Optimizer], + process_group: Optional[Any] = None, + parameters_as_bucket_view: bool = False, + overlap_with_ddp: bool = False, + **defaults: Any, + ): + r"""Init.""" + # Perform type and assumption checks on the input parameters + params = self._verify_and_init_params(params) + self._verify_same_dense_param_type() + + # NOTE: The parent constructor uses `add_param_group()` which is + # partially overloaded in ZeroRedundancyOptimizer, so we use the + # `initialized` flag to dissociate the behaviour of `add_param_group()` + # between the parent and child. 
+ self.initialized = False + + Optimizer.__init__(self, params, defaults) + Joinable.__init__(self) + # Now, all parameters are held in both `self._all_params` and + # `self.param_groups` + + # Internal data structures (`_cache` indicates lazily evaluated) + self._param_to_rank_cache: Dict[torch.Tensor, int] = {} + self._param_to_index_cache: Dict[torch.Tensor, int] = {} + self._partition_parameters_cache: List[List[Dict]] = [] + self._index_to_param_cache: List[torch.Tensor] = [] + self._device_to_params_per_rank_cache: Dict[ + torch.device, List[List[torch.Tensor]] + ] = {} + self._bucket_assignments_per_rank_cache: List[ + Dict[int, _DDPBucketAssignment] + ] = [] + self._is_trainable_mask = self._get_is_trainable_mask() + + # Default device for collective communication and buckets + self._default_device = self._all_params[0].device + + self.process_group = ( + process_group if process_group is not None else dist.group.WORLD + ) + self.world_size: int = dist.get_world_size(self.process_group) + self.rank: int = dist.get_rank(self.process_group) + self.global_rank: int = dist.distributed_c10d.get_global_rank( + self.process_group, self.rank + ) + + self._overlap_with_ddp: bool = overlap_with_ddp + self._optim_defaults = defaults + self._optim_constructor = self._get_optimizer_constructor(optimizer_class) + + # If `overlap_with_ddp=True`, local optimizer initialization is delayed + # to run time after the necessary information has been collected + if not overlap_with_ddp: + self._init_local_optimizer() + else: + self._overlap_info: _OverlapInfo = _OverlapInfo(self.world_size) + if parameters_as_bucket_view: + logger.warning( + "`parameters_as_bucket_view=True` will be ignored since " + "`overlap_with_ddp=True`; instead, a different bucketing " + "strategy will be used" + ) + + # `self._buckets` is used if `parameters_as_bucket_view=True`, in + # which case parameter data is flattened into contiguous bucket tensors + self.parameters_as_bucket_view = parameters_as_bucket_view + self._buckets: List[List[torch.Tensor]] = [] + self._build_param_buckets() + + # Optional consolidated optimizer state, only populated if this rank + # is the target in `consolidate_state_dict()` + self._all_state_dicts: List[Dict[str, Any]] = [] + + self.initialized = True + + def _clear_cache(self) -> None: + r"""Clear the cached data structures giving partition information.""" + self._partition_parameters_cache.clear() + self._param_to_rank_cache.clear() + self._index_to_param_cache.clear() + self._param_to_index_cache.clear() + self._device_to_params_per_rank_cache.clear() + self._bucket_assignments_per_rank_cache.clear() + + def add_param_group(self, param_group: Dict[str, Any]) -> None: + r""" + Add a parameter group to the :class:`Optimizer` 's ``param_groups``. + + This can be useful when fine tuning a pre-trained network, as frozen + layers can be made trainable and added to the :class:`Optimizer` as + training progresses. + + Arguments: + param_group (dict): specifies the parameters to be optimized and + group-specific optimization options. + + .. warning:: This method handles updating the shards on all partitions + but needs to be called on all ranks. Calling this on a subset of + the ranks will cause the training to hang because communication + primitives are called depending on the managed parameters and + expect all the ranks to participate on the same set of parameters. 
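+
+        Example (a sketch; ``zero_opt`` and ``new_layer`` are hypothetical,
+        and the call must be made on every rank)::
+
+            >>> # xdoctest: +SKIP
+            >>> zero_opt.add_param_group(
+            >>>     {"params": new_layer.parameters(), "lr": 1e-3}
+            >>> )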
+ """ + if self.initialized and self._overlap_with_ddp: + raise RuntimeError( + "ZeroRedundancyOptimizer with `overlap_with_ddp=True` only " + "supports a single parameter group" + ) + + super().add_param_group(param_group) + # NOTE: The rest of the method assumes that the call to the parent's + # `add_param_group()` appends the new parameter group and preserves + # the previous parameter-group ordering + + if self.initialized: + # Force a re-partitioning of the parameters + self._clear_cache() + param_groups = self._partition_parameters()[self.rank] + # NOTE: All parameters in the old parameter groups should be + # assigned to the same ranks so that the local optimizers do not + # need to be reinitialized + + # Add the parameters assigned to this rank from the new parameter + # group to the local optimizer, if any + if len(param_groups) == len(self.optim.param_groups) + 1: + self.optim.add_param_group(param_groups[-1]) + + # Update the bucketing strategy accordingly + if self.parameters_as_bucket_view: + self._build_param_buckets() + + def consolidate_state_dict(self, to: int = 0) -> None: + r""" + Consolidate a list of ``state_dict`` s (one per rank) on the target rank. + + Arguments: + to (int): the rank that receives the optimizer states (default: 0). + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and this method is + called before this :class:`ZeroRedundancyOptimizer` instance + has been fully initialized, which happens once + :class:`DistributedDataParallel` gradient buckets have been + rebuilt. + + .. warning:: This needs to be called on all ranks. + """ + self._check_overlap_initialized() + + # Sync the exposed `param_groups` attributes to the local optimizer in + # case they have been updated + self._sync_param_groups(self.param_groups, self.optim.param_groups) + + # Pull the sharded state from all ranks and store them in rank order + empty_messenger = torch.tensor( + [0], dtype=torch.uint8, device=self._default_device + ) + + # NOTE: We wastefully use `broadcast()` (e.g. 
instead of `gather()`) + # due to compatibility issues with NCCL backend; a possible follow-up + # is to move all sharded state management to RPC RRef + self._all_state_dicts = [] + for rank in range(self.world_size): + global_rank = dist.distributed_c10d.get_global_rank( + self.process_group, rank + ) + if self.rank == to: + # Consolidate all local `state_dict`s on this rank, storing on + # CPU to save GPU memory + if rank == self.rank: + # Directly append own optimizer state + self._all_state_dicts.append( + _recursive_copy_to_device( + self.optim.state_dict(), + non_blocking=True, + device=torch.device("cpu"), + ) + ) + else: + # Receive the optimizer state from the source rank + local_state_dict = _broadcast_object( + empty_messenger, + src_rank=global_rank, + group=self.process_group, + device=self._default_device, + ) + self._all_state_dicts.append( + _recursive_copy_to_device( + local_state_dict, + non_blocking=True, + device=torch.device("cpu"), + ) + ) + else: + if rank == self.rank: + # Send the optimizer state to the target rank + _ = _broadcast_object( + self.optim.state_dict(), + src_rank=self.global_rank, + group=self.process_group, + device=self._default_device, + ) + elif rank != to: + # Discard the received object; `broadcast()` is used for + # compatibility reasons + _ = _broadcast_object( + empty_messenger, + src_rank=global_rank, + group=self.process_group, + device=self._default_device, + ) + + def _verify_params_per_rank( + self, + params_per_rank: List[List[torch.Tensor]], + ) -> None: + r""" + Verify ``params_per_rank`` for :meth:`_partition_parameters`. + + The verification is done by checking that ``params_per_rank`` has length equal + to the world size and that it does not contain any parameters not passed into the + :class:`ZeroRedundancyOptimizer` constructor. + + The parameters in ``params_per_rank`` being a strict subset of those + passed into the constructor is valid since some parameters may be + frozen. + + Raises: + ValueError: if ``params_per_rank`` does not have length equal to + the world size or if it contains a parameter that was not + passed into the :class:`ZeroRedundancyOptimizer` constructor. + """ + if len(params_per_rank) != self.world_size: + raise ValueError( + "`params_per_rank` must have length equal to the world size" + ) + all_params_set = set(self._all_params) + for params in params_per_rank: + for param in params: + if param not in all_params_set: + raise ValueError( + "Passing a new parameter in `params_per_rank` that " + "was not passed into the ZeroRedundancyOptimizer " + "constructor" + ) + + def _partition_param_group( + self, param_group: Dict[str, Any], params_per_rank: List[List[torch.Tensor]] + ) -> None: + r""" + Partition the parameter group ``param_group`` according to ``params_per_rank``. + + The partition will modify the ``self._partition_parameters_cache``. This method should + only be used as a subroutine for :meth:`_partition_parameters`. + + Arguments: + param_group (dict[str, Any]): a parameter group as normally defined + in an optimizer state. + params_per_rank (list[list[torch.Tensor]]): a :class:`list` of + length world size containing :class:`list` s of parameters to + assign to each rank. 
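+
+        For example, with a world size of 2, a hypothetical
+        ``params_per_rank = [[p0, p2], [p1]]`` assigns ``p0`` and ``p2`` to
+        rank 0 and ``p1`` to rank 1.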
+ """ + for rank, params in enumerate(params_per_rank): + rank_param_group = copy.copy(param_group) + rank_param_group["params"] = params + self._partition_parameters_cache[rank].append(rank_param_group) + + def _partition_parameters( + self, + params_per_rank: Optional[List[List[torch.Tensor]]] = None, + ) -> List[List[Dict]]: + r""" + Partitions parameters across distributed data parallel ranks. + + Arguments: + params_per_rank (list[list[torch.Tensor]], optional): a + :class:`list` of length world size containing :class:`list` s + of parameters to assign to each rank; this provides a way to + specify a partition manually. + If ``None``, the parameters are partitioned according to an + internal algorithm. + (default: ``None``) + + Returns: + A :class:`list` where each element of the list contains the + ``param_groups`` for a rank (which itself is a :class:`list` of + :class:`dict`); element 0 corresponds to rank 0, etc.; each rank + stores the ``param_groups`` for all ranks for the collective + communication in :meth:`step`. + + Raises: + ValueError: see :meth:`_validate_params_per_rank`. + RuntimeError: if ``params_per_rank`` is not ``None`` and this + :class:`ZeroRedundancyOptimizer` instance is using more than + one parameter group. + """ + if params_per_rank is None: + # Partition the parameters optimizing for uniformity + if len(self._partition_parameters_cache) == 0: + self._partition_parameters_cache = [[] for _ in range(self.world_size)] + sizes = [0] * self.world_size + for param_group in self.param_groups: + param_group_params_per_rank: List[List] = [ + [] for _ in range(self.world_size) + ] + # Sort the parameters by size (largest first) + params_sorted = sorted( + param_group["params"], key=lambda t: t.numel(), reverse=True + ) + for param in params_sorted: + # Greedily add the parameter to rank with smallest size so far + rank = self._get_min_index(sizes) + param_group_params_per_rank[rank].append(param) + sizes[rank] += param.numel() + # Apply the constructed partition of the parameter group + self._partition_param_group( + param_group, param_group_params_per_rank + ) + + return self._partition_parameters_cache + + # Partition the parameters according to `params_per_rank` + assert len(self._partition_parameters_cache) == 0, ( + "Specifying `params_per_rank` should only be done when the " + "parameters have not been partitioned yet" + ) + if len(self.param_groups) != 1: + raise RuntimeError( + "Specifying `params_per_rank` only supports a single parameter group" + ) + self._verify_params_per_rank(params_per_rank) + self._partition_parameters_cache = [[] for _ in range(self.world_size)] + + # Apply the passed-in partition of the parameter group + param_group = self.param_groups[0] + self._partition_param_group(param_group, params_per_rank) + + return self._partition_parameters_cache + + @property + def _param_to_rank(self) -> Dict[torch.Tensor, int]: + r""":class:`dict` mapping parameters to their assigned data parallel rank in the partition.""" + if len(self._param_to_rank_cache) == 0: + for rank, param_groups in enumerate(self._partition_parameters()): + for param_group in param_groups: + for param in param_group["params"]: + self._param_to_rank_cache[param] = rank + return self._param_to_rank_cache + + @property + def _param_to_index(self) -> Dict[torch.Tensor, int]: + r""" + :class:`dict` mapping parameters to their indices in the global optimizer state. 
+ + NOTE: This assumes that the global optimizer state's indexing (in + ``state_dict``) follows a linear ordering over the parameter groups. + """ + if len(self._param_to_index_cache) == 0: + self._param_to_index_cache = { + p: i + for i, p in enumerate(chain(*(g["params"] for g in self.param_groups))) + } + return self._param_to_index_cache + + @property + def _index_to_param(self) -> List[torch.Tensor]: + r"""List mapping parameter indices in the global optimizer scheme to the actual params.""" + if len(self._index_to_param_cache) == 0: + self._index_to_param_cache = list( + chain(*(g["params"] for g in self.param_groups)) + ) + return self._index_to_param_cache + + def _broadcast_params_from_rank(self, rank: int): + r""" + Broadcast the shard of parameters from a given rank to all other ranks asynchronously. + + Arguments: + rank (int): the source rank. + + Returns: + A :class:`list` of async work handles for the ``broadcast()`` s + performed to synchronize the parameters. + """ + assert not self._overlap_with_ddp, ( + "`_broadcast_params_from_rank()` should not be used if " + "`overlap_with_ddp=True`; instead, the broadcasting should " + "happen in the DDP communication hook" + ) + handles = [] + if self.parameters_as_bucket_view: + for dev_i_buckets in self._buckets: + bucket = dev_i_buckets[rank] + global_rank = dist.distributed_c10d.get_global_rank( + self.process_group, rank + ) + handles.append( + dist.broadcast( + tensor=bucket, + src=global_rank, + group=self.process_group, + async_op=True, + ) + ) + else: + param_groups = self._partition_parameters()[rank] + global_rank = dist.distributed_c10d.get_global_rank( + self.process_group, rank + ) + for param_group in param_groups: + for param in param_group["params"]: + handles.append( + dist.broadcast( + tensor=param.data, + src=global_rank, + group=self.process_group, + async_op=True, + ) + ) + return handles + + def _sync_params(self): + r""" + Sync all parameter shards across the ranks. + + This rank sends its shard of the parameters to all other ranks and + receives a shard from each other rank. This is done using + ``broadcast()``. Parameters are sent bucket-by-bucket if + ``parameters_as_bucket_view=True``and sent parameter-by-parameter + otherwise. + """ + handles = [] + for rank in range(self.world_size): + handles.extend(self._broadcast_params_from_rank(rank)) + _ = [x.wait() for x in handles] + + @property + def _device_to_params_per_rank( + self, + ) -> Dict[torch.device, List[List[torch.Tensor]]]: + r""" + Return device parameters assigned per rank. + + :class:`dict` mapping each device to a :class:`list` of the per-rank parameter + lists filtered to only include the parameters stored on that device. + Each per-rank parameter list gives the parameters assigned to that rank + to update. + + This is used for constructing the parameter buckets if + ``parameters_as_bucket_view=True``. + + Let ``dev_i`` denote the ``i``th device for this rank. Then: + ``dev_0`` maps to a list containing: + rank 0's assigned parameters stored on ``dev_0``, + rank 1's assigned parameters stored on ``dev_0``, + ... + ``dev_1`` maps to a list containing: + rank 0's assigned parameters stored on ``dev_1``, + rank 1's assigned parameters stored on ``dev_1``, + ... + ... 
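The two cached mappings above rely on the global ``state_dict`` indexing parameters linearly across the parameter groups. A small standalone sketch of how that flat indexing is derived; the toy parameter groups are illustrative.

from itertools import chain
import torch

param_groups = [
    {"params": [torch.empty(2), torch.empty(3)], "lr": 0.1},
    {"params": [torch.empty(4)], "lr": 0.01},
]
index_to_param = list(chain(*(g["params"] for g in param_groups)))
param_to_index = {p: i for i, p in enumerate(index_to_param)}
# The third parameter overall (global index 2) is the single tensor of group 1.
assert param_to_index[param_groups[1]["params"][0]] == 2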
+ """ + assert self.parameters_as_bucket_view, ( + "`_device_to_params_per_rank` should only be used if " + "`parameters_as_bucket_view=True`" + ) + if len(self._device_to_params_per_rank_cache) == 0: + for rank, param_groups in enumerate(self._partition_parameters()): + for param_group in param_groups: + for param in param_group["params"]: + device = param.device + if device not in self._device_to_params_per_rank_cache: + self._device_to_params_per_rank_cache[device] = [ + [] for _ in range(self.world_size) + ] + self._device_to_params_per_rank_cache[device][rank].append( + param + ) + return self._device_to_params_per_rank_cache + + def _get_min_index( + self, + values: List[int], + disallowed_indices: Optional[Set[int]] = None, + ) -> int: + r""" + Return ``values.index(min(values))``, except only uses one pass. + + It also excludes any indices in ``disallowed_indices`` if provided. + + Arguments: + values: (List[int]): :class:`list` of values. + disallowed_indices (Optional[Set[int]]): indices that are + disallowed from being the returned min index. + """ + min_index = -1 + min_value = float("inf") + for i, value in enumerate(values): + if disallowed_indices and i in disallowed_indices: + continue + if value < min_value: + min_value = value + min_index = i + assert min_index >= 0, "All indices are disallowed" + return min_index + + def _assign_bucket_subset_to_rank( + self, + bucket_index: int, + bucket_params: List[torch.Tensor], + bucket_offset: int, + assigned_rank: int, + assigned_ranks_per_bucket: List[Set[int]], + ) -> None: + r""" + Assign ``bucket_params`` to the rank with the least size assigned so far and collects relevant information. + + The model parameters given by ``bucket_params`` represents a (possibly non-strict) + subset of the parameters corresponding to a :class:`DistributedDataParallel` bucket. + + Arguments: + bucket_index (int): index of the :class:`DistributedDataParallel` + gradient bucket. + bucket_params (List[torch.Tensor]): subset of the parameters + corresponding to the bucket to assign. + bucket_offset (int): offset giving the index of the first element + in ``bucket_params`` in the bucket's full parameter list. + assigned_rank (int): group rank to assign to. + assigned_ranks_per_bucket (List[Set[int]]): :class:`set` of group ranks + assigned to each bucket. + """ + overlap_info = self._overlap_info + if len(bucket_params) == 0: + raise ValueError("Empty bucket assignment") + params_per_rank = overlap_info.params_per_rank + offsets = overlap_info.offsets + + self._bucket_assignments_per_rank_cache[assigned_rank][ + bucket_index + ] = _DDPBucketAssignment(bucket_index, bucket_params, bucket_offset) + if self.global_rank == assigned_rank: + offsets[bucket_index] = len(params_per_rank[assigned_rank]) + params_per_rank[assigned_rank].extend(bucket_params) + assigned_ranks_per_bucket[bucket_index].add(assigned_rank) + self._overlap_info.num_bucket_assignments += 1 + + @property + def _bucket_assignments_per_rank(self) -> List[Dict[int, _DDPBucketAssignment]]: + r""" + Return DDP bucket parameters assigned per rank. + + :class:`list` of length world size consisting of :class:`dict` s + mapping bucket indices to :class:`_DDPBucketAssignment` s for each + rank. 
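The single-pass min-index helper above is a plain argmin with an exclusion set. A standalone equivalent with example calls, making the tie-breaking (earliest smallest index wins) explicit; the function name is illustrative.

from typing import List, Optional, Set

def min_index(values: List[int], disallowed: Optional[Set[int]] = None) -> int:
    best, best_value = -1, float("inf")
    for i, v in enumerate(values):
        if disallowed and i in disallowed:
            continue
        if v < best_value:  # strict '<' keeps the earliest index on ties
            best, best_value = i, v
    assert best >= 0, "all indices were disallowed"
    return best

assert min_index([4, 1, 1, 3]) == 1        # first of the tied minima
assert min_index([4, 1, 1, 3], {1}) == 2   # exclusion shifts the choice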
+ """ + assert self._overlap_with_ddp, ( + "`_bucket_assignments_per_rank` only be used if `overlap_with_ddp=True`" + ) + if len(self._bucket_assignments_per_rank_cache) > 0: + return self._bucket_assignments_per_rank_cache + + overlap_info = self._overlap_info + assert overlap_info.status == _OverlapStatus.INITIALIZED + + self._bucket_assignments_per_rank_cache = [{} for _ in range(self.world_size)] + params_per_bucket = overlap_info.params_per_bucket + + if overlap_info.shard_buckets: + # Define the assignment threshold to approximate uniformity + assert overlap_info.total_size is not None, "`total_size` was not computed" + threshold = overlap_info.total_size / self.world_size # type: ignore[operator] + size_per_rank = [0 for _ in range(self.world_size)] + + num_buckets = len(params_per_bucket) + overlap_info.assigned_ranks_per_bucket = [set() for _ in range(num_buckets)] + assigned_ranks_per_bucket = overlap_info.assigned_ranks_per_bucket + if not overlap_info.shard_buckets: + # Assign each DDP bucket entirely to a single rank + for bucket_index, bucket_params in enumerate(params_per_bucket): + assert len(bucket_params) > 0, "Empty bucket" + assigned_rank = self._get_assigned_rank(bucket_index) + self._assign_bucket_subset_to_rank( + bucket_index, + bucket_params, + 0, + assigned_rank, + assigned_ranks_per_bucket, + ) + else: + # Assign each DDP bucket to possibly multiple ranks + # Specifically, sort the DDP buckets by increasing size, and for + # each bucket, iteratively assign the maximal unassigned subset + # with size less than `threshold` to the rank with the least total + # size so far -- each such assignment is represented by a + # `_DDPBucketAssignment` instance and only contains parameters from + # a single DDP bucket + params_per_bucket_enum = sorted( + enumerate(params_per_bucket), key=lambda x: sum(p.numel() for p in x[1]) + ) + for bucket_index, bucket_params in params_per_bucket_enum: + assert len(bucket_params) > 0, "Empty bucket" + bucket_offset = 0 + assignment_size = 0 + for param_index, param in enumerate(bucket_params): + param_numel = param.numel() + if ( + assignment_size + param_numel >= threshold + and param_index > bucket_offset + ): + assigned_rank = self._get_min_index( + size_per_rank, assigned_ranks_per_bucket[bucket_index] + ) + # Include up to but not including the parameter that + # exceeded the threshold + self._assign_bucket_subset_to_rank( + bucket_index, + bucket_params[bucket_offset:param_index], + bucket_offset, + assigned_rank, + assigned_ranks_per_bucket, + ) + size_per_rank[assigned_rank] += assignment_size + bucket_offset = param_index + assignment_size = 0 + assignment_size += param_numel + # Assign the remainder of the bucket so that no assignment + # spans across two buckets + assigned_rank = self._get_min_index( + size_per_rank, assigned_ranks_per_bucket[bucket_index] + ) + self._assign_bucket_subset_to_rank( + bucket_index, + bucket_params[bucket_offset:], + bucket_offset, + assigned_rank, + assigned_ranks_per_bucket, + ) + size_per_rank[assigned_rank] += assignment_size + + return self._bucket_assignments_per_rank_cache + + def _local_step( + self, + gradients: Optional[List[Optional[torch.Tensor]]] = None, + closure: Optional[Callable[[], float]] = None, + **kwargs: Any, + ) -> Optional[float]: + r""" + Perform a single optimizer step without syncing parameters across ranks. 
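When bucket sharding is enabled, each DDP bucket is cut into contiguous chunks of roughly ``total_size / world_size`` elements, and each chunk goes to the currently least-loaded rank. A simplified sketch of that splitting loop, operating on raw element counts only; `split_bucket` is illustrative, and the real code additionally tracks `_DDPBucketAssignment` objects and per-bucket rank sets.

from typing import List, Tuple

def split_bucket(numels: List[int], threshold: int) -> List[Tuple[int, int]]:
    # Returns (start, end) index ranges; a chunk is flushed just before the
    # parameter that would cross the threshold, and the remainder is flushed
    # at the end so no assignment ever spans two buckets.
    chunks, start, running = [], 0, 0
    for i, n in enumerate(numels):
        if running + n >= threshold and i > start:
            chunks.append((start, i))
            start, running = i, 0
        running += n
    chunks.append((start, len(numels)))
    return chunks

assert split_bucket([5, 5, 5, 5], threshold=8) == [(0, 1), (1, 2), (2, 3), (3, 4)]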
+ + Arguments: + gradients (list[Optional[torch.Tensor]], optional): a :class:`list` + of length equal to the number of parameters assigned to this + rank containing gradient tensors or ``None`` as its elements; + a ``None`` in the :class:`list` indicates that the + corresponding parameter should not be updated. + If the argument itself is ``None``, then all parameters are + updated, and the gradients are assumed to be already populated. + (default: ``None``) + closure (Callable): a closure that re-evaluates the model and + returns the loss; optional for most optimizers and should be + ``None`` if ``gradients`` is not ``None``; (default: ``None``) + Returns: + Optional loss depending on the underlying local optimizer. + + .. warning:: + The argument ``gradients`` should only be specified (i.e. not + ``None``) if ``overlap_with_ddp=True``, in which case + :class:`ZeroRedundancyOptimizer` wraps a functional optimizer. + """ + Join.notify_join_context(self) + # Check if the model trainability has changed + is_trainable_mask = self._get_is_trainable_mask() + if is_trainable_mask != self._is_trainable_mask: + if self._overlap_with_ddp: + raise RuntimeError( + "ZeroRedundancyOptimizer with `overlap_with_ddp=True` " + "does not support changing parameter trainability at run " + "time" + ) + logger.warning( + "ZeroRedundancyOptimizer detected that the trainable " + "parameters changed; rebuilding the parameter buckets if " + "enabled" + ) + self._build_param_buckets() + self._is_trainable_mask = is_trainable_mask + + # Sync the exposed `param_groups` attributes to the local optimizer in + # case they have been updated + self._sync_param_groups(self.param_groups, self.optim.param_groups) + + # Run the optimizer step on this shard only + if gradients is None: + loss = ( + self.optim.step(**kwargs) + if closure is None + else self.optim.step(closure=closure, **kwargs) + ) + else: + assert self._overlap_with_ddp, ( + "Specifying `gradients` should not " + "be used when `overlap_with_ddp=False`" + ) + assert closure is None, ( + "`closure` is not supported when using a local functional optimizer" + ) + loss = self.optim.step(gradients=gradients) + + # Sync any updated attributes in the local optimizer to the exposed + # `param_groups` + self._sync_param_groups(self.optim.param_groups, self.param_groups) + + return loss + + def step( + self, + closure: Optional[Callable[[], float]] = None, + **kwargs: Any, + ) -> Optional[float]: + r""" + Perform a single optimizer step and syncs parameters across all ranks. + + Arguments: + closure (Callable): a closure that re-evaluates the model and + returns the loss; optional for most optimizers. + Returns: + Optional loss depending on the underlying local optimizer. + + .. note: Any extra parameters are passed to the base optimizer as-is. + """ + if self._overlap_with_ddp: + logger.warning( + "`step()` should not be included in the training loop when " + "`overlap_with_ddp=True`" + ) + return None + + # Perform the local optimizer step + loss = self._local_step(closure=closure, **kwargs) + + # Sync all of the updated parameter shards across the ranks + self._sync_params() + + return loss + + def join_hook(self, **kwargs): + r""" + Return the ZeRO join hook. + + It enables training on uneven inputs by + shadowing the collective communications in the optimizer step. + + Gradients must be properly set before this hook is called. 
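For the default ``overlap_with_ddp=False`` case, `step()` is used exactly like a regular optimizer step, except that each rank only updates its own shard and then broadcasts it. A minimal sketch of the documented usage pattern; the model, data, and hyperparameters are placeholders.

import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.optim import ZeroRedundancyOptimizer

# e.g. optimizer = ZeroRedundancyOptimizer(
#          ddp_model.parameters(), optimizer_class=torch.optim.SGD, lr=0.01)
def train_step(ddp_model: DDP, optimizer: ZeroRedundancyOptimizer,
               inputs: torch.Tensor, targets: torch.Tensor) -> float:
    optimizer.zero_grad()
    loss = F.mse_loss(ddp_model(inputs), targets)
    loss.backward()       # DDP all-reduces gradients across ranks
    optimizer.step()      # local shard update, then parameter broadcast
    return loss.item()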
+ + Arguments: + kwargs (dict): a :class:`dict` containing any keyword arguments + to modify the behavior of the join hook at run time; all + :class:`Joinable` instances sharing the same join context + manager are forwarded the same value for ``kwargs``. + + This hook does not support any keyword arguments; i.e. ``kwargs`` is + unused. + """ + return _ZeROJoinHook(self) + + @property + def join_device(self) -> torch.device: + r"""Return default device.""" + return self._default_device + + @property + def join_process_group(self) -> Any: + r"""Return process group.""" + return self.process_group + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + r""" + Load the state pertaining to the given rank from the input ``state_dict``, updating the local optimizer as needed. + + Arguments: + state_dict (dict): optimizer state; should be an object returned + from a call to :meth:`state_dict`. + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and this method is + called before this :class:`ZeroRedundancyOptimizer` instance + has been fully initialized, which happens once + :class:`DistributedDataParallel` gradient buckets have been + rebuilt. + """ + self._check_overlap_initialized() + + for index, value in state_dict["state"].items(): + param = self._index_to_param[index] + if self._param_to_rank[param] != self.rank: + # Clear any state irrelevant to this rank + state_dict["state"][index] = None + else: + # Load the parameter state to the local optimizer + self.optim.state[param] = _recursive_copy_to_device( + value, non_blocking=True, device=param.device + ) + # Force zero-dimensional tensors (like Adam "step") on CPU + for state_name, state_value in self.optim.state[param].items(): + if torch.is_tensor(state_value) and state_value.dim() == 0: + self.optim.state[param][state_name] = state_value.cpu() + + super().load_state_dict(state_dict) + + # Sync the input state with the exposed and local optimizer states + self._sync_param_groups(state_dict["param_groups"], self.param_groups) + self._sync_param_groups(self.param_groups, self.optim.param_groups) + + def state_dict(self) -> Dict[str, Any]: + r""" + Return the last global optimizer state known to this rank. + + .. warning: + If the state has not been consolidated to this rank, this raises a + runtime error, and even if it has, the state may not be up-to-date, + depending on when :meth:`consolidate_state_dict` was last called. + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and this method is + called before this :class:`ZeroRedundancyOptimizer` instance + has been fully initialized, which happens once + :class:`DistributedDataParallel` gradient buckets have been + rebuilt; or if this method is called without a preceding call + to :meth:`consolidate_state_dict`. + """ + self._check_overlap_initialized() + + if len(self._all_state_dicts) == 0: + raise RuntimeError( + "Optimizer state has not been consolidated on this rank. " + f"Please call `consolidate_state_dict(to={self.rank})` on " + "all ranks beforehand if you meant to save the global state." 
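Because both DDP and this optimizer expose the `Joinable` interface (`join_hook`, `join_device`, `join_process_group`), uneven per-rank datasets can be handled by entering a single `Join` context over both objects, with the ZeRO hook shadowing the broadcasts issued in `step()`. A hedged sketch of that pattern; the loader and loss are placeholders.

import torch
import torch.nn.functional as F
from torch.distributed.algorithms.join import Join

def train_uneven(ddp_model, zero_optimizer, loader):
    # Ranks that exhaust their loader early keep shadowing the collectives
    # of the ranks that are still training.
    with Join([ddp_model, zero_optimizer]):
        for inputs, targets in loader:
            zero_optimizer.zero_grad()
            loss = F.mse_loss(ddp_model(inputs), targets)
            loss.backward()
            zero_optimizer.step()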
+ ) + + # Get the possibly-stale global optimizer state that uses global + # parameter indexing + state_dict = super().state_dict() + + # Update the global optimizer state with local state information, + # factoring in the translation from local to global indexing + for rank, local_state_dict in enumerate(self._all_state_dicts): + local_param_groups = local_state_dict["param_groups"] + global_param_groups = self._partition_parameters()[rank] + assert len(local_param_groups) == len( + global_param_groups + ), "Mismatch between number of local and global parameter groups" + + for local_param_group, global_param_group in zip( + local_param_groups, global_param_groups + ): + # `local_param_group` stores local indices, while + # `global_param_group` stores the tensors directly + local_param_indices = local_param_group["params"] + global_params = global_param_group["params"] + + assert len(local_param_indices) == len( + global_params + ), "Mismatch between number of local and global parameters in parameter group" + for local_param_index, global_param in zip( + local_param_indices, global_params + ): + # Update the global parameter state, if any + if local_param_index in local_state_dict["state"]: + global_param_index = self._param_to_index[global_param] + state_dict["state"][global_param_index] = local_state_dict[ + "state" + ][local_param_index] + + # Sort the parameters in the state + state_dict["state"] = dict(sorted(state_dict["state"].items())) + return state_dict + + @staticmethod + def _sync_param_groups( + src_param_groups: List[Dict[Any, Any]], + dst_param_groups: List[Dict[Any, Any]], + ) -> None: + r""" + Sync the attributes from the source parameter groups to the destination parameter groups. + + Example attributes include learning rate or scheduler attributes. The + two parameter groups should have the same length (i.e. same number of + parameter groups). + + Arguments: + src_param_groups (list[dict]): parameter groups giving the + attribute settings to copy. + dst_param_groups (list[dict]): parameter groups giving the + attribute settings to set. + """ + assert len(src_param_groups) == len( + dst_param_groups + ), "Mismatch between number of source and destination parameter groups" + for src_param_group, dst_param_group in zip(src_param_groups, dst_param_groups): + # Sync all attributes except the parameters + for attr in filter(lambda x: x != "params", src_param_group.keys()): + dst_param_group[attr] = src_param_group[attr] + + def _build_param_buckets(self) -> None: + r""" + Build parameter buckets if ``parameters_as_bucket_view=True``. + + For each device that stores this rank's parameters, there is a + bucket (represented as a tensor) containing all of the parameters on + that device that are assigned to a given rank in the parameter update + partition. + + This method is called in the constructor and any time parameter + trainability is changed. + + .. warning:: + The current implementation assumes that all of the parameters in a + bucket are of the same dense type when allocating the bucket's + tensor. + + .. warning:: + If the model parameters are stored across more than one device, + then the storage partitioning must be the same across all + processes in order for parameter synchronization to work. 
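The attribute sync above is a straight copy of every hyperparameter key except ``"params"`` between two parallel lists of group dicts. A standalone sketch with a quick check; the function and sample groups are illustrative.

from typing import Any, Dict, List

def sync_param_groups(src: List[Dict[Any, Any]], dst: List[Dict[Any, Any]]) -> None:
    assert len(src) == len(dst), "parameter group count mismatch"
    for s, d in zip(src, dst):
        for key in s:
            if key != "params":  # never touch the parameter lists themselves
                d[key] = s[key]

exposed = [{"params": ["p0"], "lr": 0.01, "momentum": 0.9}]
local = [{"params": ["p0_shard"], "lr": 0.1, "momentum": 0.0}]
sync_param_groups(exposed, local)
assert local[0]["lr"] == 0.01 and local[0]["params"] == ["p0_shard"]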
+ """ + if not self.parameters_as_bucket_view or self._overlap_with_ddp: + return + + # `self._buckets[i][j]` are the parameters stored on device i and + # assigned to rank j + num_devices = len(self._device_to_params_per_rank) + self._buckets = [[] for _ in range(num_devices)] # type: ignore[assignment] + + for dev_i, (device, params_per_rank) in enumerate( + self._device_to_params_per_rank.items() + ): + for params in params_per_rank: + bucket_size = 0 + dtype = None + trainable_params = [] + for param in params: + if not _is_trainable(param): + # Clone in case the parameter was previously part of + # a bucket to avoid the data from being destroyed + param.data = param.data.detach().clone() + else: + bucket_size += param.numel() + trainable_params.append(param) + dtype = param.dtype # assumes all same dtype + + if bucket_size == 0: + # Create a dummy bucket if there are no parameters + bucket = torch.zeros(1, device=device) + else: + # Construct the bucket (assuming all dense and same dtype) + bucket = torch.empty(bucket_size, dtype=dtype, device=device) + offset = 0 + for param in trainable_params: + offset_next = offset + param.numel() + bucket[offset:offset_next].copy_(param.data.flatten()) + param.data = bucket[offset:offset_next].view_as(param.data) + offset = offset_next + self._buckets[dev_i].append(bucket) # type: ignore[arg-type] + + def _build_ddp_param_buckets(self) -> None: + r""" + Build the DDP bucket with parameters assigned to this rank. + + For each DDP bucket with parameters assigned to this rank, flattens the + data of those parameters into a single tensor and saves the tensor to + the ``tensor`` attribute in the corresponding + :class:`_DDPBucketAssignment` instance stored in + ``self._bucket_assignments_per_rank``. + + :class:`DistributedDataParallel` guarantees that the parameters + corresponding to a gradient bucket have the same device and the same + dtype. + """ + for bucket_assignments in self._bucket_assignments_per_rank: + for bucket_assignment in bucket_assignments.values(): + params = bucket_assignment.parameters + bucket_size = 0 + dtype = None + for param in params: + assert _is_trainable(param), ( + "Model parameter " + "corresponding to a gradient in a DDP bucket should " + "require a gradient" + ) + bucket_size += param.numel() + dtype = param.dtype # assumes all same dtype + assert bucket_size > 0, "Empty bucket" + + # Construct the bucket tensor (assuming all dense and same dtype) + tensor = torch.empty( + bucket_size, dtype=dtype, device=bucket_assignment.device + ) + offset = 0 + for param in params: + offset_next = offset + param.numel() + tensor[offset:offset_next].copy_(param.data.flatten()) + param.data = tensor[offset:offset_next].view_as(param.data) + offset = offset_next + bucket_assignment.tensor = tensor + + def _verify_and_init_params( + self, + params: Any, + ) -> Union[List[torch.Tensor], List[dict]]: + r""" + Verify the type of ``params`` and initializes ``self._all_params`` as a :class:`list` of all parameters. + + The initializagtion will first make sure that provided ``params`` is valid. + + Arguments: + params (Any): Candidate parameter list or parameter groups to verify. + + Raises: + TypeError: ``params`` has an invalid type. + ValueError: ``params`` is empty. + + Returns: + The persistent form of ``params`` to be passed into the parent + :class:`Optimizer` constructor -- i.e. returns ``params`` as a + :class:`list` to ensure that it can be iterated over again. 
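The bucketing trick above (shared by the DDP bucket path that follows) is to allocate one flat tensor and re-point each parameter's ``.data`` at a slice of it, so a single broadcast of the bucket moves every parameter at once. A minimal standalone sketch of that flatten-and-view step, assuming all tensors share one device and dtype; `bucket_params` is an illustrative name.

from typing import List
import torch

def bucket_params(params: List[torch.Tensor]) -> torch.Tensor:
    total = sum(p.numel() for p in params)
    bucket = torch.empty(total, dtype=params[0].dtype, device=params[0].device)
    offset = 0
    for p in params:
        end = offset + p.numel()
        bucket[offset:end].copy_(p.data.flatten())
        p.data = bucket[offset:end].view_as(p.data)  # parameter now aliases the bucket
        offset = end
    return bucket

ps = [torch.ones(2, 2), torch.zeros(3)]
b = bucket_params(ps)
b.fill_(7.0)
assert ps[0][0, 0].item() == 7.0  # writes to the bucket are visible through the params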
+ """ + if isinstance(params, torch.Tensor): + raise TypeError( + "`params` argument should be an iterable of " + f"Tensors, but got {torch.typename(params)}" + ) + try: + all_params = list(params) + except TypeError as e: + raise TypeError( + "`params` argument should be an iterable of Tensors" + f" or dicts, but got {torch.typename(params)}" + ) from e + if len(all_params) == 0: + raise ValueError("ZeroRedundancyOptimizer got an empty parameter list") + all_tensors = True + all_dicts = True + for param in all_params: + all_tensors &= isinstance(param, torch.Tensor) + all_dicts &= isinstance(param, dict) + if not all_tensors and not all_dicts: + raise TypeError( + "`params` argument should be an iterable of Tensors or dicts" + ) + # Ensure that `self._all_params` contains a list of all parameters + if all_tensors: + self._all_params = all_params + elif all_dicts: + self._all_params = [] + # `all_params` contains parameter groups (not parameters) + for param_group in all_params: + if "params" not in param_group: + raise ValueError( + "Each parameter group passed-in via `params` must " + "have a 'params' key mapping to the parameters in " + "the group" + ) + self._all_params.extend(param_group["params"]) + return all_params + + def _verify_same_dense_param_type(self) -> None: + r""" + Verify that all parameters are of the same dense type. + + The method assumes that ``self._all_params`` has been initialized + and is non-empty. + + Raises: + ValueError: ``params`` contains sparse parameters or parameters + of varying dense types. + + NOTE: This method can be removed once support for sparse parameters + and varying parameter types is added. + """ + typename = torch.typename(self._all_params[0]) + if self._all_params[0].is_sparse: + raise ValueError( + "ZeroRedundancyOptimizer only supports using " + "the same dense type for all parameters but got " + f"{typename}" + ) + for param in self._all_params[1:]: + other_typename = torch.typename(param) + if other_typename != typename: + raise ValueError( + "ZeroRedundancyOptimizer only supports " + "using the same dense type for all " + f"parameters but got both {typename} and " + f"{other_typename}" + ) + + def _get_is_trainable_mask(self) -> List[bool]: + r"""Return a boolean mask indicating if each parameter is trainable (``requires_grad``) or not.""" + return list(map(_is_trainable, self._all_params)) + + def _init_local_optimizer(self) -> None: + r""" + Initialize this rank's local optimizer, responsible for its subset of the parameters. + + The local optimizer is saved in ``self.optim``. 
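The constructor-side validation above boils down to: accept either an iterable of tensors or an iterable of parameter-group dicts (never a mix), and flatten either form into one list of parameters. A condensed sketch of that normalization under illustrative names; error messages are paraphrased.

from typing import Any, List
import torch

def flatten_params(params: Any) -> List[torch.Tensor]:
    items = list(params)
    if not items:
        raise ValueError("empty parameter list")
    if all(isinstance(p, torch.Tensor) for p in items):
        return items
    if all(isinstance(p, dict) for p in items):
        flat: List[torch.Tensor] = []
        for group in items:
            if "params" not in group:
                raise ValueError("each parameter group needs a 'params' key")
            flat.extend(group["params"])
        return flat
    raise TypeError("expected an iterable of Tensors or of dicts")

assert len(flatten_params([{"params": [torch.empty(1), torch.empty(2)]}])) == 2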
+ """ + assert ( + self._optim_constructor is not None + ), "The local optimizer class has not been set" + + param_groups = self._partition_parameters()[self.rank] + # `overlap_with_ddp=True` requires a local functional optimizer + if self._overlap_with_ddp: + # Functional optimizers only support a single parameter group and + # require passing in the parameters as a list + assert len(param_groups) == 1, ( + "Initializing the local " + "functional optimizer with more than one parameter group" + ) + params = param_groups[0]["params"] + # Try to pass `_allow_empty_param_list=True` to avoid erroring + if ( + "_allow_empty_param_list" + in inspect.signature(self._optim_constructor).parameters + ): + self.optim: Any = self._optim_constructor( + params, **self._optim_defaults, _allow_empty_param_list=True + ) + else: + logger.warning( + "%s does not support the argument " + "`_allow_empty_param_list`; ZeroRedundancyOptimizer may " + "error due to an empty parameter list", + self._optim_constructor + ) + self.optim: Any = self._optim_constructor(params, **self._optim_defaults) # type: ignore[no-redef] + + # Log information about the DDP and ZeRO bucketing + if dist.get_debug_level() != dist.DebugLevel.OFF: + local_numel = sum(p.numel() for p in params) + num_assigned_buckets = len( + self._bucket_assignments_per_rank[self.global_rank] + ) + logger.info( + "rank %s with %s parameters " + "across %s buckets", + self.global_rank, local_numel, num_assigned_buckets + ) + if self.global_rank == 0: + logger.info( + "%s DDP " + "buckets and " + "%s bucket " + "assignments", + len(self._overlap_info.params_per_bucket), self._overlap_info.num_bucket_assignments + ) + else: + # NOTE: Passing `param_groups` into the local optimizer constructor + # bypasses the empty parameter list check + self.optim: Optimizer = self._optim_constructor(param_groups, **self._optim_defaults) # type: ignore[no-redef] + + # TODO: Manually add `self.param_groups` if using a functional + # optimizer; remove this if/when the functional optimizers support + # multiple parameter groups + if self._overlap_with_ddp and not hasattr(self.optim, "param_groups"): + assert hasattr(self.optim, "param_group"), ( + "The functional optimizer should set at least one of the " + "attributes `param_group` or `param_groups`" + ) + self.optim.param_groups = [self.optim.param_group] # type: ignore[attr-defined] + + self._sync_param_groups(self.optim.param_groups, self.param_groups) + + def _init_zero_for_overlap(self) -> None: + r"""Perform a delayed initialization of the local optimizer and the supporting data structures.""" + assert self._overlap_with_ddp, ( + "`_init_zero_for_overlap()` should only be called when " + "`overlap_with_ddp=True`" + ) + self._overlap_info.status = _OverlapStatus.INITIALIZED + self._clear_cache() + self._partition_parameters(self._overlap_info.params_per_rank) + self._build_ddp_param_buckets() + self._init_local_optimizer() + + def _get_assigned_rank(self, bucket_index: int) -> int: + r""" + Return the single rank assigned to a :class:`DistributedDataParallel` gradient bucket. + + Arguments: + bucket_index (int): index of the :class:`DistributedDataParallel` + bucket for which to get the assigned rank. 
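The constructor introspection above is a general pattern: probe the callable's signature for an optional keyword and pass it only when the constructor accepts it. A standalone sketch of that guard; the keyword name mirrors the one used above, but the constructor here is a toy stand-in, not a real functional optimizer.

import inspect

def construct(optim_cls, params, **defaults):
    extra = {}
    if "_allow_empty_param_list" in inspect.signature(optim_cls).parameters:
        extra["_allow_empty_param_list"] = True  # only pass it when supported
    return optim_cls(params, **defaults, **extra)

class ToyFunctionalSGD:
    def __init__(self, params, lr=0.1, _allow_empty_param_list=False):
        self.params, self.lr = list(params), lr

opt = construct(ToyFunctionalSGD, [], lr=0.5)
assert opt.lr == 0.5 and opt.params == []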
+ """ + assert not self._overlap_info.shard_buckets, ( + "The bucket assignment requires global bucket information and " + "will be computed later; there should be no need to use this " + "method" + ) + return bucket_index % self.world_size + + def _check_overlap_initialized(self): + r""" + Check the delayed initialization depending on the value of ``overlap_with_ddp``. + + The delayed initialization has occurred (see + :meth:`_init_zero_for_overlap`) if ``overlap_with_ddp=True``, and + raises a ``RuntimeError`` if not. This should preface methods that + should not be run before that delayed initialization. + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and + :meth:`_init_zero_for_overlap` has not been called. + """ + if ( + self._overlap_with_ddp + and self._overlap_info.status != _OverlapStatus.INITIALIZED + ): + raise RuntimeError( + "This method should not be called until this " + "ZeroRedundancyOptimizer instance has been fully " + "initialized" + ) + + def _get_optimizer_constructor(self, optimizer_class: Any) -> Any: + r""" + Return the optimizer constructor using validation and transformation depending on ``overlap_with_ddp``. + + Returns: + - ``optimizer_class`` if ``overlap_with_ddp=False`` and + ``optimizer_class`` is not a functional optimizer. + - ``optimizer_class`` if ``overlap_with_ddp=True`` and + ``optimizer_class`` is already a functional optimizer. + - The functional equivalent of ``optimizer_class`` if + ``overlap_with_ddp=True`` and ``optimizer_class`` is not + already a functional optimizer (assuming the equivalent + exists). + + Raises: + ValueError: + + - if ``overlap_with_ddp=True`` but ``optimizer_class`` is + neither a functional optimizer nor translatable to a + functional optimizer. + - if ``overlap_with_ddp=False`` and ``optimizer_class`` is a + functional optimizer. + """ + functional_optims = functional_optim_map.values() + if not self._overlap_with_ddp: + if optimizer_class in functional_optims: + # Using a functional optimizer is only supported when + # `overlap_with_ddp=True` + raise ValueError( + f"Passing in a functional optimizer {optimizer_class} " + "when `overlap_with_ddp=False`" + ) + else: + return optimizer_class + else: + if optimizer_class in functional_optims: + # Already a functional optimizer + return optimizer_class + elif optimizer_class in functional_optim_map: + # Translate the passed-in optimizer class to its functional + # equivalent if `overlap_with_ddp=True` + optim_constructor = functional_optim_map[optimizer_class] + logger.info( + "Using the functional optimizer %s " + "instead of %s since " + "`overlap_with_ddp=True`", + optim_constructor, optimizer_class + ) + return optim_constructor + else: + raise ValueError( + "Using `ddp_with_overlap=True` requires using a " + "functional optimizer, but there is no supported functional " + f"optimizer equivalent for {optimizer_class}" + )
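The constructor-selection logic above is a table lookup gated on ``overlap_with_ddp``: reject functional optimizers in the non-overlap case, pass them through in the overlap case, and otherwise translate a standard optimizer class through the map. A condensed sketch with a hypothetical two-entry map (string stand-ins for the functional classes) in place of the real internal mapping.

import torch

# Hypothetical stand-in for the internal class -> functional-class map.
FUNCTIONAL_MAP = {torch.optim.SGD: "_FunctionalSGD", torch.optim.Adam: "_FunctionalAdam"}

def pick_constructor(optimizer_class, overlap_with_ddp: bool):
    functional = set(FUNCTIONAL_MAP.values())
    if not overlap_with_ddp:
        if optimizer_class in functional:
            raise ValueError("functional optimizers require overlap_with_ddp=True")
        return optimizer_class
    if optimizer_class in functional:
        return optimizer_class                  # already functional: pass through
    if optimizer_class in FUNCTIONAL_MAP:
        return FUNCTIONAL_MAP[optimizer_class]  # translate to the functional form
    raise ValueError(f"no functional equivalent for {optimizer_class}")

assert pick_constructor(torch.optim.SGD, overlap_with_ddp=False) is torch.optim.SGD
assert pick_constructor(torch.optim.SGD, overlap_with_ddp=True) == "_FunctionalSGD"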