diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f832cee9abbdecff96abba9a4d4d48e680d6cdb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..807526783c256c0cc7a71bf2f28c09909778a438 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de693b7754048da4fcdf4321701df3456f7dba3c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1900ef5449696e7a92640a4745d8d601327a2f5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c0a3cb7047f3ff78fcfb0f8ae6a09b713af2de Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..8accef6afc3433c3371955eceb9f626e6fcd8558 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py @@ -0,0 +1,94 @@ +from contextlib import contextmanager, nullcontext +from typing import Any, Tuple + +import torch +import torch.nn as nn +from torch.utils.checkpoint import ( + _checkpoint_without_reentrant_generator, + _DEFAULT_DETERMINISM_MODE, +) + +from .contract import contract + + +@contextmanager +def _no_hook(module: nn.Module): + r""" + Disable hooks installed by checkpoint to avoid unintentional recursion + during backward recomputation. 
+ """ + orig_enable_hook = checkpoint.state(module).enable_hook + checkpoint.state(module).enable_hook = False + try: + yield + finally: + checkpoint.state(module).enable_hook = orig_enable_hook + + +@contract() +def checkpoint(module: nn.Module) -> nn.Module: + r""" + This is a composable activation checkpointing API. Unlike functional + activation checkpointing APIs, this one does not require changing model + source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs, + this one does not modify model structure or fully-qualified names either. + Under the hood, it registers activation checkpointing logic as pre- and + post-forward hooks. Hence, this API can be easily applied to any model or + sub-modules in the model. + + Args: + module (nn.Module): the target model or sub-module to apply activation + checkpointing. + + Example:: + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> + >>> class MyModel(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.l1 = nn.Linear(10, 10) + >>> self.l2 = nn.Linear(10, 10) + >>> + >>> def forward(self, x): + >>> return self.l2(self.l1(x)) + >>> + >>> model = MyModel() + >>> checkpoint(model.l1) # apply activation checkpointing only to l1 + >>> model(torch.zeros(2, 10)).sum().backward() + + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint") + + def forward_pre_hook(module: nn.Module, inputs: Tuple[Any, ...]) -> None: + if checkpoint.state(module).enable_hook: + + def context_fns(): + return nullcontext(), _no_hook(module) + + checkpoint.state( + module + )._ac_generator = _checkpoint_without_reentrant_generator( + module, True, context_fns, _DEFAULT_DETERMINISM_MODE, False, *inputs + ) + next(checkpoint.state(module)._ac_generator) + + def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any: + if checkpoint.state(module).enable_hook: + try: + next(checkpoint.state(module)._ac_generator) + except StopIteration: + pass + else: + raise RuntimeError( + "Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!" + ) + + # Ensure that we no longer hold on to the generator. always_call=True helps ensure we + # clear this even in the case of exception in fwd pass. + checkpoint.state(module)._ac_generator = None + + checkpoint.state(module).enable_hook = True + module.register_forward_pre_hook(forward_pre_hook) + module.register_forward_hook(forward_hook, prepend=True, always_call=True) + return module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/contract.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/contract.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6983023f76e26698a3c4e8ee477fae9e7eb508 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/contract.py @@ -0,0 +1,194 @@ +import uuid +from collections import OrderedDict +from functools import wraps +from typing import Callable, Dict, List, Optional, Type + +import torch.nn as nn +from torch.distributed._composable_state import _State + + +def generate_state_key(string="__composable_api_state_key"): + return f"{string}_{str(uuid.uuid4())}" + + +STATE_KEY = generate_state_key() +REGISTRY_KEY = generate_state_key() + + +# TODO: we can add additional info to RegistryItem to share across APIs. E.g., +# we can add args and kwargs here, and then we can detect whether fully_shard +# is combined with reentrant activation checkpointing and error out with a clear +# message. 
+class RegistryItem: + pass + + +def contract(state_cls: Type[_State] = _State): + r""" + Decorate a function as a composable distributed API, where the first + argument of the function must be an :class:`nn.Module` instance. The + decorator verifies that the wrapped function does not modify parameter, + buffer or sub-module fully-qualified names (FQN). + + When a function ``func`` is decorated by ``@contract()``, a + ``.state(module: nn.Module)`` method will be installed to the decorated + function. Then you can retrieve and modify the state on a module by calling + ``func.state(module)``. + + Example:: + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> + >>> class MyModel(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.l1 = nn.Linear(10, 10) + >>> self.l2 = nn.Linear(10, 10) + >>> + >>> def forward(self, x): + >>> return self.l2(self.l1(x)) + >>> + >>> @contract() + >>> def my_feature(module: nn.Module) -> nn.Module: + >>> my_feature.state(module).some_state = "any value" + >>> return module + >>> + >>> model = MyModel() + >>> my_feature(model.l1) + >>> assert my_feature.state(model.l1).some_state == "any value" + >>> my_feature(model.l2) + >>> model(torch.randn(2, 10)).sum().backward() + """ + + # wraps will make functions decorated with contract() pickleable - needed for integration with torch.package + @wraps(state_cls) + def inner(func): + @wraps(func) + def wrapper(module: nn.Module, *args, **kwargs) -> Optional[nn.Module]: + # get existing global states + default_all_state: Dict[Callable, _State] = OrderedDict() + all_state: Dict[Callable, _State] = module.__dict__.setdefault( # type: ignore[call-overload] + STATE_KEY, default_all_state + ) + assert isinstance( + all_state, dict + ), "Distributed composable API states corrupted" + + # get global registry + default_registry: Dict[str, RegistryItem] = OrderedDict() + registry: Dict[str, RegistryItem] = module.__dict__.setdefault( # type: ignore[call-overload] + REGISTRY_KEY, default_registry + ) + + assert isinstance( + registry, dict + ), "Distributed composable API registry corrupted" + + # make sure the API func has not been applied to the input module yet. + assert func not in all_state and func.__name__ not in registry, ( + "Each distinct composable distributed API can only be applied to a " + f"module once. 
{func.__name__} has already been applied to the " + f"following module.\n{module}" + ) + + # install states specific to the wrapped ``func`` + all_state.setdefault(func, state_cls()) + # register ``func`` in the global registry by name + registry.setdefault(func.__name__, RegistryItem()) + + orig_named_params = OrderedDict(module.named_parameters()) + orig_named_buffers = OrderedDict( + module.named_buffers(remove_duplicate=False) + ) + orig_named_modules = OrderedDict( + module.named_modules(remove_duplicate=False) + ) + + updated = func(module, *args, **kwargs) + + if updated is None: + updated = module + + new_named_params = OrderedDict(updated.named_parameters()) + new_named_buffers = OrderedDict( + updated.named_buffers(remove_duplicate=False) + ) + new_named_modules = OrderedDict( + updated.named_modules(remove_duplicate=False) + ) + + assert isinstance(updated, nn.Module), ( + "Output of composable distributed APIs must be either None or " + f"nn.Module, but got {type(updated)}" + ) + + def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str): + if orig_fqns == new_fqns: + return + + orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns) + orig_only = orig_fqn_set - new_fqn_set + new_only = new_fqn_set - orig_fqn_set + if len(orig_only) or len(new_only): + raise RuntimeError( + f"{check_key}" + "Composable distributed API implementations cannot modify " + "FQNs.\n" + f"Only in original FQNs: {orig_only},\n" + f"Only in new FQNs: {new_only}" + ) + else: + raise RuntimeError( + f"{check_key}" + "Composable distributed API implementations cannot modify " + "the order of FQNs.\n" + f"Original FQNs: {orig_only}\n" + f"New FQNs: {new_only}" + ) + + check_fqn( + list(orig_named_params.keys()), + list(new_named_params.keys()), + "Check parameters, ", + ) + check_fqn( + list(orig_named_buffers.keys()), + list(new_named_buffers.keys()), + "Check buffer, ", + ) + check_fqn( + list(orig_named_modules.keys()), + list(new_named_modules.keys()), + "Check modules, ", + ) + + # TODO: a stricter verification should also reject changing module + # types and monkey-patching forward() method implementations. + + # TODO: verify that installed distributed paradigms are compatible with + # each other. + + return updated + + def get_state(module: nn.Module) -> Optional[_State]: + return module.__dict__.setdefault( # type: ignore[call-overload] + STATE_KEY, + {}, # TODO(@yhcharles): this is a temporary fix, need a better way + ).get( + func + ) # type: ignore[call-overload] + + wrapper.state = get_state # type: ignore[attr-defined] + + return wrapper + + return inner + + +def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]: + r""" + Get an ``OrderedDict`` of composable APIs that have been applied to the + ``module``, indexed by the API name. If no API has been applied, then this + returns ``None``. 
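+
+    Example (illustrative; reuses ``my_feature`` from the ``contract``
+    docstring above)::
+        >>> # xdoctest: +SKIP
+        >>> my_feature(model.l1)
+        >>> registry = _get_registry(model.l1)
+        >>> assert registry is not None and "my_feature" in registry
+        >>> assert _get_registry(model.l2) is None  # nothing applied to l2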
+ """ + return getattr(module, REGISTRY_KEY, None) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6438814ed1c97290d50e0afb379ce1030d4288e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py @@ -0,0 +1,2 @@ +from ._fsdp_api import MixedPrecisionPolicy +from .fully_shard import FSDP, fully_shard diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d9eeb22885a6e138dbae09be1625c518f5489b2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py new file mode 100644 index 0000000000000000000000000000000000000000..26444ccf1e087383be1a3c0783c3c0ea4d402640 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py @@ -0,0 +1,52 @@ +from dataclasses import dataclass +from typing import Optional + +import torch + + +@dataclass(frozen=True) +class MixedPrecisionPolicy: + """ + This configures FSDP's mixed precision. Unlike autocast, this applies mixed + precision at the module level, not op level, which means low-precision + activations are saved for backward and high-to-low-precision casts are + incurred only at module boundaries. + + FSDP works well with module-level mixed precision since it keeps the + high-precision sharded parameters in memory anyway. In other words, FSDP + does not require any extra memory to keep a high-precision copy of the + parameters for the optimizer step. + + Attributes: + param_dtype (Optional[torch.dtype]): This specifies the dtype for + the unsharded parameter and hence the dtype for forward/backward + computation and the parameter all-gather. If this is ``None``, then + the unsharded parameter uses the original dtype. The optimizer step + uses the sharded parameter in the original dtype. (Default: + ``None``) + reduce_dtype (Optional[torch.dtype]): This specifies the dtype for + gradient reduction (i.e. reduce-scatter or all-reduce). If this is + ``None`` but ``param_dtype`` is not ``None``, then the reduction + uses the compute dtype. This can be used to run gradient reduction + in full precision while using low precision for compute. (Default: + ``None``) + output_dtype (Optional[torch.dtype]): This specifies the dtype for + casting floating-point forward outputs. This can be used to + help implement cases where different modules have different mixed + precision policies. (Default: ``None``) + cast_forward_inputs (bool): This specifies whether FSDP should cast the + forward's floating-point input tensors to ``param_dtype`` or not. 
+ """ + + param_dtype: Optional[torch.dtype] = None + reduce_dtype: Optional[torch.dtype] = None + output_dtype: Optional[torch.dtype] = None + cast_forward_inputs: bool = True + + def __post_init__(self): + # Clamp `reduce_dtype` to `None` if no casting is required: since + # gradients are computed in `param_dtype`, if `reduce_dtype` matches, + # then we do not need extra casting + if self.param_dtype == self.reduce_dtype: + # Bypass the frozen dataclass checks + object.__setattr__(self, "reduce_dtype", None) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py new file mode 100644 index 0000000000000000000000000000000000000000..8b3ad6e5cf683291a6b657d2b940f112854d9dad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py @@ -0,0 +1,217 @@ +from typing import List, NamedTuple, Optional, Tuple + +import torch +import torch.distributed as dist +from torch.distributed.distributed_c10d import ReduceOp +from ._fsdp_common import ( + _get_dim0_padded_size, + _raise_assert_with_print, + _to_dtype_if_needed, +) +from ._fsdp_param import FSDPParam + + +class AllGatherResult(NamedTuple): + all_gather_output: torch.Tensor + all_gather_event: Optional[torch.cuda.Event] + all_gather_work: Optional[dist.distributed_c10d.Work] + all_gather_input_numels: List[int] + + +@torch.no_grad() +def foreach_all_gather( + fsdp_params: List[FSDPParam], + group: dist.ProcessGroup, + async_op: bool, + all_gather_copy_in_stream: torch.cuda.Stream, + all_gather_stream: torch.cuda.Stream, + device: torch.device, +) -> Optional[AllGatherResult]: + world_size, rank = group.size(), group.rank() + # - Copy in + with torch.cuda.stream(all_gather_copy_in_stream): + param_all_gather_inputs = [ + fsdp_param.all_gather_input for fsdp_param in fsdp_params + ] + dtype = param_all_gather_inputs[0].dtype + if not all(t.dtype == dtype for t in param_all_gather_inputs): + raise NotImplementedError( + f"Mixed dtype not supported yet: {[t.dtype for t in param_all_gather_inputs]}" + ) + inp_split_sizes = [inp.numel() for inp in param_all_gather_inputs] + all_gather_input_numel = sum(inp_split_sizes) + all_gather_output = torch.empty( + (all_gather_input_numel * world_size,), dtype=dtype, device=device + ) + all_gather_input = all_gather_output.narrow( + 0, all_gather_input_numel * rank, all_gather_input_numel + ) + foreach_copy_dsts = torch.split(all_gather_input, inp_split_sizes) + torch._foreach_copy_(foreach_copy_dsts, param_all_gather_inputs) + del param_all_gather_inputs + all_gather_stream.wait_stream(all_gather_copy_in_stream) + with torch.cuda.stream(all_gather_stream): + # - All-gather + all_gather_work = dist.all_gather_into_tensor( + output_tensor=all_gather_output, + input_tensor=all_gather_input, + group=group, + async_op=async_op, + ) + all_gather_event = all_gather_stream.record_event() + return AllGatherResult( + all_gather_output, all_gather_event, all_gather_work, inp_split_sizes + ) + + +@torch.no_grad() +def foreach_all_gather_copy_out( + all_gather_result: AllGatherResult, + fsdp_params: List[FSDPParam], + group: dist.ProcessGroup, +) -> None: + ( + all_gather_output, + all_gather_event, + all_gather_work, + all_gather_input_numels, + ) = all_gather_result + if all_gather_event is not None: # sync op + torch.cuda.current_stream().wait_event(all_gather_event) + if all_gather_work is not None: # async op 
+ all_gather_work.wait() + world_size = group.size() + dtype, device = all_gather_output.dtype, all_gather_output.device + for all_gather_input_numel, fsdp_param in zip(all_gather_input_numels, fsdp_params): + fsdp_param.init_all_gather_output( + all_gather_input_numel, world_size, dtype, device + ) # no-op after 1st call + fsdp_param.alloc_all_gather_output() + all_gather_output = all_gather_output.view(world_size, -1) + out = [ + fsdp_param.all_gather_output.view(world_size, -1) for fsdp_param in fsdp_params + ] + torch.split_with_sizes_copy( + all_gather_output, all_gather_input_numels, dim=1, out=out + ) + + +@torch.no_grad() +def foreach_reduce_scatter( + fsdp_params: List[FSDPParam], + unsharded_grads: List[torch.Tensor], + group: dist.ProcessGroup, + reduce_scatter_stream: torch.cuda.Stream, + orig_dtype: torch.dtype, + reduce_dtype: Optional[torch.dtype], + device: torch.device, + divide_factors: Optional[Tuple[float, float]], +) -> torch.cuda.Event: + """ + ``unsharded_grads`` owns the references to the gradients computed by + autograd, so clearing the list frees the gradients. + """ + grad_dtypes = {grad.dtype for grad in unsharded_grads} + if len(grad_dtypes) != 1: + # Check this at runtime since it could be a real runtime error if e.g. + # fp8 weights do not produce the correct higher precision gradients + _raise_assert_with_print( + f"FSDP reduce-scatter expects uniform gradient dtype but got {grad_dtypes}" + ) + grad_dtype = unsharded_grads[0].dtype + reduce_dtype = reduce_dtype or grad_dtype + world_size = group.size() + padded_unsharded_sizes = tuple( + _get_dim0_padded_size(grad.size(), world_size) for grad in unsharded_grads + ) + reduce_scatter_input_numel = sum(s.numel() for s in padded_unsharded_sizes) + reduce_scatter_output_numel = reduce_scatter_input_numel // world_size + current_stream = torch.cuda.current_stream() + reduce_scatter_stream.wait_stream(current_stream) + with torch.cuda.stream(reduce_scatter_stream): + reduce_scatter_input = torch.empty( + (reduce_scatter_input_numel,), dtype=reduce_dtype, device=device + ) + foreach_reduce_scatter_copy_in( + unsharded_grads, reduce_scatter_input, world_size + ) + # Only after the copy-in finishes can we free the gradients, which were + # computed in the default stream + current_stream.wait_stream(reduce_scatter_stream) + unsharded_grads.clear() + reduce_scatter_output = reduce_scatter_input.new_empty( + (reduce_scatter_output_numel,) + ) + _reduce_scatter( + reduce_scatter_output, reduce_scatter_input, group, divide_factors + ) + reduce_scatter_output = _to_dtype_if_needed(reduce_scatter_output, orig_dtype) + # - View out and accumulate + flat_grad_offset = 0 # [0, reduce_scatter_output_numel - 1] + for padded_unsharded_size, fsdp_param in zip( + padded_unsharded_sizes, fsdp_params + ): + new_sharded_grad = torch.as_strided( + reduce_scatter_output, + size=fsdp_param.sharded_size, + stride=fsdp_param.contiguous_sharded_stride, + storage_offset=flat_grad_offset, + ) + to_accumulate_grad = fsdp_param.sharded_param.grad is not None + new_sharded_dtensor_grad = fsdp_param.to_sharded_dtensor(new_sharded_grad) + if to_accumulate_grad: + fsdp_param.sharded_param.grad += new_sharded_dtensor_grad + else: + fsdp_param.sharded_param.grad = new_sharded_dtensor_grad + padded_sharded_numel = padded_unsharded_size.numel() // world_size + flat_grad_offset += padded_sharded_numel + reduce_scatter_view_out_event = reduce_scatter_stream.record_event() + # The RS output is allocated in the RS stream and used in the default + # stream 
(for optimizer). To ensure its memory is not reused for later + # RSs, we do not need extra synchronization since the sharded parameters + # hold refs through the end of backward. + return reduce_scatter_view_out_event + + +def foreach_reduce_scatter_copy_in( + unsharded_grads: List[torch.Tensor], + reduce_scatter_input: torch.Tensor, + world_size: int, +) -> None: + grad_views: List[torch.Tensor] = [] + grads_to_copy: List[torch.Tensor] = [] + padded_grad_slices: List[torch.Tensor] = [] + for grad in unsharded_grads: + grad_size = grad.size() + dim0_padded_size = _get_dim0_padded_size(grad_size, world_size) + if dim0_padded_size != grad_size: + padded_grad = grad.new_empty(dim0_padded_size) + padded_grad_slices.append(padded_grad[: grad.size(0)]) + grads_to_copy.append(grad) + grad = padded_grad + grad_views.append(grad.view(world_size, -1)) + if padded_grad_slices: + torch._foreach_copy_(padded_grad_slices, grads_to_copy) + torch.cat(grad_views, dim=-1, out=reduce_scatter_input.view(world_size, -1)) + + +def _reduce_scatter( + output: torch.Tensor, + input: torch.Tensor, + group: dist.ProcessGroup, + divide_factors: Optional[Tuple[float, float]], +) -> None: + if divide_factors: + predivide_factor, postdivide_factor = divide_factors + _div_if_needed(input, predivide_factor) + dist.reduce_scatter_tensor(output, input, group=group) + _div_if_needed(output, postdivide_factor) + else: + # Using NCCL's reduce-scatter to do the division by world size saves + # extra memory read/write from a separate division kernel + dist.reduce_scatter_tensor(output, input, op=ReduceOp.AVG, group=group) + + +def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None: + if div_factor > 1: + tensor.div_(div_factor) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py new file mode 100644 index 0000000000000000000000000000000000000000..94b0249177697d99ce4ee13355cdd0cdb4b1de27 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py @@ -0,0 +1,151 @@ +import math +import traceback + +from dataclasses import dataclass +from enum import auto, Enum +from typing import Any, cast, List, Optional, Tuple + +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed._composable.contract import _get_registry +from torch.distributed._tensor import DeviceMesh, DTensor, Placement + + +@dataclass +class DataParallelMeshInfo: + mesh: DeviceMesh + shard_mesh_dim: Optional[int] = None + replicate_mesh_dim: Optional[int] = None + + def __post_init__(self): + if self.shard_mesh_dim is None and self.replicate_mesh_dim is None: + raise AssertionError( + "At least one of shard_mesh_dim and replicate_mesh_dim must not be None" + ) + + +@dataclass +class FSDPMeshInfo(DataParallelMeshInfo): + def __post_init__(self): + super().__post_init__() + if self.shard_mesh_dim is None: + raise AssertionError("Expects non-None shard_mesh_dim") + self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim) + self.shard_process_group = cast( + dist.ProcessGroup, self.mesh.get_group(self.shard_mesh_dim) + ) + self.shard_mesh_rank: int = self.shard_process_group.rank() + + +@dataclass +class DDPMeshInfo(DataParallelMeshInfo): + def __post_init__(self): + super().__post_init__() + if self.replicate_mesh_dim is None: + raise AssertionError("Expects non-None replicate_mesh_dim") + self.replicate_mesh_size: int 
= self.mesh.size(self.replicate_mesh_dim) + self.replicate_process_group = cast( + dist.ProcessGroup, self.mesh.get_group(self.replicate_mesh_dim) + ) + self.replicate_mesh_rank: int = self.replicate_process_group.rank() + + +@dataclass +class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo): + def __post_init__(self): + # Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo` + super().__post_init__() + + +class TrainingState(Enum): + """Describes the training state of one FSDP state / parameter group.""" + + # Transition to forward starting pre-forward until post-forward + FORWARD = auto() + # Transition to pre-backward when unsharding in backward + PRE_BACKWARD = auto() + # Transition to post-backward when resharding and reducing gradients + POST_BACKWARD = auto() + # Idle before/after forward or before pre-backward/after post-backward + IDLE = auto() + + +def _raise_assert_with_print(*args: Any, **kwargs: Any): + print(f"[Rank {dist.get_rank()}] ", end="") + print(*args, **kwargs) + traceback.print_stack() + raise AssertionError(*args, **kwargs) + + +def _is_composable_with_fsdp(module: nn.Module) -> bool: + registry = _get_registry(module) + if registry is None: + return True + # Registry keys by function name + return "replicate" not in registry + + +def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size: + padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor + return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:]) + + +def _chunk_with_empty( + tensor: torch.Tensor, num_chunks: int, dim: int +) -> List[torch.Tensor]: + chunks = list(torch.chunk(tensor, num_chunks, dim=dim)) + while len(chunks) < num_chunks: + chunks.append(chunks[0].new_empty(0)) + return chunks + + +def _get_dim0_chunked_size( + chunk: torch.Tensor, unchunked_size: torch.Size +) -> torch.Size: + if chunk.numel() > 0: + return chunk.size() + # For 0 numel, we need to preserve trailing dims for DTensor APIs + return cast(torch.Size, torch.Size([0]) + unchunked_size[1:]) + + +def _from_local_no_grad( + local_tensor: torch.Tensor, + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + global_size: torch.Size, + global_stride: Tuple[int, ...], +) -> DTensor: + """ + This method is similar to ``DTensor.from_local()`` except it avoids some + CPU overhead by avoiding default args and not being differentiable. + """ + return DTensor( + # Use the local tensor directly instead of constructing a new tensor + # variable, e.g. 
with `view_as()`, since this is not differentiable + local_tensor, + device_mesh, + placements, + shape=global_size, + dtype=local_tensor.dtype, + requires_grad=local_tensor.requires_grad, + stride=global_stride, + ) + + +def _to_dtype_if_needed( + tensor: torch.Tensor, dtype: Optional[torch.dtype] +) -> torch.Tensor: + if dtype is not None and tensor.dtype != dtype: + return tensor.to(dtype) + return tensor + + +def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor: + if ( + not isinstance(x, torch.Tensor) + or not torch.is_floating_point(x) + or x.dtype == dtype + ): + return x + return x.to(dtype) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a33dfe7b28e9df8abc43c4b1e4bec32036af4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py @@ -0,0 +1,144 @@ +import itertools +from typing import List, Optional, Set, Tuple, Union + +import torch +import torch.distributed as dist +import torch.nn as nn + +from torch.distributed._tensor import DeviceMesh, DTensor, init_device_mesh +from torch.distributed.device_mesh import _get_device_handle +from ._fsdp_common import _is_composable_with_fsdp, FSDPMeshInfo, HSDPMeshInfo +from ._fsdp_state import _get_module_fsdp_state + + +def _get_post_forward_mesh_info( + reshard_after_forward: Union[bool, int], mesh_info: FSDPMeshInfo +) -> Optional[FSDPMeshInfo]: + shard_mesh_size = mesh_info.shard_mesh_size + if not isinstance(reshard_after_forward, (bool, int)): + raise ValueError( + "reshard_after_forward should be a bool or an int representing the " + f"group size to reshard to, not {reshard_after_forward}" + ) + # NOTE: `isinstance(False, int)` returns `True`. 
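+    # (bool is a subclass of int, so a bare `isinstance(x, int)` would also
+    # accept True/False; the explicit bool check below excludes them.)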
+ if not isinstance(reshard_after_forward, bool) and isinstance( + reshard_after_forward, int + ): + if ( + reshard_after_forward < 1 + or reshard_after_forward > shard_mesh_size + or shard_mesh_size % reshard_after_forward != 0 + ): + raise ValueError( + "If passing reshard_after_forward as an int, it should be a " + f"factor of {shard_mesh_size}, not {reshard_after_forward}" + ) + elif reshard_after_forward == 1: + reshard_after_forward = False + elif reshard_after_forward == shard_mesh_size: + reshard_after_forward = True + post_forward_mesh_info = None + if reshard_after_forward is True: + post_forward_mesh_info = mesh_info + elif reshard_after_forward is not False: # int case + # For HSDP, we can flatten the two replicate dims into the 0th dim + post_forward_mesh_tensor = mesh_info.mesh.mesh.view(-1, reshard_after_forward) + post_forward_mesh = DeviceMesh( + mesh_info.mesh.device_type, post_forward_mesh_tensor + ) + post_forward_mesh_info = HSDPMeshInfo( + post_forward_mesh, shard_mesh_dim=1, replicate_mesh_dim=0 + ) + return post_forward_mesh_info + + +def _init_default_fully_shard_mesh() -> DeviceMesh: + """Default to global CUDA mesh if possible else global CPU mesh.""" + if not dist.distributed_c10d.is_initialized(): + dist.distributed_c10d.init_process_group() + default_pg = dist.distributed_c10d._get_default_group() + device_type = "cuda" if torch.cuda.is_available() else "cpu" + mesh = init_device_mesh(device_type, mesh_shape=(default_pg.size(),)) + return mesh + + +def _get_device_from_mesh(mesh: DeviceMesh) -> torch.device: + if mesh.device_type == "cpu": + return torch.device("cpu") + device_handle = _get_device_handle(mesh.device_type) + return torch.device(mesh.device_type, device_handle.current_device()) + + +def _get_managed_modules(root_module: nn.Module) -> List[nn.Module]: + modules: List[nn.Module] = [] + # Track visisted modules to avoid visiting shared modules multiple times + visited_modules: Set[nn.Module] = set() + + def dfs(module: nn.Module) -> None: + """ + Runs a DFS to collect managed modules, not recursing into modules with + a non-composable API or ``fully_shard`` already applied. + """ + if not _is_composable_with_fsdp(module): + return + elif module is not root_module and _get_module_fsdp_state(module) is not None: + return # nested `fully_shard` module + visited_modules.add(module) + for submodule in module.children(): + if submodule not in visited_modules: + dfs(submodule) + modules.append(module) + + dfs(root_module) + return modules + + +def _get_managed_states( + modules: List[nn.Module], +) -> Tuple[List[nn.Parameter], List[torch.Tensor]]: + params: List[nn.Parameter] = [] + buffers: List[torch.Tensor] = [] + # Track visited parameters/buffers to avoid visiting shared parameters and + # buffers multiple times + visited_params: Set[nn.Parameter] = set() + visited_buffers: Set[torch.Tensor] = set() + for module in modules: + for param in module.parameters(recurse=False): + if param not in visited_params: + params.append(param) + visited_params.add(param) + for buffer in module.buffers(recurse=False): + if buffer not in visited_buffers: + buffers.append(buffer) + visited_buffers.add(buffer) + return params, buffers + + +def _move_states_to_device( + params: List[nn.Parameter], + buffers: List[torch.Tensor], + device: torch.device, + mesh_info: FSDPMeshInfo, +) -> None: + """ + We have FSDP move states to device for simpler and faster initialization + since FSDP almost always uses CUDA for training. 
We move parameters/buffers + rather than modules since modules to support ignoring parameters/buffers in + the future. + """ + # TODO: De-duplicate with `_apply` after `swap_tensors` path lands: + # https://github.com/pytorch/pytorch/issues/115792 + for tensor in itertools.chain(params, buffers): + if tensor.device == device or tensor.device.type == "meta": + # Keep meta-device tensors on meta device for deferred init + continue + if isinstance(tensor, DTensor): + if (dtensor_mesh_type := tensor._spec.mesh.device_type) != device.type: + raise ValueError( + "Requires DTensor to have mesh of the same type as the FSDP mesh " + f"but got {dtensor_mesh_type} for DTensor and {device.type} for FSDP" + ) + raise AssertionError( + f"Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}" + ) + tensor.data = tensor.to(device) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py new file mode 100644 index 0000000000000000000000000000000000000000..0141b8a4f461a239e290ab7a053a6048730367e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py @@ -0,0 +1,438 @@ +from dataclasses import dataclass, field +from enum import auto, Enum +from typing import cast, List, Optional, Tuple + +import torch +import torch.nn as nn + +from torch._prims_common import make_contiguous_strides_for +from torch.distributed._functional_collectives import AsyncCollectiveTensor +from torch.distributed._tensor import DTensor, Placement, Replicate, Shard +from torch.distributed._tensor.device_mesh import _mesh_resources +from torch.distributed._tensor.placement_types import DTensorSpec +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_common import ( + _chunk_with_empty, + _from_local_no_grad, + _get_dim0_chunked_size, + _raise_assert_with_print, + _to_dtype_if_needed, + FSDPMeshInfo, + HSDPMeshInfo, +) + +""" +[Note: FSDP tensors] +FSDP considers the following tensors: +- Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one + on the module when applying FSDP +- Sharded parameter: sharding the original parameter on dim-0 as a DTensor + over the main mesh +- All-gather input: the ``torch.Tensor`` passed to all-gather, derived from the + sharded parameter +- All-gather output: the ``torch.Tensor`` resulting from all-gathering the + all-gather input +- Unsharded parameter: parameter used for forward/backward computation, derived + from the all-gather output; autograd leaf + +We define these tensors to describe the general framework that can accomodate +extensions, where: +- all-gather-input = pre-all-gather-transform(sharded-parameter) +- unsharded-parameter = post-all-gather-transform(all-gather-output) + +For the default ``torch.Tensor`` case, the sharded parameter and all-gather +input share the same underlying tensor data, meaning that they can be thought +of as the same tensors. The same applies for the all-gather output and +unsharded parameter. For non-``torch.Tensor`` extensions, these equivalences +may no longer hold due to the pre/post-all-gather transforms. + +[Note: FSDP and autograd] +FSDP dynamically frees and allocates the unsharded parameter. Since autograd +can pack a reference to it or a view to save for backward, we use storage +resizing to implement the freeing/allocation since that preserves the aliasing. 
+This implies that we construct the unsharded parameter object once and write to +it in-place thereafter. For the default ``torch.Tensor` original parameter +case, the all-gather output and unsharded parameter share the same +data, so we use storage resizing on the all-gather output. +""" + + +class ShardedState(Enum): + """ + - ``SHARDED``: The sharded parameter is registered to the module. It is the + only contributor to parameter memory. + - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a + smaller world size. Since this data should not be used for computation, + we do not register it to the module. Users should reshard the module + before any in-place modifications. Both it and the sharded parameter + contribute to parameter memory. + - ``UNSHARDED``: The unsharded parameter is registered to the module. Both + it and the sharded parameter contribute to parameter memory. + """ + + SHARDED = auto() + SHARDED_POST_FORWARD = auto() + UNSHARDED = auto() + + +@dataclass +class ParamModuleInfo: + """ + For a parameter, this stores the module and the parameter name to be able + to do a parameter swap via ``setattr(module, param_name, ...)`` or to get + the parameter via ``getattr(module, param_name)``. We additionally save + shared modules and shared parameter names to update them accordingly. + """ + + # Parameter names are unprefixed, e.g. "weight", not "lin.weight" + module: nn.Module + param_name: str + shared_modules: List[nn.Module] = field(default_factory=list) + shared_param_names: List[str] = field(default_factory=list) + + +class FSDPParam: + """ + This class manages a parameter with FSDP or FSDP variants applied, + implementing dim-0 per-parameter sharding. + """ + + orig_dtype: torch.dtype + param_dtype: Optional[torch.dtype] + reduce_dtype: Optional[torch.dtype] + _orig_size: torch.Size # ND + _contiguous_orig_stride: Tuple[int, ...] + sharded_size: torch.Size # ND + contiguous_sharded_stride: Tuple[int, ...] + padded_sharded_param_size: torch.Size # ND + sharded_post_forward_size: torch.Size # ND + contiguous_sharded_post_forward_stride: Tuple[int, ...] + _sharded_param_data: torch.Tensor # 1D + sharded_param: nn.Parameter # ND + _sharded_post_forward_param_data: Optional[torch.Tensor] # 1D + _sharded_post_forward_param: Optional[nn.Parameter] # ND + _unsharded_param: nn.Parameter # ND + _global_placements: Tuple[Placement, ...] + _global_size: torch.Size + _global_stride: Tuple[int, ...] 
+ # DTensor attributes (only defined for DTensor `param`): + _tp_spec: DTensorSpec + + def __init__( + self, + param: nn.Parameter, + module_info: ParamModuleInfo, + mesh_info: FSDPMeshInfo, + post_forward_mesh_info: Optional[FSDPMeshInfo], + device: torch.device, + mp_policy: MixedPrecisionPolicy, + ): + self._module_info: ParamModuleInfo = module_info + self.mesh_info = mesh_info + self.post_forward_mesh_info = post_forward_mesh_info + self.device = device + self._init_sharded_param(param, device) + if self.post_forward_mesh_info: + self._init_sharded_post_forward_param_metadata(param) + self.all_gather_output = torch.empty(0) + self._param_fqn: Optional[str] = None # prefixed from root module + + @torch.no_grad() + def _init_sharded_param(self, param: nn.Parameter, device: torch.device): + if param.device != device and param.device.type != "meta": + raise AssertionError( + f"Expects the parameter to already be moved to device {device} but got {param.device}" + ) + # TODO: Replace the sharded DTensor parameter construction logic with + # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101 + # TODO: Simplify the following sharded parameter padding logic after + # https://github.com/pytorch/pytorch/issues/113045 + self.is_dtensor = isinstance(param, DTensor) + if self.is_dtensor: + self._tp_spec = cast(DTensor, param)._spec + if ( + self.mesh_info.shard_mesh_dim != 0 + or self.mesh_info.replicate_mesh_dim is not None + ): + raise NotImplementedError("Using TP with HSDP is not supported") + dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh) + dp_global_mesh = _mesh_resources.get_parent_mesh(dp_mesh) + tp_global_mesh = _mesh_resources.get_parent_mesh(tp_mesh) + if dp_global_mesh != tp_global_mesh or ( + dp_global_mesh is None or tp_global_mesh is None + ): + raise AssertionError( + "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n" + f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}" + ) + self._global_mesh = dp_global_mesh + if len(self._tp_spec.placements) != 1: + raise NotImplementedError( + f"FSDP only supports 1D TP, not {self._tp_spec.placements}" + ) + global_placements: List[Placement] = [Replicate(), Replicate()] + global_dp_mesh_dim = _mesh_resources.get_parent_mesh_dim(dp_mesh) + global_tp_mesh_dim = _mesh_resources.get_parent_mesh_dim(tp_mesh) + assert global_dp_mesh_dim is not None # mypy + assert global_tp_mesh_dim is not None # mypy + # TODO: Hard code FSDP + TP; need to support HSDP + TP + global_placements[global_dp_mesh_dim] = Shard(0) + global_placements[global_tp_mesh_dim] = self._tp_spec.placements[0] + self._global_placements = tuple(global_placements) + self._global_size = param.size() + self._global_stride = param.stride() + param_data = cast(DTensor, param)._local_tensor + else: + self._global_mesh = self.mesh_info.mesh + self._global_placements = (Shard(0),) + self._global_size = param.size() + self._global_stride = param.stride() + param_data = param + self._orig_size = param_data.size() + self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size) + shard_rank = self.mesh_info.shard_mesh_rank + shard_world_size = self.mesh_info.shard_mesh_size + chunks = _chunk_with_empty(param_data, shard_world_size, dim=0) + sharded_param = chunks[shard_rank] + self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size()) + self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size) + padded_sharded_size = chunks[0].size() # 0th always padded + 
padded_sharded_param = param_data.new_zeros(padded_sharded_size) + self.padded_sharded_param_size = padded_sharded_param.size() + if sharded_param.numel() > 0: + padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param) + self._sharded_param_data = padded_sharded_param.view(-1) + self.sharded_param = nn.Parameter( + self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)]) + ) + self.sharded_param.requires_grad_(param.requires_grad) + # Let `param_data` be freed normally when its ref count reaches 0 when + # the `fully_shard` call returns to allow provided parameters to alias + self._setattr_on_modules(self.sharded_param) + self.sharded_state = ShardedState.SHARDED + + def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None: + mesh_info = self.post_forward_mesh_info + assert mesh_info is not None # mypy + param_data = param._local_tensor if isinstance(param, DTensor) else param + chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0) + self.sharded_post_forward_size = _get_dim0_chunked_size( + chunks[mesh_info.shard_mesh_rank], param_data.size() + ) + self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for( + self.sharded_post_forward_size + ) + + def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy): + param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype) + self.orig_dtype = self.sharded_param.dtype + # Clamp `param_dtype` to `None` if no casting is required + if param_dtype == self.orig_dtype: + param_dtype = None + self.param_dtype = param_dtype + self.reduce_dtype = reduce_dtype + # None indicates that the mixed precision is not enabled + + def init_all_gather_output( + self, + all_gather_input_numel: int, + world_size: int, + dtype: torch.dtype, + device: torch.device, + ): + if self.all_gather_output.numel() > 0: + return # already initialized + all_gather_output_size = torch.Size([all_gather_input_numel * world_size]) + self.all_gather_output = torch.empty( + all_gather_output_size, dtype=dtype, device=device + ) + + def init_unsharded_param(self): + if hasattr(self, "_unsharded_param"): + return # already initialized + # For the default path (no post-all-gather), the all-gather output + # gives the unsharded parameter data directly + unsharded_param = torch.as_strided( + self.all_gather_output, + self._orig_size, + self._contiguous_orig_stride, + storage_offset=0, + ) + if self.is_dtensor: + unsharded_param = _from_local_no_grad( + unsharded_param, + self._tp_spec.mesh, + self._tp_spec.placements, + self._global_size, + self._global_stride, + ) + self._unsharded_param = nn.Parameter(unsharded_param) + self._unsharded_param.requires_grad_(self.sharded_param.requires_grad) + + def to_sharded(self) -> None: + self._setattr_on_modules(self.sharded_param) + self.free_all_gather_output() + self.sharded_state = ShardedState.SHARDED + + def to_sharded_post_forward(self) -> None: + if self.is_dtensor: + raise NotImplementedError( + "Resharding to smaller mesh with TP is not supported yet" + ) + self._assert_in_states(ShardedState.UNSHARDED) + assert self.post_forward_mesh_info is not None # mypy + shard_world_size = self.post_forward_mesh_info.shard_mesh_size + if (numel := self.all_gather_output.numel()) % shard_world_size != 0: + _raise_assert_with_print( + f"All-gather output size ({numel}) must be divisible by the shard " + f"world size ({shard_world_size})" + ) + shard_rank = self.post_forward_mesh_info.shard_mesh_rank + sharded_numel = numel // shard_world_size + 
self._sharded_post_forward_param_data = ( + self.all_gather_output.narrow(0, sharded_numel * shard_rank, sharded_numel) + ).clone() # clone to be able to free all-gather output + sharded_post_forward_tensor = torch.as_strided( + self._sharded_post_forward_param_data, + size=self.sharded_post_forward_size, + stride=self.contiguous_sharded_post_forward_stride, + storage_offset=0, + ) + self._sharded_post_forward_param = nn.Parameter( + self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor) + ) + self._setattr_on_modules(self._sharded_post_forward_param) + self.free_all_gather_output() + self.sharded_state = ShardedState.SHARDED_POST_FORWARD + + def to_unsharded(self) -> None: + # Assume that the data has been allocated and all-gathered + set_requires_grad_if_needed(self.sharded_param, self._unsharded_param) + self._setattr_on_modules(self._unsharded_param) + if self.sharded_state == ShardedState.SHARDED_POST_FORWARD: + # The data is allocated in the default stream via the post-forward + # reshard and must be kept alive for the next all-gather copy-in. + # Since we call this method after the copy-out, the data's lifetime + # is ensured without further synchronization. + self._sharded_post_forward_param = None + self._sharded_post_forward_param_data = None # free + self.sharded_state = ShardedState.UNSHARDED + + def _setattr_on_modules(self, param: nn.Parameter) -> None: + unsafe_setattr_param( + self._module_info.module, self._module_info.param_name, param + ) + for shared_module, shared_param_name in zip( + self._module_info.shared_modules, self._module_info.shared_param_names + ): + unsafe_setattr_param(shared_module, shared_param_name, param) + + def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor: + """ + Converts a local tensor representing either the sharded parameter or + sharded gradient to DTensor. + """ + if tensor.shape != self.sharded_size: + _raise_assert_with_print( + f"Expects size {self.sharded_size} but got {tensor.shape}" + ) + return _from_local_no_grad( + tensor, + self._global_mesh, + self._global_placements, + self._global_size, + self._global_stride, + ) + + def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor: + if tensor.shape != self.sharded_post_forward_size: + _raise_assert_with_print( + f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}" + ) + assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo) + # TODO: Prefer this DTensor to be read-only and generalize the + # placement once we support TP. 
+ return _from_local_no_grad( + tensor, + self.post_forward_mesh_info.mesh, + (Replicate(), Shard(0)), + self._global_size, + self._global_stride, + ) + + def alloc_all_gather_output(self) -> None: + unsafe_alloc_storage(self.all_gather_output) + + def free_all_gather_output(self) -> None: + unsafe_free_storage(self.all_gather_output) + + @property + def all_gather_input(self) -> torch.Tensor: # 1D + self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD) + if self.sharded_state == ShardedState.SHARDED: + return _to_dtype_if_needed(self._sharded_param_data, self.param_dtype) + elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD: + return _to_dtype_if_needed( + cast(torch.Tensor, self._sharded_post_forward_param_data), + self.param_dtype, + ) + return torch.empty(0) # mypy + + @property + def unsharded_param(self) -> nn.Parameter: # ND + self._assert_in_states(ShardedState.UNSHARDED) + return self._unsharded_param + + @property + def unsharded_grad_data(self) -> torch.Tensor: + grad = self.unsharded_param.grad + assert grad is not None, "Expects unsharded_param.grad to not be None" + return self._get_grad_inner_tensor(grad) + + def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor: + if self.is_dtensor: + if isinstance(grad, AsyncCollectiveTensor): + grad = grad.wait() + grad = cast(DTensor, grad)._local_tensor + return grad + + def _assert_in_states(self, *states: ShardedState) -> None: + if self.sharded_state not in states: + _raise_assert_with_print( + f"Expects to be in one of {states}, not {self.sharded_state}" + ) + + +# NOTE: Unsafe here refers to not checking whether the storage is already +# allocated or freed, respectively. We should be safe to use them since we +# explicitly manage the state transition. +def unsafe_alloc_storage(tensor: torch.Tensor) -> None: + # Skip the already-allocated check and assume that `tensor` is the base + # tensor to save CPU overhead + tensor.untyped_storage().resize_(tensor.numel() * tensor.itemsize) + + +def unsafe_free_storage(tensor: torch.Tensor) -> None: + # Skip the already-freed check to save CPU overhead + tensor.untyped_storage().resize_(0) + + +# NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial +# CPU overhead, if the module did not override it. For FSDP, we know we do not +# need those checks when transitioning between sharded/unsharded parameters. 
+def unsafe_setattr_param( + module: nn.Module, param_name: str, param: nn.Parameter +) -> None: + if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__: + module._parameters[param_name] = param + else: # slow path + setattr(module, param_name, param) + + +def set_requires_grad_if_needed( + src_tensor: torch.Tensor, dst_tensor: torch.Tensor +) -> None: + # Only call `requires_grad_` if needed to avoid the Python <> C++ context + # switch overhead + if src_tensor.requires_grad != dst_tensor.requires_grad: + dst_tensor.requires_grad_(src_tensor.requires_grad) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py new file mode 100644 index 0000000000000000000000000000000000000000..88bfe0b86f373425bb9253538740875cc7cd0beb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py @@ -0,0 +1,506 @@ +import contextlib + +from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple + +import torch +import torch.distributed as dist +import torch.nn as nn + +from torch.autograd.graph import Node +from torch.distributed.fsdp._common_utils import _named_parameters_with_duplicates +from torch.utils._pytree import tree_flatten, tree_unflatten +from torch.utils.hooks import RemovableHandle +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_collectives import ( + AllGatherResult, + foreach_all_gather, + foreach_all_gather_copy_out, + foreach_reduce_scatter, +) +from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo, TrainingState +from ._fsdp_param import FSDPParam, ParamModuleInfo, ShardedState + +_ModuleToHandleDict = Dict[nn.Module, RemovableHandle] # for state dict + + +""" +[Note: Overlapping all-gather copy-in and all-gather] +For implicit forward prefetching, we want to overlap the next copy-in with the +current all-gather. We do so using a separate copy-in stream. However, since +we have the all-gather input as a view into the output, we must make sure to +copy into different memory from the current all-gather's output. Thus, we keep +a reference to the current all-gather's output and have the next FSDP parameter +group free it after its copy-in. Finally, we have the last FSDP state flush the +reference to avoid holding onto memory after forward. 
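+
+An illustrative steady-state picture (assumed, matching the description above):
+
+    copy-in stream:    copy-in(i)   copy-in(i+1)  ...
+    all-gather stream:              all-gather(i) all-gather(i+1) ...
+
+i.e. the copy-in for group i+1 proceeds while group i's all-gather is still in
+flight, which is why group i+1 must not write into group i's still-live
+all-gather output.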
+""" + + +class FSDPCommContext: + """This has the communication state shared across FSDP states/parameter groups.""" + + def init(self): + # Setting the all-gather/reduce-scatter streams to be higher priority + # can help avoid some issues where their copies in/out are delayed and + # block computation + high_priority = -1 + # All-gather state and copy-in stream allow overlapping the next + # copy-in with the current all-gather in forward; copy-in overlaps with + # reduce-scatter in backward without the separate copy-in stream + self.all_gather_copy_in_stream = torch.cuda.Stream(priority=high_priority) + self.all_gather_state: Optional[AllGatherState] = None + # All-gather stream allows overlapping next all-gather with current + # forward compute + self.all_gather_stream = torch.cuda.Stream(priority=high_priority) + # Reduce-scatter stream gives separate execution "thread" for post- + # backward logic like pre/post-gradient division and reduce-scatter + self.reduce_scatter_stream = torch.cuda.Stream(priority=high_priority) + # Post-forward order for explicit backward prefetching + self.post_forward_order: List[FSDPParamGroup] = [] # will cause ref cycles + + def get_all_gather_streams( + self, training_state: TrainingState + ) -> Tuple[torch.cuda.Stream, torch.cuda.Stream]: + if training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD): + # Use separate streams for implicit prefetching + return self.all_gather_copy_in_stream, self.all_gather_stream + current_stream = torch.cuda.current_stream() + return current_stream, current_stream + + +# See [Note: Overlapping all-gather copy-in and all-gather] +class AllGatherState(NamedTuple): + all_gather_result: AllGatherResult + event: torch.cuda.Event # all-gather copy-out + + +class FSDPParamGroup: + """This class represents a parameter group to communicate together.""" + + _orig_dtype: torch.dtype + _reduce_dtype: Optional[torch.dtype] + + def __init__( + self, + params: List[nn.Parameter], + module: nn.Module, + mesh_info: FSDPMeshInfo, + post_forward_mesh_info: Optional[FSDPMeshInfo], + device: torch.device, + mp_policy: MixedPrecisionPolicy, + ): + self.module = module # permit ref cycle because 1:1 lifetime + param_module_infos = _get_param_module_infos(params, module) + self.fsdp_params = [ + FSDPParam( + param, module_info, mesh_info, post_forward_mesh_info, device, mp_policy + ) + for param, module_info in zip(params, param_module_infos) + ] + self.mesh_info = mesh_info + self.post_forward_mesh_info = post_forward_mesh_info + self.device = device + self.mp_policy = mp_policy + self._training_state = TrainingState.IDLE + # Group's sharded state always matches its parameters' sharded states + self._sharded_state = ShardedState.SHARDED + self._module_fqn: Optional[str] = None # prefixed from root module + + # - Hook state + self._module_to_pre_save_state_dict_hook_handle: _ModuleToHandleDict = {} + self._module_to_pre_load_state_dict_hook_handle: _ModuleToHandleDict = {} + + # - Communication and communication/computation overlap + self.comm_ctx = FSDPCommContext() + # Group's indices in the shared post-forward order + self._post_forward_indices: List[int] = [] + # Used to avoid mistargeted backward prefetches when the module is used + # in forward but not in backward: for each forward, we record a tuple + # of the output's grad fns and later query the autograd engine whether + # any grad fn will execute in the current backward to know to prefetch. 
+ self.all_forward_output_grad_fns: Set[Tuple[Node, ...]] = set() + # Whether to reduce-scatter or all-reduce gradients, respectively + # (can be set to false to save communication during gradient + # accumulation); all-reducing without reduce-scatter is disallowed + self.reduce_scatter_grads: bool = True + self.all_reduce_grads: bool = True + + # - CUDA events for stream synchronization + # Holds the all-gather output buffer, sync objects, and metadata + self._all_gather_result: Optional[AllGatherResult] = None + # Holds the reduce-scatter view-out CUDA event that marks the end of + # the group's post-backward (e.g. reduce-scatter and div), which should + # be waited on at the end of backward + self._reduce_scatter_view_out_event: Optional[torch.cuda.Event] = None + # Holds the reshard-after-forward CUDA event when resharding to a + # different world size, which should be waited on in the next unshard + self._reshard_after_forward_event: Optional[torch.cuda.Event] = None + + # Initialization # + def _init_mp_dtypes(self) -> None: + for fsdp_param in self.fsdp_params: + fsdp_param.init_dtype_attrs(self.mp_policy) + orig_dtypes = {fsdp_param.orig_dtype for fsdp_param in self.fsdp_params} + if len(orig_dtypes) != 1: + # This can be relaxed if we copy-out for the reduce-scatter + raise AssertionError( + f"FSDP expects uniform original parameter dtype but got {orig_dtypes}" + ) + self._orig_dtype = next(iter(orig_dtypes)) + reduce_dtypes = {fsdp_param.reduce_dtype for fsdp_param in self.fsdp_params} + if len(reduce_dtypes) != 1: + # This can be relaxed if we issue one reduce-scatter per reduce + # dtype (but we would need a way for users to specify multiple + # reduce dtypes) + raise AssertionError( + f"FSDP expects uniform reduce dtype but got {reduce_dtypes}" + ) + self._reduce_dtype = next(iter(reduce_dtypes)) + + def _init_grad_divide_factors(self): + data_parallel_world_size = 1 + data_parallel_world_size *= self.mesh_info.shard_mesh_size + if isinstance(self.mesh_info, HSDPMeshInfo): + data_parallel_world_size *= self.mesh_info.replicate_mesh_size + if self._reduce_dtype == torch.float32: + # Use NCCL's AVG op to divide after reduction since it is more + # performant and fp32 has sufficient precision + self._grad_divide_factors: Optional[Tuple[float, float]] = None + return + # For N data parallel workers, each worker computes g_i, and they + # collectively reduce (g_1 + ... + g_N) / N. To avoid overflow and + # underflow, we divide by ~sqrt(N) before and after the reduction. + factor: int = 1 + while ( + data_parallel_world_size % factor == 0 + and data_parallel_world_size / factor > factor + ): + factor *= 2 + factor = float(factor) + self._grad_divide_factors = (factor, data_parallel_world_size / factor) + + def lazy_init(self): + param_names_on_meta = [ + fsdp_param._param_fqn + for fsdp_param in self.fsdp_params + if fsdp_param.sharded_param.device.type == "meta" + ] + if param_names_on_meta: + raise RuntimeError( + "FSDP parameters should be materialized from meta device before training, " + f"but the following were still on meta device: {param_names_on_meta}\n" + "For example, call module.to_empty(device) to materialize to device and " + "call module.reset_parameters() on each module to initialize values." 
+ ) + # Initialize mixed precision attributes lazily in case the user changes + # the parameter dtypes after construction time but before forward + self._init_mp_dtypes() + self._init_grad_divide_factors() + self._register_state_dict_hooks() + + # Runtime # + def unshard(self, async_op: bool = False): + if self._all_gather_result is not None: # already called, pending wait + return + if self.is_unsharded: + return # no-op + if self._reshard_after_forward_event is not None: + # Resharded parameter data is allocated in the default stream and + # used in the all-gather streams + self._wait_all_gather_streams_on_event(self._reshard_after_forward_event) + self._reshard_after_forward_event = None + self._all_gather_result = foreach_all_gather( + self.fsdp_params, + self._all_gather_process_group, + async_op, + *self.comm_ctx.get_all_gather_streams(self._training_state), + self.device, + ) + + def wait_for_unshard(self): + """ + 1. In forward with implict prefetching, to overlap the current copy-out + with the next all-gather, we save a reference to the current all-gather + result to free after the next copy-out. + 2. Otherwise (explicit prefetching or in backward), we free the + all-gather result immediately after the current copy-out since we can + already overlap the current copy-out with the previous reduce-scatter. + """ + if not self._all_gather_result: + return # no preceding unshard + if self._training_state == TrainingState.FORWARD: # implicit prefetch + if prev_all_gather_state := self.comm_ctx.all_gather_state: + self._wait_all_gather_streams_on_event(prev_all_gather_state.event) + self.comm_ctx.all_gather_state = None # free the all-gather result + foreach_all_gather_copy_out( + self._all_gather_result, self.fsdp_params, self._all_gather_process_group + ) + for fsdp_param in self.fsdp_params: + fsdp_param.init_unsharded_param() # no-op after 1st call + self._to_unsharded() + all_gather_copy_out_event = torch.cuda.Event() + all_gather_copy_out_event.record() + if self._training_state == TrainingState.FORWARD: + self.comm_ctx.all_gather_state = AllGatherState( + self._all_gather_result, all_gather_copy_out_event + ) + else: + self._wait_all_gather_streams_on_event(all_gather_copy_out_event) + self._all_gather_result = None # free unless saved in `all_gather_state` + + def _wait_all_gather_streams_on_event(self, event: torch.cuda.Event): + self.comm_ctx.all_gather_copy_in_stream.wait_event(event) + self.comm_ctx.all_gather_stream.wait_event(event) + + def reshard(self): + if self._training_state == TrainingState.FORWARD: + if not self._reshard_after_forward: + return + if self._use_post_forward_mesh: + self._to_sharded_post_forward() + self._reshard_after_forward_event = torch.cuda.Event() + self._reshard_after_forward_event.record() + return + self._to_sharded() + + def pre_forward( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + with torch.profiler.record_function("FSDP::pre_forward"): + self._training_state = TrainingState.FORWARD + self.unshard() + self.wait_for_unshard() + args, kwargs = self._register_post_backward_hook(args, kwargs) + return args, kwargs + + def post_forward(self, module: nn.Module, input: Any, output: Any): + with torch.profiler.record_function("FSDP::post_forward"): + self.reshard() + self._record_post_forward() + self._training_state = TrainingState.IDLE + return output + + def _record_post_forward(self) -> None: + # Since a group has one pre-backward unshard for each forward call + # 
before the backward, we record each usage (with multiplicity) + post_forward_index = len(self.comm_ctx.post_forward_order) + self.comm_ctx.post_forward_order.append(self) + self._post_forward_indices.append(post_forward_index) + + def pre_backward(self, forward_grad_fns: Tuple[Any, ...], *unused: Any): + with torch.profiler.record_function("FSDP::pre_backward"): + self._training_state = TrainingState.PRE_BACKWARD + self.unshard() # no-op if prefetched + self.wait_for_unshard() + # Can be already removed if running multiple `backward`s + self.all_forward_output_grad_fns.discard(forward_grad_fns) + self._prefetch_unshard() + + def post_backward(self, *unused: Any): + self._training_state = TrainingState.POST_BACKWARD + with torch.profiler.record_function("FSDP::post_backward_reshard"): + if not self.reduce_scatter_grads: + self.reshard() + return + # Save the autograd-computed gradients before resharding to only + # access the unsharded parameters when their data is present + fsdp_params_with_grad: List[FSDPParam] = [] + unsharded_grads: List[torch.Tensor] = [] + for fsdp_param in self.fsdp_params: + if fsdp_param.unsharded_param.grad is not None: + fsdp_params_with_grad.append(fsdp_param) + unsharded_grads.append(fsdp_param.unsharded_grad_data) + fsdp_param.unsharded_param.grad = None + self.reshard() + if len(fsdp_params_with_grad) == 0: + return + with torch.profiler.record_function("FSDP::post_backward_reduce"): + self._reduce_scatter_view_out_event = foreach_reduce_scatter( + fsdp_params_with_grad, + unsharded_grads, + self._reduce_scatter_process_group, + self.comm_ctx.reduce_scatter_stream, + self._orig_dtype, + self._reduce_dtype, + self.device, + self._grad_divide_factors, + ) + + def finalize_backward(self): + if self._reduce_scatter_view_out_event is not None: + torch.cuda.current_stream().wait_event(self._reduce_scatter_view_out_event) + self._reduce_scatter_view_out_event = None + self._training_state = TrainingState.IDLE + self._post_forward_indices.clear() + self.all_forward_output_grad_fns.clear() + + def _prefetch_unshard(self): + if self._training_state == TrainingState.PRE_BACKWARD: + if not self._post_forward_indices: + # Can be cleared if running multiple `backward`s + return + curr_index = self._post_forward_indices.pop() + if (target_index := curr_index - 1) < 0: + return + target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index] + if any( + torch._C._will_engine_execute_node(grad_fn) # type: ignore[attr-defined] + for grad_fns in target_fsdp_param_group.all_forward_output_grad_fns + for grad_fn in grad_fns + ): + with torch.profiler.record_function( + "FSDP::backward_prefetch" + ), target_fsdp_param_group.use_training_state( + TrainingState.PRE_BACKWARD + ): + target_fsdp_param_group.unshard() + + # Utilities # + def _to_sharded(self): + if not self.is_sharded: + for fsdp_param in self.fsdp_params: + fsdp_param.to_sharded() + self._sharded_state = ShardedState.SHARDED + + def _to_sharded_post_forward(self): + if not self.is_sharded_post_forward: + for fsdp_param in self.fsdp_params: + fsdp_param.to_sharded_post_forward() + self._sharded_state = ShardedState.SHARDED_POST_FORWARD + + def _to_unsharded(self): + if not self.is_unsharded: + for fsdp_param in self.fsdp_params: + fsdp_param.to_unsharded() + self._sharded_state = ShardedState.UNSHARDED + + @property + def is_sharded(self) -> bool: + return self._sharded_state == ShardedState.SHARDED + + @property + def is_sharded_post_forward(self) -> bool: + return self._sharded_state == 
ShardedState.SHARDED_POST_FORWARD + + @property + def is_unsharded(self) -> bool: + return self._sharded_state == ShardedState.UNSHARDED + + @contextlib.contextmanager + def use_training_state(self, training_state: TrainingState): + old_training_state = self._training_state + self._training_state = training_state + try: + yield + finally: + self._training_state = old_training_state + + # Hook Registration # + def _register_post_backward_hook( + self, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + if not torch.is_grad_enabled(): + return args, kwargs + args_list, args_spec = tree_flatten(args) + kwargs_list, kwargs_spec = tree_flatten(kwargs) + args_kwargs_list = list(args_list) + list(kwargs_list) + inp_tensor_indices: List[int] = [] + inp_tensors: List[torch.Tensor] = [] + for i, obj in enumerate(args_kwargs_list): + if torch.is_tensor(obj) and obj.requires_grad: + inp_tensor_indices.append(i) + inp_tensors.append(obj) + if len(inp_tensors) == 0: + return args, kwargs # no tensors that require gradients + inp_tensors = RegisterPostBackwardFunction.apply(self, *inp_tensors) + for inp_tensor_idx, inp_tensor in zip(inp_tensor_indices, inp_tensors): + args_kwargs_list[inp_tensor_idx] = inp_tensor + args_list = args_kwargs_list[: len(args_list)] + kwargs_list = args_kwargs_list[len(args_list) :] + args = tree_unflatten(args_list, args_spec) + kwargs = tree_unflatten(kwargs_list, kwargs_spec) + return args, kwargs + + def _register_state_dict_hooks(self) -> None: + assert len(self._module_to_pre_save_state_dict_hook_handle) == 0 + assert len(self._module_to_pre_load_state_dict_hook_handle) == 0 + modules_with_fsdp_params: Set[nn.Module] = { + fsdp_param._module_info.module for fsdp_param in self.fsdp_params + } + + def to_sharded_hook(*args: Any, **kwargs: Any) -> None: + self._to_sharded() + + for module in modules_with_fsdp_params: + self._module_to_pre_save_state_dict_hook_handle[ + module + ] = module.register_state_dict_pre_hook(to_sharded_hook) + self._module_to_pre_load_state_dict_hook_handle[ + module + ] = module._register_load_state_dict_pre_hook(to_sharded_hook) + + # Properties # + @property + def _reshard_after_forward(self) -> bool: + return self.post_forward_mesh_info is not None + + @property + def _use_post_forward_mesh(self) -> bool: + return ( + self._reshard_after_forward + and self.mesh_info != self.post_forward_mesh_info + ) + + @property + def _all_gather_process_group(self) -> dist.ProcessGroup: + mesh_info = ( + cast(FSDPMeshInfo, self.post_forward_mesh_info) + if self.is_sharded_post_forward + else self.mesh_info + ) + assert isinstance(mesh_info, FSDPMeshInfo) + return mesh_info.shard_process_group + + @property + def _reduce_scatter_process_group(self) -> dist.ProcessGroup: + mesh_info = self.mesh_info + assert isinstance(mesh_info, FSDPMeshInfo) + return mesh_info.shard_process_group + + +def _get_param_module_infos( + params: List[nn.Parameter], module: nn.Module +) -> List[ParamModuleInfo]: + """ + Shared parameter: lin1.weight = lin2.weight + Shared module: mlp.lin1 = mlp.lin2 + We do not remove duplicates when traversing both modules and parameters to + find shared modules' parameters and shared parameters within a module. 
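For concreteness, a small illustration (not part of this module) of the two sharing cases named above, and why duplicates are kept while traversing:

import torch.nn as nn

mlp = nn.Module()
mlp.lin1 = nn.Linear(8, 8)
mlp.lin2 = nn.Linear(8, 8)
mlp.lin2.weight = mlp.lin1.weight   # shared parameter (weight tying)
mlp.lin3 = mlp.lin1                 # shared module

# With remove_duplicate=False, every (module, name) pair is visited, so the
# shared weight shows up under lin1, lin2, and lin3.
names = [n for n, _ in mlp.named_parameters(remove_duplicate=False)]
# e.g. ['lin1.weight', 'lin1.bias', 'lin2.weight', 'lin2.bias',
#       'lin3.weight', 'lin3.bias']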
+ """ + params_set = set(params) + param_to_module_info: Dict[nn.Parameter, ParamModuleInfo] = {} + for _, submodule in module.named_modules(remove_duplicate=False): + for param_name, param in _named_parameters_with_duplicates( + submodule, recurse=False + ): + if param in params_set: + if param not in param_to_module_info: + param_to_module_info[param] = ParamModuleInfo(submodule, param_name) + else: + param_to_module_info[param].shared_modules.append(submodule) + param_to_module_info[param].shared_param_names.append(param_name) + if len(param_to_module_info) != len(params): + raise AssertionError(f"Some parameters are not in the module tree of {module}") + return [param_to_module_info[param] for param in params] + + +class RegisterPostBackwardFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, param_group: FSDPParamGroup, *inputs: torch.Tensor): + # All tensors in `inputs` should require gradient + ctx.param_group = param_group + return inputs + + @staticmethod + def backward(ctx, *grads: torch.Tensor): + ctx.param_group.post_backward() + return (None,) + grads diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py new file mode 100644 index 0000000000000000000000000000000000000000..b2aba4182f4666a2deab5346f4a6532f083a1a0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py @@ -0,0 +1,246 @@ +import functools + +from typing import Any, Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.autograd.graph import Node, register_multi_grad_hook +from torch.distributed._composable_state import ( + _get_module_state, + _insert_module_state, + _State, +) +from torch.distributed.utils import _to_kwargs +from torch.utils._pytree import tree_flatten, tree_map +from torch.utils.hooks import RemovableHandle +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_common import _cast_fp_tensor, TrainingState +from ._fsdp_param import FSDPParam +from ._fsdp_param_group import FSDPCommContext, FSDPParamGroup + + +class FSDPStateContext: + """This has state shared across FSDP states.""" + + def __init__(self): + # All FSDP states in the root state's module tree + self.all_states: List[FSDPState] = [] + # Iteration's forward root runs the once-per-forward logic; this root + # may not be the overall root set by lazy initialization in cases where + # only a submodule runs forward (e.g. 
encoder-only for eval) + self.iter_forward_root: Optional[FSDPState] = None + # Final callback should only be queued once per backward + self.post_backward_final_callback_queued: bool = False + # Whether to finalize backward in this backward's final callback + self.is_last_backward: bool = True + + +class FSDPState(_State): + def __init__(self): + super().__init__() + self._fsdp_param_group: Optional[FSDPParamGroup] = None + self._is_root: Optional[bool] = None # root set during lazy init + self._state_ctx = FSDPStateContext() + self._comm_ctx = FSDPCommContext() + self._training_state: TrainingState = TrainingState.IDLE + self._pre_backward_hook_handles: List[RemovableHandle] = [] + + # Define a separate init since `__init__` is called in the contract + def init( + self, module: nn.Module, device: torch.device, mp_policy: MixedPrecisionPolicy + ) -> None: + _insert_module_state(module, self) + self._module = module + self._device = device + self._mp_policy = mp_policy + self._pre_forward_hook_handle = module.register_forward_pre_hook( + self._pre_forward, prepend=True, with_kwargs=True + ) + self._post_forward_hook_handle = module.register_forward_hook( + self._post_forward, prepend=False + ) + + def _root_pre_forward( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + self._lazy_init() + if self._state_ctx.iter_forward_root is not None: + return args, kwargs + self._state_ctx.iter_forward_root = self + with torch.profiler.record_function("FSDP::root_pre_forward"): + # Wait for optimizer before implicitly prefetched all-gathers + current_stream = torch.cuda.current_stream() + self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream) + self._comm_ctx.all_gather_stream.wait_stream(current_stream) + if self._device.type == "cuda": + with torch.profiler.record_function("FSDP::inputs_to_device"): + args_tuple, kwargs_tuple = _to_kwargs( + args, kwargs, self._device, False + ) # same as DDP + args, kwargs = args_tuple[0], kwargs_tuple[0] + return args, kwargs + + def _lazy_init(self) -> None: + """ + Lazy initialization represents when all modules' parallelisms have + finalized (e.g. FSDP has been applied to all desired modules). This + means that we can determine which state is the root, and we do so by + the 1st state to run forward. 
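A hedged usage sketch of how root selection plays out with the composable ``fully_shard`` API defined later in this diff; it assumes ``torch.distributed`` is already initialized (e.g. under ``torchrun``) and that the package ``__init__`` re-exports ``fully_shard``:

import torch
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard  # assumed re-export

model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 16)).cuda()
for layer in model:
    fully_shard(layer)   # one parameter group per layer
fully_shard(model)       # root group

out = model(torch.randn(4, 16, device="cuda"))  # first forward: root lazy-inits here
# Running `model[0](x)` before any forward through `model` would make that layer
# the root instead, and a later root forward raises the RuntimeError above.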
+ """ + if self._is_root is not None: + return # no-op: already initialized + self._is_root = True + root_module = self._module + for module_name, module in root_module.named_modules(): + if (state := _get_module_fsdp_state(module)) is None: + continue + if module is not root_module: + if state._is_root is not None: + raise RuntimeError( + "FSDP state has already been lazily initialized for " + f"{module_name}\nFSDP requires running forward through " + "the root module first" + ) + state._is_root = False + self._state_ctx.all_states.append(state) + if self._fsdp_param_group: + # For the root, do not reshard after forward since for training, + # the parameters would be freed and all-gathered immediately + self._fsdp_param_group.post_forward_mesh_info = None + self._init_fqns() + self._init_shared_state() + # Run parameter group lazy inits after initializing FQNs for improved + # error messages + for state in self._state_ctx.all_states: + if state._fsdp_param_group: + state._fsdp_param_group.lazy_init() + + def _init_shared_state(self) -> None: + self._comm_ctx.init() + for state in self._state_ctx.all_states: + state._state_ctx = self._state_ctx + state._comm_ctx = self._comm_ctx + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.comm_ctx = self._comm_ctx + + def _init_fqns(self) -> None: + """Sets module and parameter FQN attributes for debugging.""" + assert self._is_root + root_module = self._module + param_to_fsdp_param: Dict[nn.Parameter, FSDPParam] = {} + module_to_fsdp_param_group: Dict[nn.Module, FSDPParamGroup] = {} + for state in self._state_ctx.all_states: + if fsdp_param_group := state._fsdp_param_group: + for fsdp_param in fsdp_param_group.fsdp_params: + param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param + module_to_fsdp_param_group[fsdp_param_group.module] = fsdp_param_group + for param_name, param in root_module.named_parameters(): + if param in param_to_fsdp_param: + param_to_fsdp_param[param]._param_fqn = param_name + for module_name, module in root_module.named_modules(): + if module in module_to_fsdp_param_group: + module_to_fsdp_param_group[module]._module_fqn = module_name + + def _pre_forward( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + # When composing with module-hook-based activation checkpointing, the + # the pre-backward hook is responsible for the unshard + if self._training_state == TrainingState.PRE_BACKWARD: + return args, kwargs + self._training_state = TrainingState.FORWARD + args, kwargs = self._root_pre_forward(module, args, kwargs) + if self._mp_policy.cast_forward_inputs and self._mp_policy.param_dtype: + with torch.profiler.record_function("FSDP::cast_forward_inputs"): + cast_fn = functools.partial( + _cast_fp_tensor, self._mp_policy.param_dtype + ) + args, kwargs = tree_map(cast_fn, args), tree_map(cast_fn, kwargs) + if self._fsdp_param_group: + args, kwargs = self._fsdp_param_group.pre_forward(module, args, kwargs) + return args, kwargs + + def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any: + # When composing with module-hook-based activation checkpointing, the + # post-backward hook is responsible for the reshard + if self._training_state == TrainingState.PRE_BACKWARD: + return output + if self._fsdp_param_group: + output = self._fsdp_param_group.post_forward(module, input, output) + output = self._register_pre_backward_hook(output) + self._training_state = TrainingState.IDLE + if self._state_ctx.iter_forward_root is self: 
+ if all_gather_state := self._comm_ctx.all_gather_state: + # Free the last all-gather result if needed; refer to + # [Note: Overlapping all-gather copy-in and all-gather] + self._comm_ctx.all_gather_copy_in_stream.wait_event( + all_gather_state.event + ) + self._comm_ctx.all_gather_stream.wait_event(all_gather_state.event) + self._comm_ctx.all_gather_state = None # free the all-gather result + self._state_ctx.iter_forward_root = None + if self._mp_policy.output_dtype is not None: + with torch.profiler.record_function("FSDP::cast_forward_outputs"): + output = tree_map( + functools.partial(_cast_fp_tensor, self._mp_policy.output_dtype), + output, + ) + return output + + def _pre_backward(self, forward_grad_fns: Tuple[Node, ...], *unused: Any) -> None: + self._training_state = TrainingState.PRE_BACKWARD + self._register_root_post_backward_final_callback() + if self._fsdp_param_group: + self._fsdp_param_group.pre_backward(forward_grad_fns, *unused) + + def _root_post_backward_final_callback(self) -> None: + with torch.profiler.record_function("FSDP::root_post_backward_callback"): + for state in self._state_ctx.all_states: + if state._fsdp_param_group and state._fsdp_param_group.is_unsharded: + # Run post-backward in case forward inputs did not require + # gradient so the autograd backward did not run + state._fsdp_param_group.post_backward() + if self._state_ctx.is_last_backward: + state._finalize_backward() + if self._state_ctx.is_last_backward: + self._comm_ctx.post_forward_order.clear() + self._state_ctx.post_backward_final_callback_queued = False + + def _finalize_backward(self) -> None: + self._training_state = TrainingState.IDLE + for handle in self._pre_backward_hook_handles: + handle.remove() + self._pre_backward_hook_handles.clear() + if self._fsdp_param_group: + self._fsdp_param_group.finalize_backward() + + def _register_pre_backward_hook(self, output: Any) -> Any: + if not torch.is_grad_enabled(): + return output + + flat_outputs, _ = tree_flatten(output) + tensors = tuple(t for t in flat_outputs if t.requires_grad) + if tensors: + grad_fns = tuple(t.grad_fn for t in tensors if t.grad_fn is not None) + pre_backward = functools.partial(self._pre_backward, grad_fns) + handle = register_multi_grad_hook(tensors, pre_backward, mode="any") + self._pre_backward_hook_handles.append(handle) + if self._fsdp_param_group: + self._fsdp_param_group.all_forward_output_grad_fns.add(grad_fns) + return output + + def _register_root_post_backward_final_callback(self): + if self._state_ctx.post_backward_final_callback_queued: + return + self._state_ctx.post_backward_final_callback_queued = True + Variable._execution_engine.queue_callback( + self._root_post_backward_final_callback + ) + + +def _get_module_fsdp_state(module: nn.Module) -> Optional[FSDPState]: + state = _get_module_state(module) + if isinstance(state, FSDPState): + return state + return None diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..47184af418c27832914b048b13be6e3d456eab55 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py @@ -0,0 +1,246 @@ +from typing import Any, cast, Optional, Union + +import typing_extensions + +import torch +import torch.nn as nn + +from torch.distributed._composable import contract +from torch.distributed._tensor import DeviceMesh, 
DTensor + +from ._fsdp_api import MixedPrecisionPolicy +from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo +from ._fsdp_init import ( + _get_device_from_mesh, + _get_managed_modules, + _get_managed_states, + _get_post_forward_mesh_info, + _init_default_fully_shard_mesh, + _move_states_to_device, +) +from ._fsdp_param_group import FSDPParamGroup +from ._fsdp_state import _get_module_fsdp_state, FSDPState + + +# The decorator adds a state object to `module` that can be accessed via +# `fully_shard.state(module)`. The state object and module are 1:1. +@contract(state_cls=FSDPState) +def fully_shard( + module: nn.Module, + *, + mesh: Optional[DeviceMesh] = None, + reshard_after_forward: Union[bool, int] = True, + mp_policy: MixedPrecisionPolicy = MixedPrecisionPolicy(), +): + """ + Shard module parameters across data parallel workers. + + This function applies fully sharded data parallelism (FSDP) or a variant to + ``module``, a technique for memory savings at the cost of communication. + Parameters are sharded across ``mesh``, and in turn, so are their gradients + and optimizer states. + + The sharded parameters are all-gathered to construct the unsharded + parameters for forward or backward computation. The unsharded parameters + are freed after computation to save memory. The gradients are reduced + across the mesh and divided by the mesh size for data parallelism. The + optimizer step runs on the sharded parameters. + + Each call to ``fully_shard`` constructs one communication group that + includes the parameters in ``module.parameters()`` except those already + assigned to a group from a nested call. Each group's parameters and its + gradients are communicated together in one collective, respectively. + Constructing multiple groups across the model (e.g. "layer by layer") + allows for peak memory savings and communication/computation overlap. + + Implementation-wise, the sharded parameters are represented as + :class:`DTensor` s, sharded on dim-0, and the unsharded parameters are + represented as :class:`Tensor` s. A module forward pre-hook all-gathers the + parameters, and a module forward hook frees them. Similar backward hooks + gather parameters and later free parameters/reduce gradients. + + Args: + mesh (Optional[DeviceMesh]): This data parallel mesh defines the + sharding and device. If 1D, then parameters are fully sharded + across the 1D mesh (FSDP). If 2D, then parameters are sharded + across the 0th dim and replicated across the 1st dim (HSDP). The + mesh's device type gives the device type used for communication; + if a CUDA or CUDA-like device type, then we use the current device. + reshard_after_forward (Union[bool, int]): This controls the parameter + behavior after forward and can trade off memory and communication: + - If ``True``, then this reshards parameters after forward and + all-gathers in backward. + - If ``False``, then this keeps the unsharded parameters in memory + after forward and avoids the all-gather in backward. + - If an ``int``, then this represents the world size to reshard to + after forward. It should be a non-trivial divisor of the ``mesh`` + shard dim size (i.e. excluding 1 and the dim size itself). A choice + may be the intra-node size (e.g. ``torch.cuda.device_count()``). + This allows the all-gather in backward to be over a smaller world + size at the cost of higher memory usage than setting to ``True``. 
+            - The root FSDP state has its value specially set to ``False`` as a
+              heuristic since its parameters would typically be immediately
+              all-gathered for backward.
+            - After forward, the parameters registered to the module depend on
+              this: The registered parameters are the sharded parameters if
+              ``True``; unsharded parameters if ``False``; and the parameters
+              resharded to the smaller mesh otherwise. To modify the parameters
+              between forward and backward, the registered parameters must be the
+              sharded parameters. For ``False`` or an ``int``, this can be done
+              by manually resharding via :meth:`reshard`.
+        mp_policy (MixedPrecisionPolicy): This controls the mixed precision
+            policy, which offers parameter/reduction mixed precision for this
+            module. See :class:`MixedPrecisionPolicy` for details.
+    """
+    if isinstance(module, (nn.ModuleList, nn.ModuleDict)):
+        raise ValueError(
+            f"fully_shard does not support containers that do not implement forward: {module}"
+        )
+    mesh = mesh or _init_default_fully_shard_mesh()
+    if mesh.ndim not in (1, 2):
+        raise ValueError(f"fully_shard expects a 1D or 2D DeviceMesh but got {mesh}")
+    elif mesh.ndim == 1:
+        mesh_info = FSDPMeshInfo(mesh, shard_mesh_dim=0)
+    else:
+        mesh_info = HSDPMeshInfo(mesh, shard_mesh_dim=1, replicate_mesh_dim=0)
+    device = _get_device_from_mesh(mesh)
+    post_forward_mesh_info = _get_post_forward_mesh_info(
+        reshard_after_forward, mesh_info
+    )
+
+    state = fully_shard.state(module)
+    state.init(module, device, mp_policy)
+
+    managed_modules = _get_managed_modules(module)
+    params, buffers = _get_managed_states(managed_modules)
+    _move_states_to_device(params, buffers, device, mesh_info)
+    if params:
+        state._fsdp_param_group = FSDPParamGroup(
+            params, module, mesh_info, post_forward_mesh_info, device, mp_policy
+        )
+
+    # for dynamo
+    for module in managed_modules:
+        module._is_fsdp_managed_module = True  # type: ignore[assignment]
+        module._fsdp_use_orig_params = True  # type: ignore[assignment]
+
+    # Place FSDP leftmost for highest priority in the method resolution order
+    cls = module.__class__
+    dct = {"__deepcopy__": unimplemented_deepcopy}
+    new_cls = type(f"FSDP{cls.__name__}", (FSDP, cls), dct)
+    module.__class__ = new_cls
+    return module
+
+
+def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> typing_extensions.Never:
+    raise AssertionError(
+        "FSDP does not support deepcopy. Please use state dict for serialization."
+    )
+
+
+class FSDP:
+    def __new__(cls, *args, **kwargs):
+        """
+        Override ``__new__`` to remove the FSDP class and directly construct
+        the original class for cases like indexing into a container module.
+        """
+        # Use index 2 since 0 is the dynamically constructed `FSDP<...>` class
+        # and index 1 is the `FSDP` class itself
+        orig_cls = cls.__mro__[2]
+        self = orig_cls.__new__(orig_cls, *args, **kwargs)
+        self.__init__(*args, **kwargs)
+        return self
+
+    def reshard(self) -> None:
+        """
+        Reshards the module's parameters, registering the sharded parameters
+        to the module and freeing the unsharded parameters if needed. This
+        method is *not* recursive.
+        """
+        state = self._get_fsdp_state()
+        if fsdp_param_group := state._fsdp_param_group:
+            fsdp_param_group.reshard()
+
+    def set_is_last_backward(self, is_last_backward: bool) -> None:
+        """
+        Sets whether the next backward is the last one, meaning that FSDP
+        should wait for gradient reduction to finish and clear internal data
+        structures used for explicit prefetching.
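Together with ``set_requires_gradient_sync`` below, this enables a gradient-accumulation loop along these lines (a sketch only; it assumes ``model`` has ``fully_shard`` applied and that ``opt``, ``loss_fn``, and a list of ``microbatches`` exist):

num_microbatches = len(microbatches)
for i, batch in enumerate(microbatches):
    is_last = i == num_microbatches - 1
    model.set_requires_gradient_sync(is_last)  # reduce-scatter only on the last microbatch
    model.set_is_last_backward(is_last)        # defer finalization until the last backward
    loss = loss_fn(model(batch))
    loss.backward()
opt.step()
opt.zero_grad()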
+ """ + state = self._get_fsdp_state() + state._state_ctx.is_last_backward = is_last_backward + + def set_requires_gradient_sync( + self, requires_gradient_sync: bool, recurse: bool = True + ) -> None: + """ + Sets if the module should sync gradients. This can be used to implement + gradient accumulation without communication. For HSDP, this controls + both reduce-scatter and all-reduce together. + + Args: + requires_gradient_sync (bool): Whether to reduce gradients for the + module's parameters. + recurse (bool): Whether to set for all submodules or just the + passed-in module. + """ + for module in cast(nn.Module, self).modules(): + if isinstance(module, FSDP): + state = module._get_fsdp_state() + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.reduce_scatter_grads = requires_gradient_sync + fsdp_param_group.all_reduce_grads = requires_gradient_sync + + def set_requires_all_reduce(self, requires_all_reduce: bool, recurse: bool = True): + """ + Sets if the module should all-reduce gradients. This can be used to + implement gradient accumulation with only reduce-scatter but not + all-reduce for HSDP. + """ + for module in cast(nn.Module, self).modules(): + if isinstance(module, FSDP): + state = module._get_fsdp_state() + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.all_reduce_grads = requires_all_reduce + + def _get_fsdp_state(self) -> FSDPState: + if (state := _get_module_fsdp_state(cast(nn.Module, self))) is None: + raise AssertionError(f"No FSDP state found on {self}") + return state + + def _apply(self, *args: Any, **kwargs: Any) -> Any: + # Reshard to ensure that sharded parameters are registered + self.reshard() + ret = super()._apply(*args, **kwargs) # type: ignore[misc] + state = self._get_fsdp_state() + if not (fsdp_param_group := state._fsdp_param_group): + return ret + # TODO: Remove this padding logic once DTensor pads the local tensor: + # https://github.com/pytorch/pytorch/issues/113045 + with torch.no_grad(): + for fsdp_param in fsdp_param_group.fsdp_params: + module_info = fsdp_param._module_info + new_param = getattr(module_info.module, module_info.param_name) + if new_param is not fsdp_param.sharded_param: + if torch.__future__.get_swap_module_params_on_conversion(): + raise AssertionError( + "Expects swap_tensors to preserve object but got " + f"{new_param} instead of {fsdp_param.sharded_param}" + ) + else: + raise AssertionError( + "Please set torch.__future__.set_swap_module_params_on_conversion(True) " + "to use _apply methods with FSDP" + ) + local_tensor = new_param._local_tensor + padded_sharded_size = fsdp_param.padded_sharded_param_size + if local_tensor.size() != padded_sharded_size: + padded_local_tensor = local_tensor.new_zeros(padded_sharded_size) + padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor) + local_tensor = padded_local_tensor + fsdp_param._sharded_param_data = local_tensor.view(-1) + assert isinstance(fsdp_param.sharded_param, DTensor) # mypy + fsdp_param.sharded_param._local_tensor = local_tensor[ + : fsdp_param.sharded_size[0] + ] + return ret diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..37e3d1544cd176438b1173627b819f58a747e2a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py @@ -0,0 +1,133 @@ +import warnings +from typing import 
Callable, Iterable, Optional, Union
+
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from torch.distributed._composable.contract import contract
+from torch.distributed._composable_state import _get_module_state, _insert_module_state
+from torch.distributed.fsdp._common_utils import _FSDPState
+from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo
+
+from torch.distributed.fsdp._init_utils import (
+    _init_buffer_state,
+    _init_core_state,
+    _init_device_handle,
+    _init_ignored_module_states,
+    _init_param_handle_from_module,
+    _init_prefetching_state,
+    _init_process_group_state,
+    _init_runtime_state,
+    _init_state_dict_state,
+    HYBRID_SHARDING_STRATEGIES,
+)
+from torch.distributed.fsdp._runtime_utils import (
+    _register_post_forward_hook,
+    _register_pre_forward_hook,
+    _register_root_pre_forward_hook,
+)
+from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks
+from torch.distributed.fsdp._wrap_utils import _auto_wrap
+from torch.distributed.fsdp.api import (
+    BackwardPrefetch,
+    CPUOffload,
+    MixedPrecision,
+    ShardingStrategy,
+)
+from torch.distributed.fsdp.wrap import _Policy
+
+
+@contract(state_cls=_FSDPState)
+def fully_shard(
+    module: nn.Module,
+    *,
+    process_group: Optional[dist.ProcessGroup] = None,
+    policy: Optional[_Policy] = None,
+    strategy: Optional[ShardingStrategy] = None,
+    mixed_precision: Optional[MixedPrecision] = None,
+    cpu_offload: Optional[CPUOffload] = None,
+    ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
+    device_id: Optional[Union[int, torch.device]] = None,
+    param_init_fn: Optional[Callable[[nn.Module], None]] = None,
+    sync_module_states: bool = False,
+    forward_prefetch: bool = False,
+    ignored_states: Union[
+        Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
+    ] = None,
+) -> nn.Module:
+    """
+    Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``.
+    """
+    warnings.warn(
+        "``torch.distributed._composable.fully_shard`` is being deprecated. "
+        "You can continue to use the wrapper-based FSDP. "
+        "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
+        "``torch.distributed._composable.fully_shard`` will be removed after PyTorch 2.5."
+ ) + + torch._C._log_api_usage_once("torch.distributed.fully_shard") + # Enforce the new auto wrap policy + if policy is not None and not isinstance(policy, _Policy): + raise ValueError(f"Expects a `_Policy` but got {policy}") + state = fully_shard.state(module) + state = _init_ignored_module_states(state, module, ignored_modules, ignored_states) + state = _init_device_handle(state, module, state._ignored_params, device_id) + _annotate_modules_for_dynamo(module, state._ignored_modules, True) + state = _init_process_group_state(state, process_group, strategy, policy) + if policy is not None: + root_kwargs = { + "process_group": process_group, + "strategy": strategy, + "mixed_precision": mixed_precision, + "cpu_offload": cpu_offload, + "ignored_modules": ignored_modules, + "device_id": device_id, + "param_init_fn": param_init_fn, + "sync_module_states": sync_module_states, + "forward_prefetch": forward_prefetch, + "ignored_states": ignored_states, + } + if strategy in HYBRID_SHARDING_STRATEGIES: + root_kwargs["process_group"] = (state.process_group, state._inter_node_pg) + _auto_wrap( + module, + policy, + state._ignored_modules, + state._ignored_params, + root_kwargs, + fully_shard, + ) + state = _init_core_state( + state, + strategy or ShardingStrategy.FULL_SHARD, + mixed_precision, + cpu_offload, + limit_all_gathers=True, + use_orig_params=True, + backward_prefetch_limit=1, + forward_prefetch_limit=1, + ) + state = _init_runtime_state(state) + state = _init_prefetching_state( + state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch + ) + state = _init_buffer_state(state, module) + state = _init_param_handle_from_module( + state, module, device_id, param_init_fn, sync_module_states + ) + state = _init_state_dict_state(state) + _register_all_state_dict_hooks(state) + _register_pre_forward_hook(state, module) + _register_post_forward_hook(state, module) + _register_root_pre_forward_hook(state, module) # prepend last + # Always insert the state for the passed-in module even if it has no + # managed parameters, in which case it has no handles and does not appear + # in `_fully_sharded_module_to_handles` + _insert_module_state(module, state) + for submodule in module.modules(): + if ( + submodule in state._fully_sharded_module_to_handle + and _get_module_state(submodule) is None + ): + _insert_module_state(submodule, state) + return module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..801b27f086709b4e30fe57adc20c8321b8f49b84 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..809db0090b3c75fea29005e9833d0e9fbc6a6f37 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6492d83c90175bf99bd0c9a70274fb0cd4b77dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc6923f2dd5ebef2fb07fa9cc7378e9a3a0e01eb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0227cf537613ac9260b565684491e7b98d2ffba3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c73b414c769e0b0251bb3926c93202bcae782c1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1164c549ff799f57c117fc510c2276c414f3de82 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4b2a46c4231dcec6f2b99af677b6979083b4b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py @@ -0,0 +1,44 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Expiration timers are set up on the same process as the agent and +used from your script to deal with stuck workers. 
When you go into +a code-block that has the potential to get stuck you can acquire +an expiration timer, which instructs the timer server to kill the +process if it does not release the timer by the self-imposed expiration +deadline. + +Usage:: + + import torchelastic.timer as timer + import torchelastic.agent.server as agent + + def main(): + start_method = "spawn" + message_queue = mp.get_context(start_method).Queue() + server = timer.LocalTimerServer(message, max_interval=0.01) + server.start() # non-blocking + + spec = WorkerSpec( + fn=trainer_func, + args=(message_queue,), + ...) + agent = agent.LocalElasticAgent(spec, start_method) + agent.run() + + def trainer_func(message_queue): + timer.configure(timer.LocalTimerClient(message_queue)) + with timer.expires(after=60): # 60 second expiry + # do some work + +In the example above if ``trainer_func`` takes more than 60 seconds to +complete, then the worker process is killed and the agent retries the worker group. +""" + +from .api import TimerClient, TimerRequest, TimerServer, configure, expires # noqa: F401 +from .local_timer import LocalTimerClient, LocalTimerServer # noqa: F401 +from .file_based_local_timer import FileTimerClient, FileTimerServer, FileTimerRequest # noqa: F401 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py new file mode 100644 index 0000000000000000000000000000000000000000..26ebce33dcb5b5062667cbb5fc9630b93eee4c79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py @@ -0,0 +1,333 @@ +# Copyright (c) Meta Platforms, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import io +import json +import logging +import os +import select +import signal +import sys +import threading +import time +from typing import Callable, Dict, List, Optional, Set, Tuple + +from torch.distributed.elastic.timer.api import TimerClient, TimerRequest + +__all__ = ["FileTimerClient", "FileTimerRequest", "FileTimerServer"] + +log = logging.getLogger(__name__) + +class FileTimerRequest(TimerRequest): + """ + Data object representing a countdown timer acquisition and release + that is used between the ``FileTimerClient`` and ``FileTimerServer``. + A negative ``expiration_time`` should be interpreted as a "release" + request. + ``signal`` is the signal to reap the worker process from the server + process. 
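For example, an acquire/release pair for a single scope could look like this (illustrative values; POSIX signals assumed):

import os
import signal
import time

from torch.distributed.elastic.timer import FileTimerRequest

# Acquire a 60-second timer for a scope, then release it (negative expiration).
acquire = FileTimerRequest(
    worker_pid=os.getpid(),
    scope_id="train_step",
    expiration_time=time.time() + 60,
    signal=signal.SIGKILL,
)
release = FileTimerRequest(
    worker_pid=os.getpid(), scope_id="train_step", expiration_time=-1, signal=0
)
print(acquire.to_json())  # {"version": 1, "pid": ..., "scope_id": "train_step", ...}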
+ """ + + __slots__ = ["version", "worker_pid", "scope_id", "expiration_time", "signal"] + + def __init__(self, worker_pid: int, scope_id: str, expiration_time: float, signal: int = 0) -> None: + self.version = 1 + self.worker_pid = worker_pid + self.scope_id = scope_id + self.expiration_time = expiration_time + self.signal = signal + + def __eq__(self, other) -> bool: + if isinstance(other, FileTimerRequest): + return ( + self.version == other.version + and self.worker_pid == other.worker_pid + and self.scope_id == other.scope_id + and self.expiration_time == other.expiration_time + and self.signal == other.signal + ) + return False + + def to_json(self) -> str: + return json.dumps( + { + "version": self.version, + "pid": self.worker_pid, + "scope_id": self.scope_id, + "expiration_time": self.expiration_time, + "signal": self.signal + }, + ) + + +class FileTimerClient(TimerClient): + """ + Client side of ``FileTimerServer``. This client is meant to be used + on the same host that the ``FileTimerServer`` is running on and uses + pid to uniquely identify a worker. + This client uses a named_pipe to send timer requests to the + ``FileTimerServer``. This client is a producer while the + ``FileTimerServer`` is a consumer. Multiple clients can work with + the same ``FileTimerServer``. + + Args: + + file_path: str, the path of a FIFO special file. ``FileTimerServer`` + must have created it by calling os.mkfifo(). + + signal: signal, the signal to use to kill the process. Using a + negative or zero signal will not kill the process. + """ + def __init__(self, file_path: str, signal=(signal.SIGKILL if sys.platform != "win32" else + signal.CTRL_C_EVENT)) -> None: # type: ignore[attr-defined] + super().__init__() + self._file_path = file_path + self.signal = signal + + def _open_non_blocking(self) -> Optional[io.TextIOWrapper]: + try: + fd = os.open(self._file_path, os.O_WRONLY | os.O_NONBLOCK) + return os.fdopen(fd, "wt") + except Exception: + return None + + def _send_request(self, request: FileTimerRequest) -> None: + # The server may have crashed or may haven't started yet. + # In such case, calling open() in blocking model blocks the client. + # To avoid such issue, open it in non-blocking mode, and an OSError will + # be raised if the server is not there. + file = self._open_non_blocking() + if file is None: + raise BrokenPipeError("Could not send the FileTimerRequest because FileTimerServer is not available.") + with file: + json_request = request.to_json() + # Write request with no greater than select.PIPE_BUF is guarantee to be atomic. + if len(json_request) > select.PIPE_BUF: + raise RuntimeError( + f"FileTimerRequest larger than {select.PIPE_BUF} bytes " + f"is not supported: {json_request}" + ) + file.write(json_request + "\n") + + def acquire(self, scope_id: str, expiration_time: float) -> None: + self._send_request( + request=FileTimerRequest( + worker_pid=os.getpid(), + scope_id=scope_id, + expiration_time=expiration_time, + signal=self.signal + ), + ) + + def release(self, scope_id: str) -> None: + self._send_request( + request=FileTimerRequest( + worker_pid=os.getpid(), + scope_id=scope_id, + expiration_time=-1, + signal=0 + ), + ) + + +class FileTimerServer: + """ + Server that works with ``FileTimerClient``. Clients are expected to be + running on the same host as the process that is running this server. + Each host in the job is expected to start its own timer server locally + and each server instance manages timers for local workers (running on + processes on the same host). 
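A hedged end-to-end sketch of the file-based timer; the FIFO path is illustrative, and the server creates the FIFO itself:

import os
import tempfile

from torch.distributed.elastic.timer import (
    FileTimerClient,
    FileTimerServer,
    configure,
    expires,
)

fifo_path = os.path.join(tempfile.mkdtemp(), "watchdog_fifo")

# Agent/parent process: start the watchdog (runs in a daemon thread).
server = FileTimerServer(fifo_path, max_interval=0.1)
server.start()

# Worker process on the same host: guard a section that might get stuck.
configure(FileTimerClient(fifo_path))
with expires(after=60):
    ...  # work that must release the timer within 60 seconds or be reaped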
+ + Args: + + file_path: str, the path of a FIFO special file to be created. + + max_interval: float, max interval in seconds for each watchdog loop. + + daemon: bool, running the watchdog thread in daemon mode or not. + A daemon thread will not block a process to stop. + log_event: Callable[[Dict[str, str]], None], an optional callback for + logging the events in JSON format. + """ + + def __init__( + self, + file_path: str, + max_interval: float = 10, + daemon: bool = True, + log_event: Optional[Callable[[str, Optional[FileTimerRequest]], None]] = None + ) -> None: + self._file_path = file_path + self._max_interval = max_interval + self._daemon = daemon + self._timers: Dict[Tuple[int, str], FileTimerRequest] = {} + self._stop_signaled = False + self._watchdog_thread: Optional[threading.Thread] = None + if os.path.exists(self._file_path): + os.remove(self._file_path) + os.mkfifo(self._file_path) + # For test only. Count the number of requests received. + self._request_count = 0 + # For test only. Process all requests and stop the server. + self._run_once = False + self._log_event = log_event if log_event is not None else lambda name, request: None + + + def start(self) -> None: + log.info( + "Starting %s..." + " max_interval=%s," + " daemon=%s", + type(self).__name__, self._max_interval, self._daemon + ) + self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=self._daemon) + log.info("Starting watchdog thread...") + self._watchdog_thread.start() + self._log_event("watchdog started", None) + + def stop(self) -> None: + log.info("Stopping %s", type(self).__name__) + self._stop_signaled = True + if self._watchdog_thread: + log.info("Stopping watchdog thread...") + self._watchdog_thread.join(self._max_interval) + self._watchdog_thread = None + else: + log.info("No watchdog thread running, doing nothing") + if os.path.exists(self._file_path): + os.remove(self._file_path) + self._log_event("watchdog stopped", None) + + def run_once(self) -> None: + self._run_once = True + if self._watchdog_thread: + log.info("Stopping watchdog thread...") + self._watchdog_thread.join() + self._watchdog_thread = None + else: + log.info("No watchdog thread running, doing nothing") + if os.path.exists(self._file_path): + os.remove(self._file_path) + + def _watchdog_loop(self) -> None: + # Open the pipe in blocking mode blocks the server thread. + # This is fine for the following reasons: + # 1. No client case usually does not happen. + # 2. We are running the watchdog loop in a separate daemon + # thread, which will not block the process to stop. + with open(self._file_path) as fd: + while not self._stop_signaled: + try: + run_once = self._run_once + self._run_watchdog(fd) + if run_once: + break + except Exception: + log.exception("Error running watchdog") + + def _run_watchdog(self, fd: io.TextIOWrapper) -> None: + timer_requests = self._get_requests(fd, self._max_interval) + self.register_timers(timer_requests) + now = time.time() + reaped_worker_pids = set() + for worker_pid, expired_timers in self.get_expired_timers(now).items(): + log.info("Reaping worker_pid=[%s]. Expired timers: %s", worker_pid, self._get_scopes(expired_timers)) + reaped_worker_pids.add(worker_pid) + # In case we have multiple expired timers, we find the first timer + # with a valid signal (>0) in the expiration time order. 
+ expired_timers.sort(key=lambda timer: timer.expiration_time) + signal = 0 + expired_timer = None + for timer in expired_timers: + self._log_event("timer expired", timer) + if timer.signal > 0: + signal = timer.signal + expired_timer = timer + break + if signal <= 0: + log.info("No signal specified with worker=[%s]. Do not reap it.", worker_pid) + continue + if self._reap_worker(worker_pid, signal): + log.info("Successfully reaped worker=[%s] with signal=%s", worker_pid, signal) + self._log_event("kill worker process", expired_timer) + else: + log.error("Error reaping worker=[%s]. Will retry on next watchdog.", worker_pid) + self.clear_timers(reaped_worker_pids) + + def _get_scopes(self, timer_requests: List[FileTimerRequest]) -> List[str]: + return [r.scope_id for r in timer_requests] + + def _get_requests(self, fd: io.TextIOWrapper, max_interval: float) -> List[FileTimerRequest]: + start = time.time() + requests = [] + while not self._stop_signaled or self._run_once: + # For named pipe, readline() is blocking when at least one writer opens. + # It returns only when flush() is called at the writer side. + # Note that flush() is automatically called inside close(). + # After the last writer closes, readline() is not blocking. + # It will return an empty string when it's at end-of-file. + # Since the client side always opens the pipe, writes a message and closes + # the pipe immediately, the readline() call below is not blocking for long. + json_request = fd.readline() + if len(json_request) == 0: + if self._run_once: + break + time.sleep(min(max_interval, 1)) + else: + request = json.loads(json_request) + pid = request["pid"] + scope_id = request["scope_id"] + expiration_time = request["expiration_time"] + signal = request["signal"] + requests.append( + FileTimerRequest( + worker_pid=pid, scope_id=scope_id, expiration_time=expiration_time, signal=signal + ) + ) + now = time.time() + if now - start > max_interval: + break + return requests + + def register_timers(self, timer_requests: List[FileTimerRequest]) -> None: + for request in timer_requests: + pid = request.worker_pid + scope_id = request.scope_id + expiration_time = request.expiration_time + self._request_count += 1 + + key = (pid, scope_id) + # negative expiration is a proxy for a release call + if expiration_time < 0: + if key in self._timers: + del self._timers[key] + else: + self._timers[key] = request + + def clear_timers(self, worker_pids: Set[int]) -> None: + for (pid, scope_id) in list(self._timers.keys()): + if pid in worker_pids: + del self._timers[(pid, scope_id)] + + def get_expired_timers(self, deadline: float) -> Dict[int, List[FileTimerRequest]]: + # pid -> [timer_requests...] + expired_timers: Dict[int, List[FileTimerRequest]] = {} + for request in self._timers.values(): + if request.expiration_time <= deadline: + expired_scopes = expired_timers.setdefault(request.worker_pid, []) + expired_scopes.append(request) + return expired_timers + + def _reap_worker(self, worker_pid: int, signal: int) -> bool: + try: + os.kill(worker_pid, signal) + return True + except ProcessLookupError: + log.info("Process with pid=%s does not exist. 
Skipping", worker_pid) + return True + except Exception: + log.exception("Error terminating pid=%s", worker_pid) + return False diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py new file mode 100644 index 0000000000000000000000000000000000000000..05f467c807a5bc61bb0a3c6853cd17243636e1cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py @@ -0,0 +1,125 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import logging +import multiprocessing as mp +import os +import signal +import time +from queue import Empty +from typing import Any, Dict, List, Set, Tuple + +from .api import RequestQueue, TimerClient, TimerRequest, TimerServer + +__all__ = ['LocalTimerClient', 'MultiprocessingRequestQueue', 'LocalTimerServer'] + +log = logging.getLogger(__name__) + +class LocalTimerClient(TimerClient): + """ + Client side of ``LocalTimerServer``. This client is meant to be used + on the same host that the ``LocalTimerServer`` is running on and uses + pid to uniquely identify a worker. This is particularly useful in situations + where one spawns a subprocess (trainer) per GPU on a host with multiple + GPU devices. + """ + + def __init__(self, mp_queue): + super().__init__() + self._mp_queue = mp_queue + + def acquire(self, scope_id, expiration_time): + pid = os.getpid() + acquire_request = TimerRequest(pid, scope_id, expiration_time) + self._mp_queue.put(acquire_request) + + def release(self, scope_id): + pid = os.getpid() + release_request = TimerRequest(pid, scope_id, -1) + self._mp_queue.put(release_request) + + +class MultiprocessingRequestQueue(RequestQueue): + """ + A ``RequestQueue`` backed by python ``multiprocessing.Queue`` + """ + + def __init__(self, mp_queue: mp.Queue): + super().__init__() + self._mp_queue = mp_queue + + def size(self) -> int: + return self._mp_queue.qsize() + + def get(self, size, timeout: float) -> List[TimerRequest]: + requests = [] + wait = timeout + for _ in range(0, size): + start = time.time() + + try: + r = self._mp_queue.get(block=True, timeout=wait) + except Empty: + break + + requests.append(r) + wait = wait - (time.time() - start) + if wait <= 0: + break + + return requests + + +class LocalTimerServer(TimerServer): + """ + Server that works with ``LocalTimerClient``. Clients are expected to be + subprocesses to the parent process that is running this server. Each host + in the job is expected to start its own timer server locally and each + server instance manages timers for local workers (running on processes + on the same host). 
+ """ + + def __init__( + self, mp_queue: mp.Queue, max_interval: float = 60, daemon: bool = True + ): + super().__init__(MultiprocessingRequestQueue(mp_queue), max_interval, daemon) + self._timers: Dict[Tuple[Any, str], TimerRequest] = {} + + def register_timers(self, timer_requests: List[TimerRequest]) -> None: + for request in timer_requests: + pid = request.worker_id + scope_id = request.scope_id + expiration_time = request.expiration_time + + # negative expiration is a proxy for a release call + if expiration_time < 0: + self._timers.pop((pid, scope_id), None) + else: + self._timers[(pid, scope_id)] = request + + def clear_timers(self, worker_ids: Set[int]) -> None: + for (pid, scope_id) in list(self._timers.keys()): + if pid in worker_ids: + self._timers.pop((pid, scope_id)) + + def get_expired_timers(self, deadline: float) -> Dict[Any, List[TimerRequest]]: + # pid -> [timer_requests...] + expired_timers: Dict[Any, List[TimerRequest]] = {} + for request in self._timers.values(): + if request.expiration_time <= deadline: + expired_scopes = expired_timers.setdefault(request.worker_id, []) + expired_scopes.append(request) + return expired_timers + + def _reap_worker(self, worker_id: int) -> bool: + try: + os.kill(worker_id, signal.SIGKILL) + return True + except ProcessLookupError: + log.info("Process with pid=%s does not exist. Skipping", worker_id) + return True + except Exception: + log.exception("Error terminating pid=%s", worker_id) + return False diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be514adb47bfde24d85fd498566c8ec25aa0d536 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ef69065405ca7998067b484f9690cbfb5ca1e8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3f89f272001a872961b2888eb6b3d0187fa3efa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..653c0e32f71338c779d4e26c88227978e2589a19 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79f4406d76c2aff69d35fd986eff77339f53b3db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed92ab52fbad108f8b75fc65f5fec852ad68d2b5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..845663af24ff014d9ef3f5e4c8039a39ea89a8a2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64475e984c4a43d7ee2322b6379640a62fb8e71f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b94985228977cbe5425be555c81ccae3b52f4510 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b032975f2afef11e8290c42c37dddb2dc2acbaf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b91670201d18b04552b73069b025602e53bdd298 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8df1407963c06b293793a83028256417bff8dd7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffc657896d87b69cbd548878164e08b29be25e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py @@ -0,0 +1,164 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""A helper to roughly balance a sequential module. + +Usage:: + + import torch + from torch.distributed.pipeline.sync import Pipe + from torch.distributed.pipeline.sync.balance import balance_by_time + + sample = torch.empty(128, 3, 224, 224) + balance = balance_by_time(torch.cuda.device_count(), model, sample) + + pipe = Pipe(model, balance, chunks=8) + +""" +from typing import Any, List, Union, Sequence + +import torch +from torch import Tensor +import torch.nn as nn + +from . import blockpartition +from .profile import profile_sizes, profile_times + +__all__ = ["balance_by_time", "balance_by_size"] + + +Device = Union[torch.device, int, str] + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + + +def balance_cost(cost: List[int], partitions: int) -> List[int]: + partitioned = blockpartition.solve(cost, partitions) + return [len(p) for p in partitioned] + + +def balance_by_time( + partitions: int, + module: nn.Sequential, + sample: Union[List[Any], Tensor], + *, + timeout: float = 1.0, + device: Device = torch.device("cuda"), +) -> List[int]: + """Naive automatic balancing by elapsed time per layer. + :: + + sample = torch.empty(128, 3, 224, 224) + balance = balance_by_time(torch.cuda.device_count(), model, sample) + pipe = Pipe(model, balance, chunks=8) + + Args: + partitions (int): + intended number of partitions + module (torch.nn.Sequential): + sequential module to be partitioned + sample (torch.Tensor): + example input with arbitrary batch size + + Keyword Args: + timeout (float): + profiling iterates again if the timeout (in second) is not exceeded + (default: ``1.0``) + device ('cpu' or 'cuda' device): + CPU or CUDA device where each layer is profiled (default: the + current CUDA device) + + Returns: + A list of number of layers in each partition. Use it for the `balance` + parameter of :class:`~torchpipe.Pipe`. + + .. note:: + `module` and `sample` must be placed on the same device. + + """ + times = profile_times(module, sample, timeout, torch.device(device)) + return balance_cost(times, partitions) + + +def balance_by_size( + partitions: int, + module: nn.Sequential, + input: Union[List[Any], Tensor], + *, + chunks: int = 1, + param_scale: float = 2.0, + device: Device = torch.device("cuda"), +) -> List[int]: + """Naive automatic balancing by CUDA memory usage per layer. + + During training, required memory for parameters depends on which optimizer + is used. 
Optimizers may use buffers for each parameter to track + optimization statistics internally, such as momentum buffer in SGD. + + To get more reliable size based balance, you should specify `param_scale` + with regard to your optimizer. The default `param_scale` is 2 instead of 1 + due to gradient accumulation which is necessary for every optimizer. + + Follow this guide to choose correct `param_scale` for typical optimizers: + + ========= ============= ========================================= + Optimizer `param_scale` Internal State + ========= ============= ========================================= + SGD 2--3 (momentum_buffer) + Adam 4--5 exp_avg, exp_avg_sq, (max_exp_avg_sq) + Adadelta 4 square_avg, acc_delta + Adagrad 3 sum + RMSprop 3--5 square_avg, (momentum_buffer), (grad_avg) + ========= ============= ========================================= + + Here's a simple example with the Adam optimizer:: + + balance = balance_by_size( + torch.cuda.device_count(), + model, + + # Same size with mini-batch to train + torch.empty(1024, 3, 224, 224), + + # Number of micro-batches to train with Pipe + chunks=8, + + # 4 for Adam + param_scale=4.0, + ) + + pipe = Pipe(model, balance, chunks=8) + adam = Adam(pipe.parameters()) + + Args: + partitions (int): + intended number of partitions + module (torch.nn.Sequential): + sequential module to be partitioned + input (torch.Tensor): + example mini-batch with the same size to train + + Keyword Args: + chunks (int): + number of micro-batches will be used to train (default: ``1``) + param_scale (float): + how many copies of parameters would be allocated for training. It + depends on optimizer. See the above guide. (default: ``2.0``) + device ('cuda' device): + CUDA device where each layer is profiled (default: the current CUDA + device) + + Returns: + A list of number of layers in each partition. Use it for the `balance` + parameter of :class:`~torchpipe.Pipe`. + + .. note:: + `module` and `input` must be placed on the same CUDA device. + + """ + sizes = profile_sizes(module, input, chunks, param_scale, torch.device(device)) + return balance_cost(sizes, partitions) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py new file mode 100644 index 0000000000000000000000000000000000000000..7afe782f6ac8c7c0585ae1f93e3ccfa7e25fce78 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py @@ -0,0 +1,95 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Implements "Block Partitions of Sequences" by Imre Bárány et al. + +Paper: https://arxiv.org/pdf/1308.2452.pdf + +""" +from typing import Iterator, List, Tuple + +__all__ = ["solve"] + + +def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]: + """Splits a sequence into several partitions to minimize variance for each + partition. + + The result might not be optimal. However, it can be done only in O(kn³), + where k is the number of partitions and n is the length of the sequence. 
+ + """ + if partitions < 1: + raise ValueError(f"partitions must be a positive integer ({partitions} < 1)") + + n = len(sequence) + if n < partitions: + raise ValueError(f"sequence is shorter than intended partitions ({n} < {partitions})") + + # Normalize the sequence in [0, 1]. + minimum = min(sequence) + maximum = max(sequence) - minimum + + normal_sequence: List[float] + if maximum == 0: + normal_sequence = [0 for _ in sequence] + else: + normal_sequence = [(x - minimum) / maximum for x in sequence] + + splits = [n // partitions * (x + 1) for x in range(partitions - 1)] + [n] + + def block_size(i: int) -> float: + start = splits[i - 1] if i > 0 else 0 + stop = splits[i] + return sum(normal_sequence[start:stop]) + + def leaderboard() -> Iterator[Tuple[float, int]]: + return ((block_size(i), i) for i in range(partitions)) + + while True: + """ + (1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P. + """ + # max_size: M(P) + max_size, p = max(leaderboard()) + + while True: + """ + (2) If M(P) ≤ m(P) + 1, then stop. + """ + # min_size: m(P) + min_size, q = min(leaderboard()) + + if max_size <= min_size + 1: + return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)] + + """ + (3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is + closest to p (ties broken arbitrarily). Thus Bq is a minimal block + of P. Let Bh be the block next to Bq between Bp and Bq. (Note that + Bh is a non-empty block: if it were, then m(P) = 0 and we should + have chosen Bh instead of Bq.) + """ + if p < q: + """ + So either p < q and then h = q−1 and we define P ∗ by moving + the last element from Bh = Bq−1 to Bq, + """ + h = q - 1 + splits[h] -= 1 + else: + """ + or q < p, and then h = q + 1 and P ∗ is obtained by moving the + first element of Bh = Bq+1 to Bq. + """ + h = q + 1 + splits[q] += 1 + + """ + Set P = P ∗ . If p = h, then go to (1), else go to (2). + """ + if p == h: + break diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py new file mode 100644 index 0000000000000000000000000000000000000000..fa1a0c06a8e3ac580d42cc0b34fb093126bc6333 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py @@ -0,0 +1,116 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Per-layer profilers.""" +import copy +import time +from typing import Any, Generator, List, Union, Sequence + +import torch +from torch import Tensor +import torch.nn as nn + +from ..microbatch import Batch + +__all__: List[str] = [] + + +Device = Union[torch.device, int, str] + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + + +def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]: + """Copies layers for ease to profile. It doesn't modify the given + module. 
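[Editor's sketch] As a small worked example of the partitioning loop above (the cost list is made up; the resulting splits follow from the code, not from any documented guarantee):

    from torch.distributed.pipeline.sync._balance import blockpartition

    costs = [1, 2, 3, 4, 5, 6]               # e.g. per-layer profiled times
    parts = blockpartition.solve(costs, partitions=2)
    print(parts)                              # [[1, 2, 3, 4], [5, 6]] -> block sums 10 and 11
    # balance_cost() in _balance/__init__.py then reduces this to layer counts: [4, 2]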
+ """ + for layer in module: + layer_copy = copy.deepcopy(layer) + layer_copy.to(device) + layer_copy.train() + yield layer_copy + + +def detach(batch: Batch) -> None: + """Detaches from autograd graph.""" + for i, x in enumerate(batch): + batch[i] = x.detach().requires_grad_(x.requires_grad) + + +def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]: + """Profiles elapsed times per layer.""" + if any(p.grad is not None for p in module.parameters()): + raise ValueError("some parameter already has gradient") + + _batch = Batch(sample) + for i, x in enumerate(_batch): + _batch[i] = x.detach().to(device).requires_grad_(x.requires_grad) + + time_bufs: List[List[float]] = [[] for _ in module] + begun_at = time.time() + + while time.time() - begun_at < timeout: + batch = _batch + + for i, layer in enumerate(layerwise_sandbox(module, device)): + detach(batch) + + if device.type == "cuda": + torch.cuda.synchronize(device) + tick = time.time() + + # Forward + batch = batch.call(layer) + + # Backward + backward_tensors = tuple(y for y in batch if y.requires_grad) + if backward_tensors: + torch.autograd.backward(backward_tensors, backward_tensors) + + if device.type == "cuda": + torch.cuda.synchronize(device) + tock = time.time() + + time_bufs[i].append(tock - tick) + + us = 1_000_000 + return [sum(int(t * us) for t in buf) for buf in time_bufs] + + +def profile_sizes( + module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device, +) -> List[int]: + """Profiles CUDA memory usage per layer.""" + if device.type != "cuda": + raise ValueError("size profiler supports only CUDA device") + + batch = Batch(input) + sizes: List[int] = [] + + latent_scale = batch[0].size(0) / chunks + for i, x in enumerate(batch): + batch[i] = x[:1].detach().to(device).requires_grad_(x.requires_grad) + + for layer in layerwise_sandbox(module, device): + detach(batch) + + # Detect memory usage at forward. + torch._C._cuda_clearCublasWorkspaces() + memory_before = torch.cuda.memory_allocated(device) + batch = batch.call(layer) + torch._C._cuda_clearCublasWorkspaces() + memory_after = torch.cuda.memory_allocated(device) + latent_size = memory_after - memory_before + + # Analyze size of parameters. + param_size = sum(p._typed_storage()._nbytes() for p in layer.parameters()) + + # Combine size of parameters and activations with normalize scales. + size = latent_size * latent_scale + param_size * param_scale + sizes.append(int(size)) + + return sizes diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..ad375f893318ec130c4b7777c7f557a6697f0091 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py @@ -0,0 +1,159 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+"""Tracks the running statistics per mini-batch instead of micro-batch.""" +from typing import TypeVar, cast + +import torch +from torch import Tensor, nn +from torch.nn.functional import batch_norm +from torch.nn.modules.batchnorm import _BatchNorm + +from .checkpoint import is_recomputing + +__all__ = ["DeferredBatchNorm"] + + +TModule = TypeVar("TModule", bound=nn.Module) + + +class DeferredBatchNorm(_BatchNorm): + """A BatchNorm layer tracks multiple micro-batches to update running statistics per mini-batch.""" + + sum: Tensor + sum_squares: Tensor + running_mean: Tensor + running_var: Tensor + num_batches_tracked: Tensor + + def __init__( + self, + num_features: int, + eps: float = 1e-5, + momentum: float = 0.1, + affine: bool = True, + chunks: int = 1, + ) -> None: + super().__init__(num_features, eps, momentum, affine, track_running_stats=True) + + self.register_buffer("sum", torch.zeros_like(self.running_mean)) + self.register_buffer("sum_squares", torch.zeros_like(self.running_var)) + + self.counter = 0 + self.tracked = 0 + self.chunks = chunks + + def _check_input_dim(self, input: Tensor) -> None: + # It's the typical _check_input_dim() implementation in PyTorch. + if input.dim() <= 2: + raise ValueError("expected at least 3D input (got %dD input)" % input.dim()) + + def _track(self, input: Tensor) -> bool: + """Tracks statistics of a micro-batch.""" + # Dimensions except channel. For example, (0, 2, 3) is for BatchNorm2d. + dim = [0] + dim.extend(range(2, input.dim())) + + with torch.no_grad(): + self.sum += input.sum(dim) + self.sum_squares += (input ** 2).sum(dim) + + size = input.size().numel() // input.size(1) + self.counter += size + self.tracked += 1 + + return self.tracked == self.chunks + + def _commit(self) -> None: + """Update the running statistics of a mini-batch.""" + exponential_average_factor = 0.0 + self.num_batches_tracked += 1 + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + mean = self.sum / self.counter + var = self.sum_squares / self.counter - mean ** 2 + + # Calculate the exponential moving average here. + m = exponential_average_factor + + self.running_mean *= 1 - m + self.running_mean += mean * m + + self.running_var *= 1 - m + self.running_var += var * m + + self.sum.zero_() + self.sum_squares.zero_() + self.counter = 0 + self.tracked = 0 + + def forward(self, input: Tensor) -> Tensor: + if not self.training: + # Don't train parameters on the evaluation mode. + return batch_norm( + input, + running_mean=self.running_mean, + running_var=self.running_var, + weight=self.weight, + bias=self.bias, + training=False, + momentum=0.0, + eps=self.eps, + ) + + if not is_recomputing(): + # Track a micro-batch on the training mode + # but not under a recomputation. + tracked_enough = self._track(input) + + # Update the running statistics for a mini-batch + # if it has tracked enough micro-batches. + if tracked_enough: + self._commit() + + # Normalize a micro-batch and train the parameters. 
+ return batch_norm( + input, + running_mean=None, + running_var=None, + weight=self.weight, + bias=self.bias, + training=True, + momentum=0.0, + eps=self.eps, + ) + + @classmethod + def convert_deferred_batch_norm(cls, module: TModule, chunks: int = 1) -> TModule: + """Converts a :class:`nn.BatchNorm` or underlying :class:`nn.BatchNorm`s into :class:`DeferredBatchNorm`:: + + from torchvision.models.resnet import resnet101 + from torchpipe.batchnorm import DeferredBatchNorm + model = resnet101() + model = DeferredBatchNorm.convert_deferred_batch_norm(model) + + """ + if isinstance(module, DeferredBatchNorm) and module.chunks is chunks: + return cast(TModule, module) + + module_output: nn.Module = module + + if isinstance(module, _BatchNorm) and module.track_running_stats: + module_output = DeferredBatchNorm(module.num_features, module.eps, module.momentum, module.affine, chunks) + if module.affine: + module_output.register_parameter("weight", module.weight) + module_output.register_parameter("bias", module.bias) + module_output.register_buffer("running_mean", module.running_mean) + module_output.register_buffer("running_var", module.running_var) + module_output.register_buffer("num_batches_tracked", module.num_batches_tracked) + + for name, child in module.named_children(): + module_output.add_module(name, cls.convert_deferred_batch_norm(child, chunks)) + + return cast(TModule, module_output) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py new file mode 100644 index 0000000000000000000000000000000000000000..b717f0c2932c607ec398f52adca7f820704a55e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py @@ -0,0 +1,108 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Autograd functions for stream-aware CUDA copy. + +It is used to overlap copy and computation on the same GPU. +""" +from collections import deque +from typing import Deque, List, Optional, Tuple, Sequence + +import torch +from torch import Tensor + +from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream + +__all__: List[str] = ["Context", "Copy", "Wait"] + + +Tensors = Sequence[Tensor] + + +# Common interface between :class:`Copy` and :class:`Wait`. +class Context: + prev_stream: AbstractStream + next_stream: AbstractStream + + +class Copy(torch.autograd.Function): + """Copies tensors on specific streams.""" + + @staticmethod + # type: ignore[override] + def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input,) -> Tensors: + ctx.prev_stream = prev_stream + ctx.next_stream = next_stream + + output = [] + output_stream = current_stream(get_device(next_stream)) + + with use_stream(prev_stream), use_stream(next_stream): + for x in input: + if torch.is_tensor(x): + y = x.to(get_device(next_stream), non_blocking=True) + output.append(y) + + # 'prev_stream' is not where 'x' has been allocated. + record_stream(x, prev_stream) + # 'y' has been allocated on 'next_stream'. + # It might be used on the current stream captured as 'output_stream'. 
+ record_stream(y, output_stream) + else: + output.append(x) + + return tuple(output) + + @staticmethod + def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: + prev_stream = ctx.prev_stream + next_stream = ctx.next_stream + + grad_input: Deque[Tensor] = deque(maxlen=len(grad_output)) + input_stream = current_stream(get_device(prev_stream)) + + with use_stream(prev_stream), use_stream(next_stream): + for x in reversed(grad_output): + y = x.to(get_device(prev_stream), non_blocking=True) + grad_input.appendleft(y) + + # 'next_stream' is not where 'x' has been allocated. + record_stream(x, next_stream) + # 'y' has been allocated on 'prev_stream'. + # It might be used on the current stream captured as 'input_stream'. + record_stream(y, input_stream) + + grad_streams: Tuple[Optional[Tensor], ...] = (None, None) + return grad_streams + tuple(grad_input) + + +class Wait(torch.autograd.Function): + """Synchronizes a stream to another stream. + + Place it just before you want to start an operation on the next stream, + provided that all operations on the previous stream are done. + + """ + + @staticmethod + # type: ignore[override] + def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input) -> Tensors: + ctx.prev_stream = prev_stream + ctx.next_stream = next_stream + + wait_stream(next_stream, prev_stream) + + return tuple(x.detach() if torch.is_tensor(x) else x for x in input) + + @staticmethod + def backward(ctx: Context, *grad_input: Tensor,) -> Tuple[Optional[Tensor], ...]: + prev_stream = ctx.prev_stream + next_stream = ctx.next_stream + + wait_stream(prev_stream, next_stream) + + grad_streams: Tuple[Optional[Tensor], ...] = (None, None) + return grad_streams + grad_input diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..5e61341d9ad9f36199ead474245f81eaaa95ef6f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py @@ -0,0 +1,490 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""The Pipe interface.""" +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Union, Sequence, Tuple, cast + +import torch +from torch import Tensor, nn +from torch.distributed.rpc import RRef +import torch.autograd +import torch.cuda + +from . 
import microbatch +from .batchnorm import DeferredBatchNorm +from .pipeline import Pipeline +from .skip.layout import inspect_skip_layout +from .skip.skippable import verify_skippables +from .stream import AbstractStream, new_stream + +__all__ = ["Pipe", "BalanceError", "PipeSequential", "WithDevice"] + + +Device = Union[torch.device, int, str] +Devices = Union[Iterable[Device], List[Device]] + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +if TYPE_CHECKING: + # Typechecking: nn.Module is not a Generic + Module = nn.Module[TensorOrTensors] # type: ignore[type-arg] + NamedModules = OrderedDict[str, Module] +else: + Module = nn.Module + NamedModules = OrderedDict + + +def _recommend_auto_balance(message: str) -> str: + """Expands a message with recommendation to :mod:`torchpipe.balance`.""" + return f"""{message} + +If your model is still under development, its optimal balance would change +frequently. In this case, we highly recommend 'torch.distributed.pipeline.sync.balance' for +naive automatic balancing: + + from torch.distributed.pipeline.sync import Pipe + from torch.distributed.pipeline.sync.balance import balance_by_time + + partitions = torch.cuda.device_count() + sample = torch.empty(...) + balance = balance_by_time(partitions, model, sample) + + model = Pipe(model, balance, ...) +""" + + +def _verify_module(module: nn.Sequential) -> None: + if not isinstance(module, nn.Sequential): + raise TypeError("module must be nn.Sequential to be partitioned") + + named_children = list(module.named_children()) + if len(named_children) != len(module): + raise ValueError("module with duplicate children is not supported") + + +def _verify_splitting( + module: nn.Sequential, partitions: List[nn.Sequential], devices: List[torch.device] +) -> None: + num_parameters = len(list(module.parameters())) + num_child_parameters = sum(len(list(child.parameters())) for child in module.children()) + if num_parameters == num_child_parameters: + return + + for i in range(len(partitions)): + for j in range(i + 1, len(partitions)): + parti = partitions[i] + partj = partitions[j] + if devices[i] == devices[j]: + continue + for p in parti.parameters(): + for q in partj.parameters(): + if p is q: + raise ValueError("module with duplicate parameters on distinct devices is not supported") + + +class BalanceError(ValueError): + pass + + +def _retrieve_device(module: nn.Module) -> torch.device: + """Validates all parameters in the Module have the same device and returns + the appropriate device. + + Args: + An ``nn.Module`` to process. + + Returns: + ``torch.Device`` for the entire module. + + Raises: + ValueError: + If devices for ``nn.Module`` parameters are not all same. + """ + + device = None + for parameter in module.parameters(): + if device is None: + device = parameter.device + elif device != parameter.device: + raise ValueError( + f'nn.Module: {module}, should have all parameters on a single device,' + ' please use .to() to place the module on a single device') + + return device if device is not None else torch.device("cpu") + + +class PipeSequential(nn.Sequential): + """ + Pipe variant of ``nn.Sequential`` which supports multiple inputs. 
+ """ + + def forward(self, *inputs): + for module in self: + if isinstance(inputs, Tuple): # type: ignore[arg-type] + inputs = module(*inputs) + else: + # Don't expand single variables (ex: lists/Tensor) + inputs = module(inputs) + return inputs + + +class WithDevice(nn.Module): + """ + Wraps an ``nn.Module`` which is part of ``nn.Sequential`` passed into :class:`Pipe` + that overrides the device for that module. In cases where :class:`Pipe` + can't implicitly determine the device for the module and places it on CPU, + this wrapper can be used to override the implicit behavior and explicitly + specify which device a module should run on. + + The provided module is also moved to the given device via ``.to(device)`` + by :class:`Pipe` + + Args: + module(:class:`torch.nn.Module`): The module to be wrapped. + device(:class:`torch.device`): The device to run the module on. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> fc1 = nn.Linear(16, 8).cuda(0) + >>> fc2 = nn.Linear(8, 4).cuda(1) + >>> dropout = nn.Dropout() + >>> + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1) + >>> # Dropout does not have any parameters/buffers, but we want to + >>> # run it on cuda:1 to avoid any GPU to CPU transfers. + >>> model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1')) + >>> # xdoctest: +SKIP("Needs RPC framework init") + >>> model = Pipe(model, chunks=8) + """ + def __init__(self, module: nn.Module, device: torch.device): + super().__init__() + self._module = module + self._device = torch.device(device) + + def forward(self, *args, **kwargs): + return self._module(*args, **kwargs) + + @property + def module(self): + return self._module + + @property + def device(self): + return self._device + + +def _assemble_partition(modules: List[nn.Module]): + modules_list: List[nn.Module] = [] + for module in modules: + if isinstance(module, nn.Sequential): + modules_list.extend(module.children()) + else: + modules_list.append(module) + return PipeSequential(*modules_list) + + +def _split_module(modules: nn.Sequential) -> Tuple[List[nn.Sequential], List[torch.device]]: + partitions = [] + devices = [] + + current_partition = [] + current_device = None + for name, module in modules.named_children(): + if isinstance(module, WithDevice): + # Process device override and move module to appropriate device. + device = module.device + module = module.module + module.to(device) + else: + device = _retrieve_device(module) + if current_device is not None and (current_device != device or device.type == 'cpu'): + partitions.append(_assemble_partition(current_partition)) + devices.append(current_device) + current_partition = [] + current_device = device + current_partition.append(module) + + if current_device is not None: + partitions.append(_assemble_partition(current_partition)) + devices.append(current_device) + + partitions = cast(List[nn.Sequential], nn.ModuleList(partitions)) + + return partitions, devices + + +MOVING_DENIED = TypeError("denied to move parameters and buffers, because Pipe should manage device placement") + + +class Pipe(Module): + """Wraps an arbitrary :class:`nn.Sequential ` module + to train on using synchronous pipeline parallelism. If the module requires + lots of memory and doesn't fit on a single GPU, pipeline parallelism is a + useful technique to employ for training. + + The implementation is based on the torchgpipe_ paper. + + .. 
_torchgpipe: https://arxiv.org/abs/2004.09910 + + Pipe combines pipeline parallelism with checkpointing to reduce peak + memory required to train while minimizing device under-utilization. + + You should place all the modules on the appropriate devices and wrap them + into an :class:`nn.Sequential ` module defining the + desired order of execution. If a module does not contain any + parameters/buffers, it is assumed this module should be executed on CPU + and appropriate input tensors to the module are moved to CPU before + execution. This behavior can be overridden by the :class:`WithDevice` + wrapper which can be used to explicitly specify which device a module + should run on. + + Args: + module (:class:`nn.Sequential `): + sequential module to be parallelized using pipelining. Each module + in the sequence has to have all of its parameters on a single + device. Each module in the sequence has to either be an nn.Module + or :class:`nn.Sequential ` (to combine multiple + sequential modules on a single device) + chunks (int): + number of micro-batches (default: ``1``) + checkpoint (str): + when to enable checkpointing, one of ``'always'``, + ``'except_last'``, or ``'never'`` (default: ``'except_last'``). + ``'never'`` disables checkpointing completely, ``'except_last'`` + enables checkpointing for all micro-batches except the last one + and ``'always'`` enables checkpointing for all micro-batches. + deferred_batch_norm (bool): + whether to use deferred ``BatchNorm`` moving statistics (default: + :data:`False`). If set to :data:`True`, we track statistics across + multiple micro-batches to update the running statistics per + mini-batch. + + Raises: + TypeError: + the module is not a :class:`nn.Sequential `. + ValueError: + invalid arguments + + Example:: + Pipeline of two FC layers across GPUs 0 and 1. + + >>> # Need to initialize RPC framework first. + >>> # xdoctest: +SKIP + >>> os.environ['MASTER_ADDR'] = 'localhost' + >>> os.environ['MASTER_PORT'] = '29500' + >>> torch.distributed.rpc.init_rpc('worker', rank=0, world_size=1) + >>> + >>> # Build pipe. + >>> fc1 = nn.Linear(16, 8).cuda(0) + >>> fc2 = nn.Linear(8, 4).cuda(1) + >>> model = nn.Sequential(fc1, fc2) + >>> model = Pipe(model, chunks=8) + >>> input = torch.rand(16, 16).cuda(0) + >>> output_rref = model(input) + + .. note:: + You can wrap a :class:`Pipe` model with + :class:`torch.nn.parallel.DistributedDataParallel` only when the + checkpoint parameter of :class:`Pipe` is ``'never'``. + + .. note:: + :class:`Pipe` only supports intra-node pipelining currently, but + will be expanded to support inter-node pipelining in the future. + The forward function returns an :class:`~torch.distributed.rpc.RRef` + to allow for inter-node pipelining in the future, where the output + might be on a remote host. For intra-node pipelining you can use + :meth:`~torch.distributed.rpc.RRef.local_value` to retrieve the + output locally. + + .. warning:: + :class:`Pipe` is experimental and subject to change. + """ + + def __init__( + self, + module: nn.Sequential, + chunks: int = 1, + checkpoint: str = "except_last", + deferred_batch_norm: bool = False, + ) -> None: + super().__init__() + + # Check if RPC framework is initialized. 
+ if not torch.distributed.rpc._is_current_rpc_agent_set(): + raise RuntimeError( + 'Please initialize RPC framework for Pipe using ' + 'torch.distributed.rpc.init_rpc') + + chunks = int(chunks) + checkpoint = str(checkpoint) + + if chunks <= 0: + raise ValueError("number of chunks must be positive integer") + if checkpoint not in ["always", "except_last", "never"]: + raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'") + + _verify_module(module) + + # Verify if the underlying skippable modules satisfy integrity. The + # integrity can be verified before forward() because it is static. + verify_skippables(module) + + self.chunks = chunks + self.checkpoint = checkpoint + + if deferred_batch_norm: + module = DeferredBatchNorm.convert_deferred_batch_norm(module, chunks) + + self.partitions, self.devices = _split_module(module) + _verify_splitting(module, self.partitions, self.devices) + + self._copy_streams: List[List[AbstractStream]] = [] + self._skip_layout = inspect_skip_layout(self.partitions) + + # Separate CUDA streams for copy. + copy_streams = self._ensure_copy_streams() + + # The micro-batch index where the checkpointing stops. + checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint] + + self.pipeline = Pipeline(self.partitions, self.devices, copy_streams, self._skip_layout, checkpoint_stop) + + def __len__(self) -> int: + """Counts the length of the underlying sequential module.""" + return sum(len(p) for p in self.partitions) + + def __getitem__(self, index: int) -> nn.Module: + """Gets a layer in the underlying sequential module.""" + partitions = self.partitions + if index < 0: + partitions = partitions[::-1] + + for partition in partitions: + try: + return partition[index] + except IndexError: + pass + + shift = len(partition) + + if index < 0: + index += shift + else: + index -= shift + + raise IndexError + + def __iter__(self) -> Iterator[nn.Module]: + """Iterates over children of the underlying sequential module.""" + for partition in self.partitions: + yield from partition + + # Pipe should manage the device of each partition. + # Deny cuda(), cpu(), and to() with device, by TypeError. + def cuda(self, device: Optional[Device] = None) -> "Pipe": + raise MOVING_DENIED + + def cpu(self) -> "Pipe": + raise MOVING_DENIED + + def to(self, *args: Any, **kwargs: Any) -> "Pipe": + # Deny these usages: + # + # - to(device[, dtype, non_blocking]) + # - to(tensor[, non_blocking]) + # + # But allow this: + # + # - to(dtype[, non_blocking]) + # + if "device" in kwargs or "tensor" in kwargs: + raise MOVING_DENIED + + if args: + if isinstance(args[0], (torch.device, int, str)): + raise MOVING_DENIED + if torch.is_tensor(args[0]): + raise MOVING_DENIED + + return super().to(*args, **kwargs) + + def _ensure_copy_streams(self) -> List[List[AbstractStream]]: + """Ensures that :class:`Pipe` caches CUDA streams for copy. + + It's worth to cache CUDA streams although PyTorch already manages a + pool of pre-allocated CUDA streams, because it may reduce GPU memory + fragmentation when the number of micro-batches is small. + + """ + if not self._copy_streams: + for device in self.devices: + self._copy_streams.append([new_stream(device) for _ in range(self.chunks)]) + + return self._copy_streams + + def forward(self, *inputs) -> RRef: + """ + Processes a single input mini-batch through the pipe and returns an + :class:`~torch.distributed.rpc.RRef` pointing to the output. + :class:`Pipe` is a fairly transparent module wrapper. 
It doesn't + modify the input and output signature of the underlying module. But + there's type restriction. Input and output have to contain at least one + tensor. This restriction is applied at partition boundaries too. + + The sequence of inputs are fed into the first stage of the pipeline as + ``*inputs``. As a result the positional args for this function should + match the positional args for the first stage of the pipeline. The same + condition applies for output of one stage of the pipeline which is the + input for the next stage. + + The input tensor is split into multiple micro-batches based on the + ``chunks`` parameter used to initialize :class:`Pipe`. The batch size + is assumed to be the first dimension of the tensor and if the batch + size is less than ``chunks``, the number of micro-batches is equal to + the batch size. + + Only tensors are split into multiple micro-batches, non-Tensor inputs + are just replicated as-is in each micro-batch. For non-Tensor outputs + in the last stage of the pipeline, they are aggregated as a ``List`` + and returned the user. For example, if you have 2 micro-batches + returning the integer 5, the user would receive the consolidated + output of `[5, 5]` + + All the input tensors need to be on the same device as the first + partition of the pipeline. + + If a tensor is wrapped with the :class:`NoChunk` wrapper, the tensor + is not split across micro-batches and is replicated as-is similar to + non-tensors. + + Args: + inputs: input mini-batch + + Returns: + :class:`~torch.distributed.rpc.RRef` to the output of the mini-batch + + Raises: + TypeError: input doesn't contain at least one tensor + + """ + first_partition_device = self.devices[0] if len(self.devices) != 0 else torch.device("cpu") + microbatch.check(first_partition_device, *inputs) + + if not self.devices: + # Empty sequential module is not illegal. + return RRef(*inputs) + + # Divide a mini-batch into micro-batches. + batches = microbatch.scatter(*inputs, chunks=self.chunks) + + # Run pipeline parallelism. + self.pipeline.run(batches) + + # Merge the micro-batches into one mini-batch. + output = microbatch.gather(batches) + return RRef(output) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bdcb913867a735374cb1df625bbacfc2802b5c1e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py @@ -0,0 +1,11 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+"""Supports efficiency with skip connections.""" +from .namespace import Namespace +from .skippable import pop, skippable, stash, verify_skippables + +__all__ = ["skippable", "stash", "pop", "verify_skippables", "Namespace"] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec6052f9a51941d15fc43625980c94c2ea960a18 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8295957e4c2b4cb4280aee7139d2e34ee7e67fc6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ab8eea2c47a792e8ae0974cf8e5c66589e641bf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f15be1d771d07fd2aeba13e581256e555fdde1a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd59f049ff40fe805ee36be14f79cb2615fb97bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a85b5ef8690d71f347fabe2e6d07cd6713a2e54 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py new file mode 100644 index 0000000000000000000000000000000000000000..04d76d34ea16640c94e3d377e3f7ba70ab1689bf 
--- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py @@ -0,0 +1,92 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Static skip connection layout of ``@skippable`` modules.""" +from typing import Dict, Iterable, List, Tuple + +from torch import nn + +from .namespace import Namespace + +__all__: List[str] = [] + + +class SkipLayout: + """Represents a skip connection layout across partitions.""" + + # Skip routes indexed by 'ns, name': {(ns, name): (prev_j, next_j), ...} + by_ns_name: Dict[Tuple[Namespace, str], Tuple[int, int]] + + # Skip routes indexed by partition number 'j': [[next_j]: [(prev_j, ns, name), ...], ...] + by_partition: List[List[Tuple[int, Namespace, str]]] + + def __init__(self, num_partitions: int, skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]],) -> None: + # The skip routes are already indexed by 'ns, name'. + self.by_ns_name = skip_routes + + # Index skip routes by partition number 'j'. + self.by_partition = [[] for _ in range(num_partitions)] + + for (ns, name), (prev_j, next_j) in skip_routes.items(): + self.by_partition[next_j].append((prev_j, ns, name)) + + for p in self.by_partition: + p.sort() + + def copy_policy(self, next_j: int) -> Iterable[Tuple[int, Namespace, str]]: + """Generates skip routes for the given destination partition number. + The skip routes are sorted by source partition number in ascending + order. + + Yields: + Each tuple of (source partition number, namespace, name). + + """ + for prev_j, ns, name in self.by_partition[next_j]: + if prev_j == next_j: + # This skip tensor will be popped at the same partition where + # it is stashed. In this case, copy is not required. + continue + + yield (prev_j, ns, name) + + def requires_copy(self, ns: Namespace, name: str) -> bool: + """Whether the given namespace and name requires partition-to-partition + copy or not. + """ + prev_j, next_j = self.by_ns_name.get((ns, name), (-1, -1)) + return prev_j != next_j + + +def inspect_skip_layout(partitions: List[nn.Sequential]) -> SkipLayout: + """Inspects the skip connection layout in the given partitions.""" + # NOTE(sublee): Hide circular import inside this subroutine. Circular + # import is not ideal but placing this logic near to SkipLayout may + # increase cohesion of code. 
+ from .skippable import Skippable + + skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]] = {} + stashed_at: Dict[Tuple[Namespace, str], int] = {} + + for j, partition in enumerate(partitions): + def inspect_layer(layer): + if not isinstance(layer, Skippable): + return + + for ns, name in layer.stashable(): + stashed_at[(ns, name)] = j + + for ns, name in layer.poppable(): + prev_j = stashed_at.pop((ns, name)) + skip_routes[(ns, name)] = (prev_j, j) + + if isinstance(partition, nn.Sequential): + for layer in partition: + inspect_layer(layer) + else: + inspect_layer(partition) + + return SkipLayout(len(partitions), skip_routes) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py new file mode 100644 index 0000000000000000000000000000000000000000..67218c3678e418df5f1ab9851d9e4e918ec308b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py @@ -0,0 +1,50 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Provides isolated namespace of skip tensors.""" +import abc +from functools import total_ordering +from typing import Any +import uuid + +__all__ = ["Namespace"] + + +@total_ordering +class Namespace(metaclass=abc.ABCMeta): + """Namespace for isolating skip tensors used by :meth:`isolate() + `. + """ + + __slots__ = ("id",) + + def __init__(self) -> None: + self.id = uuid.uuid4() + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash(self.id) + + # Namespaces should support ordering, since SkipLayout will sort tuples + # including a namespace. But actual order between namespaces is not + # important. That's why they are ordered by version 4 UUID which generates + # random numbers. + def __lt__(self, other: Any) -> bool: + if isinstance(other, Namespace): + return self.id < other.id + return False + + def __eq__(self, other: object) -> bool: + if isinstance(other, Namespace): + return self.id == other.id + return False + + +# 'None' is the default namespace, +# which means that 'isinstance(None, Namespace)' is 'True'. +Namespace.register(type(None)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py new file mode 100644 index 0000000000000000000000000000000000000000..f3484a1b69d57b087787badb2915b5efc94adeb8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py @@ -0,0 +1,231 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Portal keeps a tensor in the pocket plane. The tensor becomes hidden to the +autograd engine. The shared context of three functions (:class:`PortalBlue`, +:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is +one of the most important feature of :mod:`torchpipe.skip`. + +The metaphor is inspired by Portal™ from Valve. 
+ +""" +from typing import List, Optional, Tuple + +import torch +from torch import Tensor + +from ..copy import Context as CopyContext +from ..copy import Copy +from ..phony import get_phony +from ..stream import AbstractStream, get_device + +__all__: List[str] = [] + + +class Portal: + """A portal for a tensor.""" + + def __init__(self, tensor: Optional[Tensor], tensor_life: int) -> None: + self.put_tensor(tensor, tensor_life) + self.grad: Optional[Tensor] = None + + def blue(self) -> Tensor: + """Creates a :class:`PortalBlue` which hides the underlying tensor from + the autograd engine. + + Join the returning phony to the main lane of the autograd graph to + assure the correct backpropagation:: + + PortalBlue --+ + | + ---------- Join -- + + """ + tensor = self.use_tensor() + + if tensor is None: + return get_phony(torch.device("cpu"), requires_grad=False) + + return PortalBlue.apply(self, tensor) + + def orange(self, phony: Tensor) -> Optional[Tensor]: + """Creates a :class:`PortalOrange` which retrieves the hidden tensor + without losing ability of backpropagation. + + Give a phony forked from the main lane of an autograd graph:: + + +-- PortalOrange --+ + | | + -- Fork --------- f(a, b) -- + + """ + self.check_tensor_life() + + if self.tensor is None: + return self.use_tensor() + + return PortalOrange.apply(self, phony) + + def copy(self, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor,) -> Tensor: + """Copies the hidden tensor by a :class:`PortalCopy`. + + Give a phony and use the returning phony to keep backpropagation:: + + +-- PortalCopy --+ + | | + -- Fork ---------- Join -- + + """ + if self.tensor is None: + return get_phony(torch.device("cpu"), requires_grad=False) + + return PortalCopy.apply(self, prev_stream, next_stream, phony) + + def check_tensor_life(self) -> None: + if self.tensor_life <= 0: + raise RuntimeError("tensor in portal has been removed") + + def put_tensor(self, tensor: Optional[Tensor], tensor_life: int) -> None: + """Stores a tensor into this portal.""" + # [Life of Tensor through Portal] + # + # The tensor can be retrieved by use_tensor() up to 'tensor_life' + # times. When the life becomes 0, the tensor will be deleted for + # deallocation in CUDA memory. + # + # The below events participate in a tensor through a portal. + # Note that [x] denotes the events which call use_tensor(): + # + # 1. [x] blue() + # 2. [ ] PortalBlue.forward + # 3. [ ] copy() + # 4. [ ] PortalCopy.forward + # 5. [ ] orange() + # 6. [x] PortalOrange.forward + # - - - - - - - - - - - - - - - - - - - - - - - - - - - + # 7. [ ] orange() (recomputed) + # 8. [x] PortalOrange.forward (recomputed) + # 9. [ ] PortalOrange.backward + # 10. [ ] PortalCopy.backward + # 11. [x] blue() (recomputed) + # 12. [ ] PortalBlue.forward (recomputed) + # 13. [ ] PortalBlue.backward + # + self.tensor_life = tensor_life + + if tensor_life > 0: + self.tensor = tensor + else: + self.tensor = None + + def use_tensor(self) -> Optional[Tensor]: + """Retrieves the underlying tensor and decreases the tensor life. When + the life becomes 0, it the tensor will be removed. + """ + self.check_tensor_life() + + tensor = self.tensor + + self.tensor_life -= 1 + + if self.tensor_life <= 0: + self.tensor = None + + return tensor + + def put_grad(self, grad: Tensor) -> None: + """Stores a gradient into this portal.""" + self.grad = grad + + def use_grad(self) -> Tensor: + """Retrieves and removes the underlying gradient. The gradient is + always ephemeral. 
+ """ + if self.grad is None: + raise RuntimeError("grad in portal has been removed or never set") + + grad = self.grad + self.grad = None + return grad + + +# Common interface between :class:`PortalBlue`, :class:`PortalOrange`, and +# :class:`PortalCopy`. +class Context(CopyContext): + portal: Portal + + +class PortalBlue(torch.autograd.Function): + """Hides a tensor from the autograd engine by a :class:`Portal`.""" + + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, + portal: Portal, + # This tensor must be retrieved by portal.use_tensor(). + tensor: Tensor, + ) -> Tensor: + ctx.portal = portal + + phony = get_phony(tensor.device, requires_grad=False) + return phony.detach() + + @staticmethod + # type: ignore[override] + def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, Tensor]: + # The paired PortalOrange should keep the gradient. + grad = ctx.portal.use_grad() + return None, grad + + +class PortalOrange(torch.autograd.Function): + """Retrieves the hidden tensor from a :class:`Portal`.""" + + @staticmethod + # type: ignore[override] + def forward(ctx: Context, portal: Portal, phony: Tensor) -> Tensor: + ctx.portal = portal + + tensor = portal.use_tensor() + assert tensor is not None + + return tensor.detach() + + @staticmethod + def backward(ctx: Context, grad: Tensor) -> Tuple[None, None]: # type: ignore[override] + # The paired PortalBlue will use the gradient. + ctx.portal.put_grad(grad) + return None, None + + +class PortalCopy(torch.autograd.Function): + """Copies the hidden tensor in a :class:`Portal`. It replaces the hidden + tensor with copied one. + """ + + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, portal: Portal, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor, + ) -> Tensor: + ctx.portal = portal + + assert portal.tensor is not None + (portal.tensor,) = Copy.forward(ctx, prev_stream, next_stream, portal.tensor) + + phony = get_phony(get_device(next_stream), requires_grad=False) + return phony.detach() + + @staticmethod + # type: ignore[override] + def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, None, None, None]: + portal = ctx.portal + + assert portal.grad is not None + _, _, portal.grad = Copy.backward(ctx, portal.grad) + + return None, None, None, None diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py new file mode 100644 index 0000000000000000000000000000000000000000..0c01a198f804361185c527ac086694a67a0f673e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py @@ -0,0 +1,431 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+"""The user interface to define skip connections.""" +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + FrozenSet, + Generator, + Iterable, + List, + Optional, + Set, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from torch import Tensor, nn + +from ..microbatch import Batch +from .namespace import Namespace +from .tracker import current_skip_tracker + +__all__ = ["skippable", "stash", "pop", "verify_skippables"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +StashPop = Union["stash", "pop"] +StashPopGenerator = Generator[StashPop, Optional[Tensor], TensorOrTensors] +if TYPE_CHECKING: + # Typechecking: nn.Module is not a Generic + SkippableModule = nn.Module[Union[StashPopGenerator, TensorOrTensors]] # type: ignore[type-arg] +else: + SkippableModule = nn.Module + +T = TypeVar("T", bound="Skippable") + + +class Skippable(nn.Module): + """The base class for skippable modules. + + Do not use this class directly. Define a subclass by :func:`skippable` + instead. + + """ + + module_cls: ClassVar[Type[SkippableModule]] + stashable_names: ClassVar[FrozenSet[str]] + poppable_names: ClassVar[FrozenSet[str]] + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__() + self.module = self.module_cls(*args, **kwargs) # type: ignore[call-arg] + self.namespaces: Dict[str, Namespace] = {} + + def __repr__(self) -> str: + return f"@skippable({self.module})" + + def namespaced(self, name: str) -> Tuple[Namespace, str]: + """Prepend namespace for the given skip name.""" + ns = self.namespaces.get(name) + ns = cast(Namespace, ns) + return (ns, name) + + def stashable(self) -> Iterable[Tuple[Namespace, str]]: + """Iterate over namespaced skip names to be stashed.""" + for name in self.stashable_names: + yield self.namespaced(name) + + def poppable(self) -> Iterable[Tuple[Namespace, str]]: + """Iterate over namespaced skip names to be popped.""" + for name in self.poppable_names: + yield self.namespaced(name) + + def isolate(self: T, ns: Namespace, *, only: Optional[Iterable[str]] = None) -> T: + r"""Isolate a specified subset or the whole set of skip tensors. + + In a single sequential module, skip tensors with the same + name are not allowed unless they are isolated by different namespaces. + + Here's an example using the same name for skip tensors twice. Each pair + of ``Layer1`` and ``Layer2`` is isolated with its own namespace ``ns1`` + and ``ns2``. There is no conflict anymore:: + + ns1 = Namespace() + ns2 = Namespace() + + model = nn.Sequential( + Layer1().isolate(ns1), + Layer1().isolate(ns2), + Layer2(), + Layer3().isolate(ns2), + Layer3().isolate(ns1), + ) + + When `only` parameter is omitted, all skip tensors are isolated. You + can isolate a subset of skip tensors by passing `only` parameter:: + + ns_alice = Namespace() + ns_bob = Namespace() + + model = nn.Sequential( + ... + StashStashPop().isolate(ns_alice, only=['alice']) \ + .isolate(ns_bob, only=['bob']), + ... 
+ ) + + Args: + ns (Namespace): + namespace for isolation + + Keyword Args: + only (iterable of strs): + names of specific skip tensors to be isolated (omit this option + to isolate all skip tensors declared in this module) + + Returns: + this module itself + + """ + names: Iterable[str] + + if only is None: + names = self.stashable_names | self.poppable_names + else: + names = set(only) + + for name in names: + self.namespaces[name] = ns + + return self + + def dispatch( + self, + input, + handle_stash: Callable[[str, Optional[Tensor]], None], + handle_pop: Callable[[str], Optional[Tensor]], + ): + """Dispatch :class:`stash` or :class:`pop` commands. + + The commands are generated by the module's ``forward()``. + """ + generator = self.module(input) + + if not isinstance(generator, Generator): + # The underlying module returned output without any yield. + output = generator + return output + + try: + op = next(generator) + + while True: + if isinstance(op, stash): + handle_stash(op.name, op.tensor) + op = next(generator) + continue + + if isinstance(op, pop): + tensor = handle_pop(op.name) + op = generator.send(tensor) + continue + + raise TypeError(f"{op!r} is not a command from @skippable") + + except StopIteration as stop: + output = stop.args[0] + return output + + def forward(self, input: Union[List[Any], Tensor]) -> TensorOrTensors: + """Perform the forward propagation. + + :class:`stash` or :class:`pop` commands will be handled by portals + silently. The portals won't be exposed to users. + + Raises: + RuntimeError: + illegal 'stash' or 'pop' is found. + + """ + skip_tracker = current_skip_tracker() + stashed_tensors: Dict[str, Optional[Tensor]] = {} + + # Load skip tensors that might be popped. + poppable_tensors = {} + batch = Batch(input) + for ns, name in self.poppable(): + try: + poppable_tensors[name] = skip_tracker.load(batch, ns, name) + except KeyError as e: + raise RuntimeError(f"'{name}' has not been stashed") from e + input = batch.values + + # Handle skip commands. + def handle_stash(name: str, tensor: Optional[Tensor]) -> None: + if name not in self.stashable_names: + raise RuntimeError(f"'{name}' has not been declared as stashable") + stashed_tensors[name] = tensor + + def handle_pop(name: str) -> Optional[Tensor]: + if name not in self.poppable_names: + raise RuntimeError(f"'{name}' has not been declared as poppable") + return poppable_tensors.pop(name) + + output = self.dispatch(input, handle_stash, handle_pop) + + # All declared skips must be stashed or popped. + not_stashed = self.stashable_names - stashed_tensors.keys() + if not_stashed: + comma_names = ", ".join(f"'{n}'" for n in not_stashed) + raise RuntimeError(f"{comma_names} must be stashed but have not") + + not_popped = poppable_tensors.keys() + if not_popped: + comma_names = ", ".join(f"'{n}'" for n in not_popped) + raise RuntimeError(f"{comma_names} must be popped but have not") + + # Save stashed skip tensors. + batch = Batch(output) + for ns, name in self.stashable(): + tensor = stashed_tensors[name] + skip_tracker.save(batch, ns, name, tensor) + output = batch.values + + return output + + +# TODO(sublee): Move to above of Skippable class for better read flow. +def skippable( + stash: Iterable[str] = (), pop: Iterable[str] = (), +) -> Callable[[Type[SkippableModule]], Type[Skippable]]: + """Define a decorator to create :class:`nn.Module ` with skip connections. + + These decorated modules are called "skippable". 
This functionality works perfectly + fine even when the module is not wrapped by :class:`~torch.distributed.pipeline.sync.Pipe`. + + Each skip tensor is managed by its name. Before manipulating skip tensors, + a skippable module must statically declare the names for skip tensors by + `stash` and/or `pop` parameters. Skip tensors with pre-declared name can be + stashed by ``yield stash(name, tensor)`` or popped by ``tensor = yield + pop(name)``. + + Here is an example with three layers. A skip tensor named "1to3" is stashed + and popped at the first and last layer, respectively:: + + @skippable(stash=['1to3']) + class Layer1(nn.Module): + def forward(self, input): + yield stash('1to3', input) + return f1(input) + + class Layer2(nn.Module): + def forward(self, input): + return f2(input) + + @skippable(pop=['1to3']) + class Layer3(nn.Module): + def forward(self, input): + skip_1to3 = yield pop('1to3') + return f3(input) + skip_1to3 + + model = nn.Sequential(Layer1(), Layer2(), Layer3()) + + One skippable module can stash or pop multiple skip tensors:: + + @skippable(stash=['alice', 'bob'], pop=['carol']) + class StashStashPop(nn.Module): + def forward(self, input): + yield stash('alice', f_alice(input)) + yield stash('bob', f_bob(input)) + carol = yield pop('carol') + return input + carol + + Every skip tensor must be associated with exactly one pair of `stash` and + `pop`. :class:`~torch.distributed.pipeline.sync.Pipe` checks this + restriction automatically when wrapping a module. You can also check the + restriction by :func:`verify_skippables` + without :class:`~torch.distributed.pipeline.sync.Pipe`. + + """ + stashable_names = frozenset(stash) + poppable_names = frozenset(pop) + + def extend_skippable(module_cls: Type[SkippableModule]) -> Type[Skippable]: + name = module_cls.__name__ + bases = (Skippable,) + attrs = {"module_cls": module_cls, "stashable_names": stashable_names, "poppable_names": poppable_names} + return type(name, bases, attrs) + + return extend_skippable + + +class stash: + """The command to stash a skip tensor. + + :: + + def forward(self, input): + yield stash('name', input) + return f(input) + + Args: + name (str): name of skip tensor + input (torch.Tensor or None): tensor to pass to the skip connection + + """ + + __slots__ = ("name", "tensor") + + def __init__(self, name: str, tensor: Optional[Tensor]) -> None: + self.name = name + self.tensor = tensor + + +class pop: + """The command to pop a skip tensor. + + :: + + def forward(self, input): + skip = yield pop('name') + return f(input) + skip + + Args: + name (str): name of skip tensor + + Returns: + the skip tensor previously stashed by another layer under the same name + + """ + + __slots__ = ("name",) + + def __init__(self, name: str) -> None: + self.name = name + + +def verify_skippables(module: nn.Sequential) -> None: + """Verify if the underlying skippable modules satisfy integrity. + + Every skip tensor must have only one pair of `stash` and `pop`. If there + are one or more unmatched pairs, it will raise :exc:`TypeError` with the + detailed messages. + + Here are a few failure cases. :func:`verify_skippables` will report failure + for these cases:: + + # Layer1 stashes "1to3". + # Layer3 pops "1to3". + + nn.Sequential(Layer1(), Layer2()) + # └──── ? + + nn.Sequential(Layer2(), Layer3()) + # ? 
────┘ + + nn.Sequential(Layer1(), Layer2(), Layer3(), Layer3()) + # └───────────────────┘ ^^^^^^ + + nn.Sequential(Layer1(), Layer1(), Layer2(), Layer3()) + # ^^^^^^ └───────────────────┘ + + To use the same name for multiple skip tensors, they must be isolated by + different namespaces. See :meth:`isolate() + `. + + Raises: + TypeError: + one or more pairs of `stash` and `pop` are not matched. + + """ + stashed: Set[Tuple[Namespace, str]] = set() + popped: Set[Tuple[Namespace, str]] = set() + msgs: List[str] = [] + + for layer_name, layer in module.named_children(): + if not isinstance(layer, Skippable): + continue + + for name in layer.stashable_names & layer.poppable_names: + msg = f"'{layer_name}' declared '{name}' both as stashable and as poppable" + msgs.append(msg) + + for ns, name in layer.stashable(): + if name in layer.poppable_names: + continue + + if (ns, name) in stashed: + msg = f"'{layer_name}' redeclared '{name}' as stashable but not isolated by namespace" + msgs.append(msg) + continue + + stashed.add((ns, name)) + + for ns, name in layer.poppable(): + if name in layer.stashable_names: + continue + + if (ns, name) in popped: + msg = f"'{layer_name}' redeclared '{name}' as poppable but not isolated by namespace" + msgs.append(msg) + continue + + if (ns, name) not in stashed: + msg = f"'{layer_name}' declared '{name}' as poppable but it was not stashed" + msgs.append(msg) + continue + + popped.add((ns, name)) + + for (_, name) in stashed - popped: + msg = f"no module declared '{name}' as poppable but stashed" + msgs.append(msg) + + if msgs: + raise TypeError( + "one or more pairs of stash and pop do not match:\n\n%s" "" % "\n".join("* %s" % x for x in msgs) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac82bc05dc9457626aee240cd110eb936b9f176 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py @@ -0,0 +1,180 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Tracks skip tensors on a thread.""" +from contextlib import contextmanager +import threading +from typing import Dict, Generator, List, Optional, Tuple + +from torch import Tensor + +from ..checkpoint import is_checkpointing +from ..dependency import fork, join +from ..microbatch import Batch +from ..stream import AbstractStream +from .layout import SkipLayout +from .namespace import Namespace +from .portal import Portal + +__all__: List[str] = [] + + +class SkipTracker: + """Tracks saved skip tensors. + + It will update the given micro-batch in place. This is because when it + manipulates the underlying skip tensors, the current micro-batch also has + to be connected with the skip tensors. + + One thread has one skip tracker. Call :func:`current_skip_tracker` to get + the skip tracker on the current thread. 
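A hedged sketch of the plain (non-portal) tracker described above, which simply keeps skip tensors in a per-thread dict keyed by ``(ns, name)``; the ``batch`` argument is ignored by this base implementation, and ``ns=None`` relies on the default namespace:

tracker = current_skip_tracker()    # lazily created, one per thread
tracker.save(batch=None, ns=None, name="x", tensor=torch.zeros(1))
t = tracker.load(batch=None, ns=None, name="x")   # pops the stored tensor
assert t is not None and (None, "x") not in tracker.tensors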
+ + """ + + def __init__(self) -> None: + self.tensors: Dict[Tuple[Namespace, str], Optional[Tensor]] = {} + + def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None: + self.tensors[(ns, name)] = tensor + + def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]: + return self.tensors.pop((ns, name)) + + def copy( + self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str, + ) -> None: + raise TypeError("copy is not supported for non-portal skip tensors") + + +class SkipTrackerThroughPotals(SkipTracker): + """Tracks saved skip tensors through portals. The skip tensors will be + hidden in portals so that the autograd engine does not need to track them. + + This tracker is only used when the training or evaluating module is wrapped + with :class:`torchpipe.Pipe`. + + """ + + def __init__(self, skip_layout: SkipLayout) -> None: + super().__init__() + self.skip_layout = skip_layout + self.portals: Dict[Tuple[Namespace, str], Portal] = {} + + def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None: + """Saves the stashed skip tensor in a portal. The portal is then + connected to the given micro-batch with :class:`Join`. + """ + if not self.skip_layout.requires_copy(ns, name): + super().save(batch, ns, name, tensor) + return + + # See [Tensor Life of Portal] at Portal.put_tensor() to understand the + # below tensor_life values. Here are the selected events which retrieve + # the tensor in portal: + # + # 1. [x] blue() + # ... + # 6. [x] PortalOrange.forward + # ... + # 8. [x] PortalOrange.forward (recomputed) + # ... + # 11. [x] blue() (recomputed) + # + if (ns, name) not in self.portals: + if is_checkpointing(): + # Under checkpointing, the tensor used by the first + # PortalOrange should be alive in the portal. This tensor will + # be used again by the second PortalOrange during the + # recomputation. + tensor_life = 3 # Delete at [8. PortalOrange.forward (recomputed)] + else: + tensor_life = 2 # Delete at [6. PortalOrange.forward] + + portal = Portal(tensor, tensor_life) + self.portals[(ns, name)] = portal + + else: + # Under recomputation, the portal already exists. + portal = self.portals[(ns, name)] + + # The existing tensor life already became 0. It should be reset as + # 1 to delete the tensor after the second PortalBlue immediately. + tensor_life = 1 # Delete at [11. blue() (recomputed)] + + portal.put_tensor(tensor, tensor_life) + + phony = portal.blue() + tensor_idx = batch.find_tensor_idx() + batch[tensor_idx] = join(batch[tensor_idx], phony) + + def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]: + """Loads a skip tensor from the corresponding portal to pop. The given + micro-batch is connected to the portal with :class:`Fork`. + """ + if not self.skip_layout.requires_copy(ns, name): + tensor = super().load(batch, ns, name) + return tensor + + portal = self.portals[(ns, name)] + tensor_idx = batch.find_tensor_idx() + batch[tensor_idx], phony = fork(batch[tensor_idx]) + tensor = portal.orange(phony) + return tensor + + def copy( + self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str, + ) -> None: + """Copies the skip tensor in the corresponding portal. The given + micro-batch and the portal will be tied with :class:`Fork` and + :class:`Join`. 
+ """ + assert self.skip_layout.requires_copy(ns, name) + + tensor_idx = batch.find_tensor_idx() + batch[tensor_idx], phony = fork(batch[tensor_idx]) + + portal = self.portals[(ns, name)] + phony = portal.copy(prev_stream, next_stream, phony) + + batch[tensor_idx] = join(batch[tensor_idx], phony) + + +class ThreadLocal(threading.local): + def __init__(self) -> None: + self.skip_tracker: Optional[SkipTracker] = None + + +thread_local = ThreadLocal() + + +@contextmanager +def use_skip_tracker(skip_tracker: SkipTracker) -> Generator[None, None, None]: + """Registers the given skip tracker on the current thread within a + context:: + + with use_skip_tracker(my_skip_tracker): + ... + + """ + orig = thread_local.skip_tracker + + thread_local.skip_tracker = skip_tracker + + try: + yield + finally: + thread_local.skip_tracker = orig + + +def current_skip_tracker() -> SkipTracker: + """Gets the skip tracker on the current thread.""" + skip_tracker = thread_local.skip_tracker + + if skip_tracker is None: + skip_tracker = SkipTracker() + thread_local.skip_tracker = skip_tracker + + return skip_tracker diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d942840127b9a64f51710a3f00093f2dff96be93 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..990550414ca47a680c9fe2b30c0817ad4bd9bee5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from torch.distributed.tensor.parallel.api import parallelize_module + +from torch.distributed.tensor.parallel.loss import loss_parallel +from torch.distributed.tensor.parallel.style import ( + ColwiseParallel, + ParallelStyle, + PrepareModuleInput, + PrepareModuleOutput, + RowwiseParallel, + SequenceParallel, +) + +__all__ = [ + "ColwiseParallel", + "ParallelStyle", + "PrepareModuleInput", + "PrepareModuleOutput", + "RowwiseParallel", + "SequenceParallel", + "parallelize_module", + "loss_parallel" +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..905a71ff5b5a7793ff6bd7877e226ec3bc95e8a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5a1848e2eb4cd4083378bfd7e39f034746c7055 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/ddp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/ddp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83b2c4598b8fd3cf19888c596ceb6f4ae438e7e9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/ddp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/fsdp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/fsdp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4147cb4f97e16c3cdef934ef8a03a3c18e8b39b3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/fsdp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/loss.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68efa4bc2d8e417362b6f6d17b22a5c3a40c27fa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/loss.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/style.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19d660631f795010ca241d7dec1da7ce2ef6978e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/style.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_data_parallel_utils.py 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_data_parallel_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1ebfd53ab7b3bb88ed496435747dd1fd9ac8ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_data_parallel_utils.py @@ -0,0 +1,51 @@ +from functools import partial +from typing import no_type_check, Optional, Tuple + +import torch +from torch.distributed._functional_collectives import AsyncCollectiveTensor +from torch.distributed._tensor import DTensor +from torch.distributed._tensor.placement_types import DTensorSpec + + +@no_type_check +def sync_grad_hook(grad, *, device_handle=None, compute_stream=None): + if isinstance(grad, AsyncCollectiveTensor): + if compute_stream is not None: + with device_handle.stream(compute_stream): + grad = grad.wait() + else: + grad = grad.wait() + + return grad + + +def _flatten_tensor( + tensor: torch.Tensor, +) -> Tuple[torch.Tensor, Optional[DTensorSpec]]: + if isinstance(tensor, DTensor): + tensor._local_tensor.requires_grad_() + return tensor._local_tensor, tensor._spec + return tensor, None + + +@no_type_check +def _unflatten_tensor(tensor, spec, *, device_handle=None, compute_stream=None): + # unflatten would mainly be called everytime FSDP allgather parameters. + result = DTensor.from_local( + tensor, + spec.mesh, + spec.placements, + run_check=False, + shape=spec.shape, + stride=spec.stride, + ) + if tensor.requires_grad: + # only register the hook if the tensor requires grad + tensor.register_hook( + partial( + sync_grad_hook, + device_handle=device_handle, + compute_stream=compute_stream, + ) + ) + return result diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc37003fb2d0490d18fdb08cba77e239e7ef4c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/_utils.py @@ -0,0 +1,60 @@ +import warnings +from typing import Tuple, Union + +from torch.distributed._tensor import DeviceMesh +from torch.distributed._tensor.placement_types import Placement +from torch.distributed.device_mesh import _mesh_resources +try: + from torch._dynamo.external_utils import is_compiling as is_torchdynamo_compiling +except Exception: + def is_torchdynamo_compiling(): # type: ignore[misc] + return False + +LayoutsType = Union[Placement, Tuple[Placement, ...]] + + +def _deprecate_warnings(func_name: str, extra_msg: str) -> None: + """ + Inject common validation logics for `_prepare_input` funcs via this decorator. + + Include verifying that input needs to be either a :class:`Tensor` or :class:`DTensor` + and only 1D :class:`DeviceMesh` is passed in. + """ + # TODO: Will follow up with dynamo POC to make warnings.warn working with dynamo. + if not is_torchdynamo_compiling(): + warnings.warn(f"{func_name} is deprecated and will be removed soon. {extra_msg}") + + +def _validate_tp_mesh_dim( + device_mesh: DeviceMesh, +) -> None: + """ + Check whether TP mesh dimension is valid or not. + + Args: + device_mesh (:class:`DeviceMesh`): + The `device_mesh` where we perform + Tensor Parallelism on. + + Return: + `True` if the mesh dimension + is valid, `False` otherwise. + """ + if device_mesh.ndim > 1: + raise ValueError(f"Tensor Parallel only accepts a 1D DeviceMesh, but found {device_mesh.ndim}D!" 
+ "If you have a 2-D or N-D device_mesh, consider passing in device_mesh[\"tp\"]") + + parent_mesh = _mesh_resources.get_parent_mesh(device_mesh) + if parent_mesh: + if parent_mesh.ndim != 2: + raise RuntimeError( + f"Found TP device_mesh has a parent mesh with dims {parent_mesh.ndim}", + "Currently we only support 2D TP composition with DP.", + ) + + tp_mesh_dim = _mesh_resources.get_parent_mesh_dim(device_mesh) + if tp_mesh_dim != 1: + raise RuntimeError( + f"Found TP device_mesh on the {tp_mesh_dim} dimension of its parent mesh.", + "Currently we only support intranode TP and TP needs to be the innermost dimension on its parent mesh.", + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/api.py new file mode 100644 index 0000000000000000000000000000000000000000..a8e2e5bc1bfdfd29d61048147286d0256de4c9e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/api.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import Dict, Union + +import torch +import torch.distributed._tensor.random as random +import torch.nn as nn +from torch.distributed._tensor import ( + DeviceMesh, +) +from torch.distributed._tensor.random import ( + is_rng_supported_mesh, + TensorParallelRNGTracker, +) +from torch.distributed.tensor.parallel._utils import _validate_tp_mesh_dim +from torch.distributed.tensor.parallel.style import ( + ParallelStyle, +) + + +__all__ = [ + "parallelize_module", +] + + +def parallelize_module( # type: ignore[return] + module: nn.Module, + device_mesh: DeviceMesh, + parallelize_plan: Union[ParallelStyle, Dict[str, ParallelStyle]], +) -> nn.Module: + """ + Apply Tensor Parallelism in PyTorch by parallelizing modules or sub-modules based on a user-specified plan. + + We parallelize module or sub_modules based on a parallelize_plan. The parallelize_plan contains + :class:`ParallelStyle`, which indicates how user wants the module or sub_module + to be parallelized. + + User can also specify different parallel style per module fully qualified name (FQN). + + Note that ``parallelize_module`` only accepts a 1-D :class:`DeviceMesh`, if you have a 2-D or N-D :class:`DeviceMesh`, + slice the DeviceMesh to a 1-D sub DeviceMesh first then pass to this API(i.e. ``device_mesh[\"tp\"]``) + + Args: + module (:class:`nn.Module`): + Module to be parallelized. + device_mesh (:class:`DeviceMesh`): + Object which describes the mesh topology + of devices for the DTensor. + parallelize_plan (Union[:class:`ParallelStyle`, Dict[str, :class:`ParallelStyle`]]): + The plan used to parallelize the module. It can be either a + :class:`ParallelStyle` object which contains how + we prepare input/output for Tensor Parallelism or it can be a + dict of module FQN and its corresponding :class:`ParallelStyle` object. + Return: + A :class:`nn.Module` object parallelized. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> from torch.distributed.tensor.parallel import parallelize_module, ColwiseParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> + >>> # Define the module. + >>> m = Model(...) + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> m = parallelize_module(m, tp_mesh, {"w1": ColwiseParallel(), "w2": RowwiseParallel()}) + >>> + + .. note:: For complex module architecture like Attention, MLP layers, we recommend composing + different ParallelStyles together (i.e. 
``ColwiseParallel`` and ``RowwiseParallel``) and pass + as a parallelize_plan, to achieves the desired sharding computation. + """ + torch._C._log_api_usage_once("torch.distributed.tensor.parallel.parallelize_module") + + _validate_tp_mesh_dim(device_mesh) + + # instantiate a TP RNG state tracker if it's not there + if is_rng_supported_mesh(device_mesh) and not isinstance( + random._rng_tracker, TensorParallelRNGTracker + ): + random._rng_tracker = TensorParallelRNGTracker(device_mesh.device_type) + # TODO: we should allow user to pass in the default seed from a config + random._rng_tracker._manual_seed(device_mesh, base_seed=1234) + # By default we execute random ops in non-tensor-parallel region. If users want + # to execute in tensor-parallel region, they can manually set this field to True + # after parallelizing the model. + random._rng_tracker.distribute_region_enabled = False + + if isinstance(parallelize_plan, ParallelStyle): + return parallelize_plan._apply(module, device_mesh) + elif isinstance(parallelize_plan, dict): + for module_path, parallelize_style in parallelize_plan.items(): + sub_module = module.get_submodule(module_path) + parent_module = module + if "." in module_path: + parent_module_path = ".".join(module_path.split(".")[:-1]) + parent_module = module.get_submodule(parent_module_path) + module_path = module_path.split(".")[-1] + parent_module.register_module( # type: ignore[call-arg] # pyre-ignore[20] + module_path, + parallelize_module( # type: ignore[arg-type] + sub_module, device_mesh, parallelize_style # type: ignore[arg-type] # pyre-ignore[6] + ), + ) + return module + else: + raise RuntimeError( # pyre-ignore[7] + "Expect Union[ParallelStyle, Dict[str, ParallelStyle]] for" + f" parallelize_plan, {type(parallelize_plan)} found!" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/ddp.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/ddp.py new file mode 100644 index 0000000000000000000000000000000000000000..474e542551ae90348d8f7e0a856e210ae24d022a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/ddp.py @@ -0,0 +1,96 @@ +from typing import Any, List, Tuple + +import torch.nn as nn +from torch.distributed.tensor.parallel._data_parallel_utils import ( + _flatten_tensor, + _unflatten_tensor, +) + +__all__ = [] # type: ignore[var-annotated] + + +def _get_submodule_n_params(module: nn.Module, path: str): + """ + Get submodule and the direct path of parameter from the module + """ + if "." 
in path: + path_list = path.split(".") + parent_module_path = ".".join(path_list[:-1]) + module = module.get_submodule(parent_module_path) + path = path_list[-1] + return module, path + + +def _update_module_param(param_list: List[Tuple[nn.Module, str, nn.Parameter]]): + """ + Update parameters within the module + """ + for item in param_list: + parent_module, module_path, t = item + assert hasattr(parent_module, module_path) + delattr(parent_module, module_path) + setattr(parent_module, module_path, t) + + +def _reconstruct_dtensor(module: nn.Module, _input: Any): + """ + Recontruct DTensor parameters from local tensors + """ + param_list = [] + # TODO: To add perf optimizations to this iterations + for name, t in module.named_parameters(): + if hasattr(t, "_st_info"): + dtensor = _unflatten_tensor(t, t._st_info) + param_list.append((*_get_submodule_n_params(module, name), dtensor)) + _update_module_param(param_list) # type: ignore[arg-type] + + +def _localize_dtensor(module: nn.Module, *_: Any): + """ + Convert DTensor parameters to local tensors + """ + param_list = [] + for name, param in module.named_parameters(): + t, sharding_info = _flatten_tensor(param) + if sharding_info is not None: + t = nn.Parameter(t) + t._st_info = sharding_info # type: ignore[attr-defined] + param_list.append((*_get_submodule_n_params(module, name), t)) + _update_module_param(param_list) # type: ignore[arg-type] + + +def _pre_dp_module_transform(module: nn.Module): + """ + Enable the composability between Tensor Parallelism (TP) and Data + Parallelism(DP) in PyTorch when using DDP. We need to convert Parameters which + are DTensors to local tensors before wrapping with data parallelism API. + We then register two hooks, one for converting local tensors back to DTensor + preforward and one to convert DTensors back to tensors after Forward. By + integrating this way, we avoid any special handling of DTensor parameters by DDP + and get DTensor's gradients propagated back to DP, e.g. gradient buckets of DDP. + + For now, this API only works with ``DistributedDataParallel``. It will later support + other DP methods such as FSDP. + + Args: + module (:class:`nn.Module`): + Module which has been applied TP on. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> from torch.distributed.tensor.parallel import parallelize_module, PairwiseParallel + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> from torch.distributed.tensor.parallel.ddp import pre_dp_module_transform + >>> + >>> # Define the module. + >>> m = module(...) 
+ >>> parallelize_module(m, PairwiseParallel()) + >>> m = pre_dp_module_transform(m) + >>> m = DDP(m) + >>> + """ + + _localize_dtensor(module, None, None) + # TODO: To add test cases and ensure that it works for nested modules + module.register_forward_pre_hook(_reconstruct_dtensor) + module.register_forward_hook(_localize_dtensor) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/fsdp.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/fsdp.py new file mode 100644 index 0000000000000000000000000000000000000000..7b8d0af39bc2b48289696a52923995d4f488f446 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/fsdp.py @@ -0,0 +1,391 @@ +import copy +from typing import Any, cast, List, Optional, Tuple + +import torch +import torch.distributed as dist + +import torch.distributed._shard.sharding_spec as shard_spec +import torch.distributed.distributed_c10d as c10d +from torch.distributed._shard.sharded_tensor import ( + Shard, + ShardedTensor, + ShardedTensorMetadata, + TensorProperties, +) + +from torch.distributed._shard.sharding_spec import ShardMetadata +from torch.distributed._shard.sharding_spec.chunk_sharding_spec import ChunkShardingSpec +from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard as DShard +from torch.distributed.device_mesh import _mesh_resources + +from torch.distributed.fsdp._common_utils import _set_fsdp_flattened +from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions +from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor +from torch.distributed.remote_device import _remote_device +from torch.distributed.tensor.parallel._data_parallel_utils import ( + _flatten_tensor, + _unflatten_tensor, +) + +__all__ = ["DTensorExtensions"] + + +def _get_box(tensor: DTensor) -> Tuple[torch.Size, torch.Size]: + device_mesh = tensor.device_mesh + assert device_mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + + placement = tensor.placements[0] + offsets = [0] * len(tensor.size()) + num_chunks = device_mesh.size(mesh_dim=0) + + if tensor.placements[0].is_shard(): + shard_dim = cast(DShard, placement).dim + chunk_size = tensor.size(shard_dim) // num_chunks + offsets[shard_dim] = chunk_size + + return (torch.Size(offsets), tensor._local_tensor.size()) + + +def _get_box_for(tensor: DTensor, idx: int) -> Tuple[torch.Size, torch.Size]: + offsets, size = _get_box(tensor) + return (torch.Size([val * idx for val in offsets]), size) + + +def _get_local_box(tensor: DTensor) -> Tuple[torch.Size, torch.Size]: + device_mesh = tensor.device_mesh + coord = device_mesh.get_coordinate() + assert coord is not None + return _get_box_for(tensor, coord[0]) + + +def _create_shard_md_from_dt(dt: DTensor, current_rank: int) -> ShardMetadata: + mesh = dt.device_mesh + assert mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + + offsets, sizes = _get_local_box(dt) + return ShardMetadata( + shard_offsets=list(offsets), + shard_sizes=list(sizes), + placement=f"rank:{current_rank}/{dt._local_tensor.device}", + ) + + +def _create_sharded_tensor_md_from_dt( + dt: DTensor, dt_pg: c10d.ProcessGroup +) -> ShardedTensorMetadata: + # This is where it gets tricky, we have to produce a ShardedTensor that has full coverage + # and yet has only one valid shard for the current rank. 
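The per-shard metadata assembled just below follows the box arithmetic of ``_get_box_for`` above; a hedged standalone restatement with plain integers (no DTensor or mesh involved, numbers are illustrative):

global_size, num_chunks, shard_dim = (8, 4), 2, 0
chunk = global_size[shard_dim] // num_chunks      # 4 rows per rank
for i in range(num_chunks):
    offsets = [0, 0]
    offsets[shard_dim] = chunk * i                # (0, 0) for rank 0, (4, 0) for rank 1
    local_size = (chunk, global_size[1])          # every shard is 4 x 4
    print(i, offsets, local_size)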
+ + shards_md = [] + my_rank = dist.get_rank(dt_pg) + scapegoat_rank = 0 if my_rank > 0 else 1 + + if dt.placements[0].is_shard(): + shard_count = dt_pg.size() + else: + shard_count = 1 + + for i in range(shard_count): + offsets, sizes = _get_box_for(dt, i) + shards_md.append( + ShardMetadata( + shard_offsets=list(offsets), + shard_sizes=list(sizes), + placement=( + f"rank:{scapegoat_rank if i > 0 else my_rank}/{dt._local_tensor.device}" + ), + ) + ) + + return ShardedTensorMetadata( + shards_metadata=shards_md, + size=dt.size(), + tensor_properties=TensorProperties( + dtype=dt.dtype, + layout=dt.layout, + requires_grad=dt.requires_grad, + # ignore memory_format and pin_memory as those are not supported by DT + ), + ) + + +def _get_dt_pg(dt: DTensor) -> c10d.ProcessGroup: + mesh = dt.device_mesh + assert mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + dim_groups = mesh.get_group() + assert isinstance(dim_groups, list) + return dim_groups[0] + + +def _rewrite_spec_if_needed( + spec: shard_spec.ShardingSpec, tensor: torch.Tensor, rank: int +) -> shard_spec.ShardingSpec: + """ + Rewrite ``spec`` to match the device of ``tensor``. + + FSDP.sharded_optim_state_dict sneakly ships optimizer state to CPU so if the original ShardingSpec + produces CUDA metadata, ST construction bombs. + """ + if not isinstance(spec, ChunkShardingSpec): + return spec + + # let's see if we need + rewrite = False + for p in spec.placements: + p = cast(_remote_device, p) + if p.rank() == rank and p.device() != tensor.device: + rewrite = True + break + if rewrite: + spec = copy.deepcopy(spec) + for i, placement in enumerate(spec.placements): + placement = cast(_remote_device, placement) + if placement.rank() == rank and placement.device() != tensor.device: + spec.placements[i] = _remote_device(f"rank:{rank}/{tensor.device}") + + return spec + + +def _chunk_tensor( + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, +) -> torch.Tensor: + if type(tensor) is ShardedTensor: + assert len(tensor.local_shards()) == 1 + + inner_param = tensor.local_tensor() + inner_st = _create_chunk_sharded_tensor( + inner_param, + rank, + world_size, + num_devices_per_node, + pg, + ) + + outer_local_shard = tensor.local_shards()[0] + shards: List[Shard] = [ + Shard(inner_st, copy.deepcopy(outer_local_shard.metadata)) + ] + st_meta = copy.deepcopy(tensor.metadata()) + st_meta.tensor_properties.requires_grad = False + + st_outer = ShardedTensor._init_from_local_shards_and_global_metadata( + shards, + sharded_tensor_metadata=st_meta, + process_group=tensor._process_group, + init_rrefs=False, + ) + return st_outer + elif type(tensor) is DTensor: + device_mesh = tensor.device_mesh + assert device_mesh.ndim == 1, "Only 1D DeviceMeshes currently handled" + + inner_param = tensor._local_tensor + + inner_st = _create_chunk_sharded_tensor( + inner_param, + rank, + world_size, + torch.cuda.device_count(), + pg, + ) + + dt_pg = _get_dt_pg(tensor) + # We do this differently here, we create a ST with no local shards then patch it + shards = [ + Shard(inner_st, _create_shard_md_from_dt(tensor, dist.get_rank(dt_pg))) + ] + + st_meta = _create_sharded_tensor_md_from_dt(tensor, dt_pg) + st_meta.tensor_properties.requires_grad = False + + st_outer = ShardedTensor._init_from_local_shards_and_global_metadata( + shards, + sharded_tensor_metadata=st_meta, + process_group=dt_pg, + init_rrefs=False, + ) + + return st_outer + else: + return _create_chunk_sharded_tensor( + tensor, + rank, + 
world_size, + num_devices_per_node, + pg, + ) + + +def _chunk_dtensor( + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, +) -> DTensor: + """ + Shard a tensor to chunks along the first dimension. + + The local rank will gets its corresponding chunk as the local tensor to create a DTensor. + """ + parent_mesh = _mesh_resources.get_parent_mesh(device_mesh) + if parent_mesh is None: + raise RuntimeError("No parent device_mesh is found for FSDP device_mesh.") + if parent_mesh.ndim < 2: + raise RuntimeError( + f"Found parent device_mesh of ndim={parent_mesh.ndim},", + "but meshes must be at least 2D.", + ) + + # We need to explicitly call .detach() to return a new tensor detached from the current graph. + tensor = tensor.clone().detach() + + # When a layer is not involved in TP, then the tensor will not be a DTensor. + # e.g. When a layer is not sppecified in the parallelize_plan, TP will have no effect on the layer. + # e.g. When you do PairwiseParallel on a 3 layer model, TP will have no effect on the third layer. + if isinstance(tensor, torch.Tensor) and not isinstance(tensor, DTensor): + + # For tensors, it is replicated across tp dimension and sharded across FSDP dimension. + # TP is the inner dimension and FSDP is the outer dimension. + # Therefore, shard placements for tensor is (Shard(0), Replicate()). + replicate_placements = [Replicate() for _ in range(parent_mesh.ndim)] + shard_placements = [Replicate() for _ in range(parent_mesh.ndim)] + shard_placements[0] = DShard(0) # type: ignore[call-overload] + + return DTensor.from_local( + tensor, parent_mesh, replicate_placements + ).redistribute( + device_mesh=parent_mesh, + placements=shard_placements, + ) + + else: + tp_placements = tensor.placements + tp_placement = tp_placements[0] + + tensor = tensor.to_local() + + # For DTensors, it is sharded across tp dimension first and then sharded across FSDP dimension. + # TP is the inner dimension and FSDP is the outer dimension. + # Therefore, shard placements for tensor is (Shard(0), tp_placement). + # For higher dimensional meshes, it is replicated across other dimensions. For example, with + # HSDP the shard placements for tensor is (Replicate, Shard(0), tp_placement). 
+ replicate_placements = [Replicate() for _ in range(parent_mesh.ndim)] + replicate_placements[-1] = tp_placement # type: ignore[call-overload] + shard_placements = [Replicate() for i in range(parent_mesh.ndim)] # type: ignore[misc] + shard_placements[-2] = DShard(0) # type: ignore[call-overload] + shard_placements[-1] = tp_placement # type: ignore[call-overload] + + return DTensor.from_local( + tensor, parent_mesh, replicate_placements + ).redistribute( + device_mesh=parent_mesh, + placements=shard_placements, + ) + + +def _pre_load_state_dict( + tensor: torch.Tensor, +) -> Tuple[torch.Tensor, List[Shard]]: + shards = cast(ShardedTensor, tensor).local_shards() + if len(shards) == 1 and type(shards[0].tensor) is ShardedTensor: + inner_tensor = shards[0].tensor + shards = inner_tensor.local_shards() # pyre-ignore[16] + tensor = inner_tensor + + return (tensor, shards if len(shards) > 0 else []) + + +def _all_gather_dtensor( + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], +) -> torch.Tensor: + """All gather a DTensor in its FSDP dimension and return the local tensor.""" + assert parent_mesh == tensor.device_mesh + + placements = list(copy.deepcopy(tensor.placements)) + # FSDP + TP: [Shard(0), tp_placement] -> [Replicate(), tp_placement] + # HSDP + TP: [Replicate(), Shard(0), tp_placement] -> [Replicate(), Replicate(), tp_placement] + for i in range(0, len(placements) - 1): + placements[i] = Replicate() + tensor = tensor.redistribute( + device_mesh=tensor.device_mesh, + placements=placements, + ) + + return tensor.to_local() + + +class DTensorExtensions(FSDPExtensions): + """ + DTensorExtension is the TensorFlattener extension needed for 2D FSDP + TP. + + This is the implementation for FSDPExtensions defined in + https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/_fsdp_extensions.py + """ + def __init__(self, device_handle) -> None: + super().__init__() + self.compute_stream = None + self.device_handle = device_handle + # we have to use the dynamo disable this way to disable dynamo as the decorater way would + # trigger build failure with torch deploy... + self.post_unflatten_transform = torch._dynamo.disable(self.post_unflatten_transform) # type: ignore[method-assign] + + def pre_flatten_transform( + self, + tensor: torch.Tensor, + ) -> Tuple[torch.Tensor, Optional[Any]]: + return _flatten_tensor(tensor) + + def post_unflatten_transform( + self, tensor: torch.Tensor, param_extension: Any + ) -> torch.Tensor: + stream = self.compute_stream or self.device_handle.current_stream() + with self.device_handle.stream(stream): + # runtime we put the unflattened tensor call on the compute stream since + # the unflattened tensor might contain computations in fwd/bwd where we + # need to sync properly. + # TODO: this is a short term fix and we should make the get_unflat_views + # directly happen in the compute stream. 
+ result = _unflatten_tensor( + tensor, + param_extension, + device_handle=self.device_handle, + compute_stream=self.compute_stream + ) + _set_fsdp_flattened(result) + return result + + def chunk_tensor( + self, + tensor: torch.Tensor, + rank: int, + world_size: int, + num_devices_per_node: int, + pg: dist.ProcessGroup, + device: Optional[torch.device] = None, + ) -> torch.Tensor: + return _chunk_tensor(tensor, rank, world_size, num_devices_per_node, pg) + + def chunk_dtensor( + self, + tensor: torch.Tensor, + rank: int, + device_mesh: DeviceMesh, + ) -> torch.Tensor: + return _chunk_dtensor(tensor, rank, device_mesh) + + def pre_load_state_dict_transform( + self, + tensor: torch.Tensor, + ) -> Tuple[torch.Tensor, List[Shard]]: + return _pre_load_state_dict(tensor) + + def all_gather_dtensor( + self, + tensor: DTensor, + parent_mesh: Optional[DeviceMesh], + ) -> torch.Tensor: + return _all_gather_dtensor(tensor, parent_mesh) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/input_reshard.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/input_reshard.py new file mode 100644 index 0000000000000000000000000000000000000000..3ea97846e313a40a3e8c6a302244361146a8fd67 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/input_reshard.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from functools import partial +from typing import Any, Optional, Tuple + +import torch +from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard + +__all__ = [ + "input_reshard", +] + + +def input_reshard( + module: torch.nn.Module, + tp_device_mesh: DeviceMesh, + input_reshard_dim: Optional[int] = None, +) -> torch.nn.Module: + """ + Register hooks to an nn.Module for input resharding, enabling sharding and restoration during backward computation. + + Register hooks to an nn.Module with input resharding so that we can shard + per the given `tp_device_mesh` and `input_reshard_dim` and restore the + input back when recomputing the activations in the backward. The reason + why we can do this is that for Tensor Parallel(TP), the input are same + across all TP ranks. + + Args: + module (:class:`nn.Module`): + Module to be registered with input resharding. + tp_device_mesh (:class:`DeviceMesh`): + Object which describes the mesh topology + of devices for Tensor Parallel. + input_reshard_dim (Optional[int]): + The dimension of where we perform the sharding + of input. If set None, there is no sharding of input. + Default: None + + Return: + A :class:`nn.Module` object registered with TP input resharding. 
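Example (a hedged sketch; it assumes a 1-D tensor-parallel mesh and a module ``m`` that has already been parallelized, and imports ``input_reshard`` from this submodule)::

        >>> # xdoctest: +SKIP("distributed")
        >>> from torch.distributed.device_mesh import init_device_mesh
        >>> from torch.distributed.tensor.parallel import parallelize_module, ColwiseParallel
        >>> from torch.distributed.tensor.parallel.input_reshard import input_reshard
        >>>
        >>> tp_mesh = init_device_mesh("cuda", (8,))
        >>> m = parallelize_module(m, tp_mesh, {"w1": ColwiseParallel()})
        >>> # Shard saved inputs along dim 0 and restore them during backward recomputation.
        >>> m = input_reshard(m, tp_mesh, input_reshard_dim=0)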
+ """ + cx: Optional[torch.autograd.graph.saved_tensors_hooks] = None + + def input_reshard_forward_pre_hook(_: torch.nn.Module, _i: Tuple[Any, ...]) -> None: + saved_tensor_hooks = torch.autograd.graph.saved_tensors_hooks( + partial(_pack_hook_tp, tp_device_mesh, input_reshard_dim), + partial(_unpack_hook_tp, tp_device_mesh, input_reshard_dim), + ) + saved_tensor_hooks.__enter__() + nonlocal cx + cx = saved_tensor_hooks # type: ignore[name-defined] + + def input_reshard_backward_hook(_: torch.nn.Module, _i: Tuple[Any, ...], _o: Any) -> Any: + nonlocal cx + cx.__exit__() # type: ignore[name-defined, union-attr] + + if input_reshard_dim is None: + return module + module.register_forward_pre_hook(input_reshard_forward_pre_hook) + module.register_forward_hook(input_reshard_backward_hook) + return module + + +def _pack_hook_tp(mesh: DeviceMesh, input_reshard_dim: int, x: torch.Tensor) -> Any: # noqa: D401 + """Hook function called after FWD to shard input.""" + if isinstance(x, DTensor) and all(p.is_replicate() for p in x._spec.placements): + return x.redistribute(device_mesh=mesh, placements=[Shard(input_reshard_dim)]) + elif ( + not isinstance(x, DTensor) + and isinstance(x, torch.Tensor) + and x.numel() >= mesh.size() + ): + return ( + DTensor.from_local(x, device_mesh=mesh) + .redistribute(device_mesh=mesh, placements=[Shard(input_reshard_dim)]) + .to_local() + ) + else: + return x + + +def _unpack_hook_tp(mesh: DeviceMesh, input_reshard_dim: int, x: Any) -> torch.Tensor: # noqa: D401 + """Hook function called before activation recomputing in BWD to restore input.""" + if ( + isinstance(x, DTensor) + and len(x._spec.placements) == 1 + and x._spec.placements[0].is_shard() + ): + return x.redistribute(device_mesh=mesh, placements=[Replicate()]) + elif ( + not isinstance(x, DTensor) + and isinstance(x, torch.Tensor) + and x.numel() >= mesh.size() + ): + return ( + DTensor.from_local( + x, device_mesh=mesh, placements=[Shard(input_reshard_dim)] + ) + .redistribute(device_mesh=mesh, placements=[Replicate()]) + .to_local() + ) + else: + return x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/loss.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..f7144a38e92328fc69883af815e0e7bbc6a83d8c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/loss.py @@ -0,0 +1,484 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import contextlib +from typing import cast, Dict, Optional, Tuple + +import torch +import torch._prims_common as utils +import torch.distributed._functional_collectives as funcol +import torch.distributed.distributed_c10d as c10d +from torch import Tensor +from torch.distributed._tensor import DTensor, Replicate, Shard +from torch.distributed._tensor.ops.embedding_ops import _MaskPartial +from torch.distributed._tensor.ops.math_ops import ( + _skip_dim, + Reduction, + replicate_reduction_dims, +) +from torch.distributed._tensor.placement_types import Placement, TensorMeta +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +__all__ = ["loss_parallel"] + + +@contextlib.contextmanager +def loss_parallel(): + """ + A context manager that enables loss parallelism, where efficient parallelized loss computation + can be performed when the input is sharded on the class dimension. Currently only the cross-entropy + loss is supported. 
+ + Within this context manager, one can use :func:`~torch.nn.functional.cross_entropy` or + :class:`~torch.nn.CrossEntropyLoss` as usual, with the following assumptions on the input parameters. + The corresponding ``backward()`` call, if any, also needs to happen under this context manager. + + Args: + input (:class:`DTensor`): + Input logits. Assumed to be sharded on the class dimension. + target (Union[:class:`torch.Tensor`, :class:`DTensor`]): + Must be ground truth class indices (class probabilities currently not supported). + Assumed to be replicated across the ``DeviceMesh``. + weight (Union[:class:`torch.Tensor`, :class:`DTensor`], optional): + If given, assumed to be replicated across the ``DeviceMesh``. + label_smoothing: + Currently not supported. + + Returns: + A replicated :class:`DTensor`. + + Example: + A sharded DTensor is manually created here to showcase the usage. + In practice, it is usually the output of a TP module. + + >>> # xdoctest: +SKIP("distributed") + >>> from torch.distributed.tensor.parallel import loss_parallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> device_mesh = init_device_mesh("cuda", (8,)) + >>> input = torch.randn(4, 16, device="cuda", requires_grad=True) + >>> dist_input = distribute_tensor(input, device_mesh, placements=[Shard(1)]) + >>> target = torch.randint(16, (4,), device="cuda") + >>> with loss_parallel(): + >>> loss = F.cross_entropy(dist_input, target, reduction="mean") + >>> loss.backward() + >>> ... + """ + _enable_custom_loss_ops() + + yield + + _disable_custom_loss_ops() + + +# Currently only needs to support one dimensional DeviceMesh; in general return +# the mesh_dim with placements[mesh_dim].is_shard(dim) +def _find_all_reduce_mesh_dim(placements: Tuple[Placement, ...], dim: int) -> int: + if not len(placements) == 1: + raise ValueError( + "Currently loss_parallel() only supports input on one-dimensional DeviceMesh." + ) + if not placements[0].is_shard(dim): + raise ValueError( + f"loss_parallel() should be enabled only when the input tensor is sharded on dimension {dim}." + ) + return 0 + + +def _cast_to_dtensor( + tensor, placements: Tuple[Placement, ...], mesh: DeviceMesh +) -> DTensor: + if isinstance(tensor, DTensor): + if tensor.placements == placements: + return tensor + else: + raise RuntimeError(f"Expected {placements} but got {tensor.placements}.") + elif isinstance(tensor, torch.Tensor): + return DTensor.from_local( + tensor, device_mesh=mesh, placements=placements, run_check=False + ) + else: + raise TypeError(f"Unsupported type {type(tensor)}") + + +def _propagate_tensor_meta( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> TensorMeta: + op_info = DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs) + tensor_meta = DTensor._op_dispatcher.sharding_propagator._propagate_tensor_meta( + op_info.schema + ) + if isinstance(tensor_meta, TensorMeta): + return tensor_meta + elif isinstance(tensor_meta, tuple): + return tensor_meta[0] + else: + raise RuntimeError(f"Unexpected tensor meta type: {type(tensor_meta)}.") + + +# NOTE: The implementation follows torch._decomp.decomposition._log_softmax, +# with all_reduce manually inserted to perform distributed computation. 
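For orientation, the sharded ``_log_softmax`` below follows the usual shift-by-max log-sum-exp identity; when the class dimension is sharded, the max and the exp-sum each require one all_reduce (MAX, then SUM), while the remaining steps are elementwise and stay local. A minimal single-device sketch of that identity, illustrative only and not part of the patch:

import torch

def _log_softmax_reference(x: torch.Tensor, dim: int) -> torch.Tensor:
    # Shift by the max for numerical stability; in the sharded version this
    # max is made global via all_reduce(MAX).
    x_max = torch.amax(x, dim, keepdim=True)
    shifted = x - x_max
    # The normalizer sum(exp(.)) is likewise made global via all_reduce(SUM).
    logsumexp = torch.log(torch.exp(shifted).sum(dim, keepdim=True))
    return shifted - logsumexp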
+def _log_softmax(x, dim, half_to_float, mesh, mesh_dim): + x = x.contiguous() + if half_to_float: + assert x.dtype == torch.half + computation_dtype, result_dtype = utils.elementwise_dtypes( + x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + x = x.to(computation_dtype) + if x.numel() == 0: + shifted = x + else: + x_max = torch.amax(x, dim, keepdim=True) + x_max = funcol.all_reduce( + x_max, reduceOp=c10d.ReduceOp.MAX.name, group=(mesh, mesh_dim) + ) + shifted = x - x_max + shifted_sumexp = torch.sum(torch.exp(shifted), dim, keepdim=True) + shifted_sumexp = funcol.all_reduce( + shifted_sumexp, reduceOp=c10d.ReduceOp.SUM.name, group=(mesh, mesh_dim) + ) + shifted_logsumexp = torch.log(shifted_sumexp) + result = shifted - shifted_logsumexp + if not half_to_float: + result = result.to(result_dtype) + return result + + +def _log_softmax_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + x = cast(DTensor, args[0]) + dim = cast(int, args[1]) + half_to_float = cast(bool, args[2]) + + spec = x._spec + mesh_dim = _find_all_reduce_mesh_dim(spec.placements, dim) + + output_tensor_meta = _propagate_tensor_meta(op_call, args, kwargs) + + res = _log_softmax(x._local_tensor, dim, half_to_float, spec.mesh, mesh_dim) + + return DTensor( + res, + spec.mesh, + spec.placements, + shape=output_tensor_meta.shape, + dtype=output_tensor_meta.dtype, + requires_grad=res.requires_grad, + stride=output_tensor_meta.stride, + ) + + +# NOTE: As explained below at _nll_loss_and_log_softmax_backward, the +# _log_softmax_backward_handler does not actually do any computation. +def _log_softmax_backward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + grad_output = cast(DTensor, args[0]) + input_dtype = cast(torch.dtype, args[3]) + return grad_output.to(input_dtype) + + +# NOTE: The implementation follows torch._decomp.decomposition._nll_loss_forward, +# with customized communication inserted to perform distributed computation. 
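For reference, the dense computation that ``_nll_loss_forward`` below reproduces is a gather of the target class from the log-probabilities, followed by masking of ``ignore_index`` and reduction; in the sharded case that gather over the class dimension is exactly the step routed through ``_MaskPartial`` plus an all_reduce. A minimal single-device sketch (mean reduction, no class weights), illustrative only and not part of the patch:

import torch

def _nll_loss_reference(logp: torch.Tensor, target: torch.Tensor,
                        ignore_index: int = -100) -> torch.Tensor:
    # logp: (N, C) log-probabilities, target: (N,) class indices.
    safe_target = torch.where(target != ignore_index, target, 0)
    picked = -torch.gather(logp, 1, safe_target.unsqueeze(1)).squeeze(1)
    picked = torch.where(target != ignore_index, picked, 0.0)
    # Mean over the non-ignored entries.
    return picked.sum() / (target != ignore_index).sum().to(logp)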
+def _nll_loss_forward( + x: Tensor, + target: Tensor, + weight: Optional[Tensor], + local_weight: Optional[Tensor], + reduction: int, + ignore_index: int, + channel_dim_size: int, + mesh: DeviceMesh, + mesh_dim: int, +) -> Tuple[Tensor, Tensor]: + n_dims = x.dim() + channel_dim = 1 + if n_dims < 2: + channel_dim = 0 + + def _weight_view(weight: Tensor) -> Tensor: + if n_dims > 1: + shape = [ + 1, + ] * n_dims + shape[channel_dim] = weight.shape[0] + w = weight.view(shape) + else: + w = weight + return w + + if weight is not None: + w = _weight_view(weight) + assert local_weight is not None + local_w = _weight_view(local_weight) + x = x * local_w + safe_target = torch.where(target != ignore_index, target, 0) + safe_target_ = safe_target.unsqueeze(channel_dim) + + # The following code block is a distributed version of + # result = -torch.gather(self, channel_dim, safe_target_).squeeze(channel_dim) + partial_placement = _MaskPartial(logical_dim_size=channel_dim_size) + safe_target_partial_ = partial_placement._partition_value( + safe_target_, mesh, mesh_dim + ) + result_partial = torch.gather(x, channel_dim, safe_target_partial_) + # an all_reduce happens here + result_reduced = partial_placement._reduce_value(result_partial, mesh, mesh_dim) + result = -result_reduced.squeeze(channel_dim) + + result = torch.where(target != ignore_index, result, 0) + + if reduction == Reduction.NONE.value and n_dims > 1: + total_weight = x.new_full((), 0.0) + return result, total_weight + + if weight is not None: + new_shape = list(x.shape) + new_shape[channel_dim] = -1 + w = w.expand(new_shape) + wsum = torch.gather(w, channel_dim, safe_target_).squeeze(channel_dim) + wsum = torch.where(target != ignore_index, wsum, 0) + total_weight = wsum.sum() + else: + total_weight = (target != ignore_index).sum().to(x) + + # NOTE: this is correct only on 1D DeviceMesh; o/w additional + # all-reduce on result and total_weight is needed + if reduction == Reduction.SUM.value: + result = result.sum() + elif reduction == Reduction.MEAN.value: + result = result.sum() / total_weight + + return result, total_weight + + +def _nll_loss_forward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + x = cast(DTensor, args[0]) + target = args[1] + weight = args[2] + reduction = cast(int, args[3]) + ignore_index = cast(int, args[4]) + + channel_dim = 1 if x.dim() >= 2 else 0 + channel_dim_size = x.shape[channel_dim] + spec = x._spec + mesh_dim = _find_all_reduce_mesh_dim(spec.placements, channel_dim) + + # Check user input: if target and weight are not DTensors, convert them to DTensors; + # if they are DTensors, check that they have the desired placements. + target_placements = _skip_dim( + replicate_reduction_dims(spec.placements, [channel_dim]), channel_dim + ) + all_replicate_placements = (Replicate(),) * spec.mesh.ndim + target = _cast_to_dtensor(target, target_placements, spec.mesh) + local_weight = None + if weight is not None: + weight = _cast_to_dtensor(weight, all_replicate_placements, spec.mesh) + # For local computation, both (replicated) weight and (sharded) local_weight + # are needed in _nll_loss_forward(). local_weight is generated here using + # DTensor API, without incurring any communication. 
+ sharded_placements = [ + Shard(0) if i == mesh_dim else Replicate() for i in range(spec.mesh.ndim) + ] + local_weight = weight.redistribute(spec.mesh, sharded_placements)._local_tensor + assert local_weight.shape[0] == x._local_tensor.shape[channel_dim] + + if reduction == Reduction.NONE.value: + output_placements = target_placements + else: + output_placements = all_replicate_placements + + # tensor inputs to _propagate_tensor_meta need to be DTensors + args = list(args) + args[1], args[2] = target, weight + output_tensor_meta = _propagate_tensor_meta(op_call, tuple(args), kwargs) + + result, total_weight = _nll_loss_forward( + x._local_tensor, + target._local_tensor, + weight._local_tensor if weight is not None else None, + local_weight, + reduction, + ignore_index, + channel_dim_size, + spec.mesh, + mesh_dim, + ) + + return ( + DTensor( + result, + spec.mesh, + output_placements, + shape=output_tensor_meta.shape, + dtype=output_tensor_meta.dtype, + requires_grad=result.requires_grad, + stride=output_tensor_meta.stride, + ), + total_weight, + ) + + +# NOTE: The backward computation of cross_entropy goes through two steps: +# backward for nll_loss and then backward for log_softmax. In loss parallel, +# the two steps are fused into the following function (called by _nll_loss_backward_handler) +# to avoid communication when target contains class indices not class probabilities. +# Also note that the _log_softmax_backward_handler does not perform computation. +# The implementation resembles _nll_loss_backward and _log_softmax_backward_data +# from torch._decomp.decomposition. +def _nll_loss_and_log_softmax_backward( + grad_output: Tensor, + x: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, + total_weight: Tensor, + channel_dim_size: int, + mesh: DeviceMesh, + mesh_dim: int, +) -> Tensor: + channel_dim = 0 if x.dim() < 2 else 1 + if reduction == Reduction.MEAN.value: + grad_output = grad_output / total_weight + + target = target.unsqueeze(channel_dim) + safe_target = torch.where(target != ignore_index, target, 0) + grad_input = torch.zeros_like(x) + + # The following code block is a distributed version of + # grad_input = torch.scatter(grad_input, channel_dim, safe_target, -1.0) + partial_placement = _MaskPartial(logical_dim_size=channel_dim_size) + safe_target = safe_target.squeeze(channel_dim).flatten() + masked_safe_target = partial_placement._partition_value(safe_target, mesh, mesh_dim) + # only update grad_input to -1 if not masked + assert partial_placement.mask_buffer.data is not None + grad_update = partial_placement.mask_buffer.data.float() - 1.0 + arange_1d = torch.arange( + masked_safe_target.shape[0], device=masked_safe_target.device + ) + # The first two cases with x.dim() <= 2 are for aten.nll_loss_backward.default; + # the last case is for aten.nll_loss2d_backward.default. 
+ if x.dim() == 1: + grad_input[masked_safe_target] = grad_update + elif x.dim() == 2: + grad_input[arange_1d, masked_safe_target] = grad_update + else: + grad_input_t = grad_input.transpose(channel_dim, -1) + intermidate_shape = grad_input_t.shape + grad_input_2d = grad_input_t.reshape(-1, x.shape[channel_dim]) + grad_input_2d[arange_1d, masked_safe_target] = grad_update + grad_input = grad_input_2d.view(intermidate_shape).transpose(channel_dim, -1) + + if grad_input.dim() > grad_output.dim() > 0: + grad_output = grad_output.unsqueeze(channel_dim) + + if weight is not None: + new_shape = [1 for _ in range(x.dim())] + new_shape[channel_dim] = weight.shape[0] + weight = weight.reshape(new_shape) + # In order for fused computation to work, the following line is rewritten. + # grad_output = grad_output * weight + new_shape = list(x.shape) + new_shape[channel_dim] = -1 + w = weight.expand(new_shape) + w_target = torch.gather(w, channel_dim, target) + grad_output = grad_output * w_target + + grad_output = torch.where(target != ignore_index, grad_output, 0) + + # NOTE: Instead of directly returning the grad_input as grad_output for log_softmax, + # here we perform backward computation for log_softmax altogether to avoid the + # otherwise extra all_gather communication. + # return grad_input * grad_output + return (grad_input + torch.exp(x)) * grad_output + + +def _nll_loss_backward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + grad_output = cast(DTensor, args[0]) + x = cast(DTensor, args[1]) + target = args[2] + weight = args[3] + reduction = cast(int, args[4]) + ignore_index = cast(int, args[5]) + total_weight = cast(Tensor, args[6]) + + channel_dim = 1 if x.dim() >= 2 else 0 + channel_dim_size = x.shape[channel_dim] + spec = x._spec + mesh_dim = _find_all_reduce_mesh_dim(spec.placements, channel_dim) + + # if target and weight are not DTensors, convert them to DTensors + target_placements = _skip_dim( + replicate_reduction_dims(spec.placements, [channel_dim]), channel_dim + ) + all_replicate_placements = (Replicate(),) * spec.mesh.ndim + target = _cast_to_dtensor(target, target_placements, spec.mesh) + if weight is not None: + weight = _cast_to_dtensor(weight, all_replicate_placements, spec.mesh) + + # tensor inputs to _propagate_tensor_meta need to be DTensors + args = list(args) + args[2], args[3] = target, weight + args[6] = _cast_to_dtensor(total_weight, all_replicate_placements, spec.mesh) + output_tensor_meta = _propagate_tensor_meta(op_call, tuple(args), kwargs) + + result = _nll_loss_and_log_softmax_backward( + grad_output._local_tensor, + x._local_tensor, + target._local_tensor, + weight._local_tensor if weight is not None else None, + reduction, + ignore_index, + total_weight, + channel_dim_size, + spec.mesh, + mesh_dim, + ) + + return DTensor( + result, + spec.mesh, + # the output sharding is the same as input sharding: Shard(channel_dim) on mesh_dim + spec.placements, + shape=output_tensor_meta.shape, + dtype=output_tensor_meta.dtype, + requires_grad=result.requires_grad, + stride=output_tensor_meta.stride, + ) + + +customized_loss_ops = { + aten._log_softmax.default: _log_softmax_handler, + aten._log_softmax_backward_data.default: _log_softmax_backward_handler, + aten.nll_loss_forward.default: _nll_loss_forward_handler, + aten.nll_loss2d_forward.default: _nll_loss_forward_handler, + aten.nll_loss_backward.default: _nll_loss_backward_handler, + aten.nll_loss2d_backward.default: _nll_loss_backward_handler, +} + + 
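Every handler registered above shares the ``handler(op_call, args, kwargs) -> object`` signature used throughout this file, and enabling them (see ``_enable_custom_loss_ops`` below) simply adds the table to the dispatcher's custom-op handlers, so dispatch amounts to a dictionary lookup keyed on the op overload before falling back to normal sharding propagation. A schematic sketch of that lookup, with hypothetical names and not the actual DTensor dispatcher code:

from typing import Callable, Dict, Tuple

def _dispatch_with_custom_handlers(
    op_call: object,
    args: Tuple[object, ...],
    kwargs: Dict[str, object],
    custom_handlers: Dict[object, Callable],
    default_handler: Callable,
) -> object:
    # Route to a registered custom handler for this op overload if present,
    # otherwise fall back to the default (sharding propagation) path.
    handler = custom_handlers.get(op_call, default_handler)
    return handler(op_call, args, kwargs)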
+def _enable_custom_loss_ops(): + DTensor._op_dispatcher._custom_op_handlers.update(customized_loss_ops) + + +def _disable_custom_loss_ops(): + for custom_op in customized_loss_ops: + DTensor._op_dispatcher._custom_op_handlers.pop(custom_op) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py new file mode 100644 index 0000000000000000000000000000000000000000..3cce417dbeff4a1907910c76e871caf97a0fdb74 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/style.py @@ -0,0 +1,489 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from abc import ABC, abstractmethod +from typing import Optional, Union, Tuple +from functools import partial + +import torch +import torch.nn as nn +from torch.distributed._tensor import DeviceMesh, DTensor, Placement, Replicate, Shard, distribute_tensor, distribute_module + + +__all__ = [ + "ParallelStyle", + "RowwiseParallel", + "SequenceParallel", + "ColwiseParallel", + "PrepareModuleInput", + "PrepareModuleOutput", +] + + +class ParallelStyle(ABC): + """ + The parallel style contract defines how the module or submodule should be parallelized. + + It only defines the ``apply`` method for ``parallelize_module`` to use, this allows maximum + flexibility for different kind of style implementations. + """ + + @abstractmethod + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + ... + + +class ColwiseParallel(ParallelStyle): + """ + Partition a compatible nn.Module in a column-wise fashion. Currently supports nn.Linear and nn.Embedding. + Users can compose it together with RowwiseParallel to achieve the sharding of more complicated modules. + (i.e. MLP, Attention) + + Keyword Args: + input_layouts (Placement, optional): + The DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to + become a DTensor. If not specified, we assume the input tensor to be replicated. + output_layouts (Placement, optional): + The DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module + with the user desired layout. If not specified, the output tensor is sharded on the last dimension. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True. + Returns: + A :class:`ParallelStyle` object that represents Colwise sharding of the nn.Module. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, ColwiseParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> m = Model(...) # m is a nn.Module that contains a "w1" nn.Linear submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # By default, the input of the "w1" Linear will be converted to Replicated DTensor + >>> # and the output of "w1" will return :class:`torch.Tensor` that shards on the last dim. + >>> + >>> sharded_mod = parallelize_module(m, tp_mesh, {"w1": ColwiseParallel()}) + >>> ... + + .. note:: By default ``ColwiseParallel`` output is sharded on the last dimension if the ``output_layouts`` not + specified, if there're operators that require specific tensor shape (i.e. before the paired ``RowwiseParallel``), + keep in mind that if the output is sharded the operator might need to be adjusted to the sharded size. 
+ """ + + def __init__( + self, + *, + input_layouts: Optional[Placement] = None, + output_layouts: Optional[Placement] = None, + use_local_output: bool = True + ): + super().__init__() + self.input_layouts = (input_layouts or Replicate(), ) + self.output_layouts = (output_layouts or Shard(-1), ) + # colwise linear runtime sharding (desired sharding): + # 1. requires replicate input + # 2. shard output on last dim + self.desired_input_layouts = (Replicate(), ) + self.use_local_output = use_local_output + + @staticmethod + def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh): + # TODO: figure out dynamo support for instance method and switch this to instance method + + # annotate module input placements/sharding with input_layouts + input_tensor = inputs[0] + if not isinstance(input_tensor, DTensor): + input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False) + + # transform the input layouts to the desired layouts of ColwiseParallel + if input_layouts != desired_input_layouts: + input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=True) + return input_tensor + + def _partition_linear_fn(self, name, module, device_mesh): + # colwise shard weight/bias to Shard(0), weight be Shard(0) + # means Colwise as Linear is input * weight^T + bias, where + # weight would become Shard(1) + for name, param in module.named_parameters(): + dist_param = nn.Parameter( + distribute_tensor(param, device_mesh, [Shard(0)]) + ) + module.register_parameter(name, dist_param) + + def _partition_embedding_fn(self, name, module, device_mesh): + # colwise shard embedding.weight is straight forward as Shard(1) + for name, param in module.named_parameters(): + dist_param = nn.Parameter( + distribute_tensor(param, device_mesh, [Shard(1)]) + ) + module.register_parameter(name, dist_param) + + @staticmethod + def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh): + # outputs is a shard on last dimension DTensor, i.e. Shard(-1) + outputs = outputs.redistribute(placements=output_layouts, async_op=True) + # back to local tensor + return outputs.to_local() if use_local_output else outputs + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + if isinstance(module, nn.Linear): + partition_fn = self._partition_linear_fn + elif isinstance(module, nn.Embedding): + partition_fn = self._partition_embedding_fn + else: + raise NotImplementedError("ColwiseParallel currently only support nn.Linear and nn.Embedding!") + + return distribute_module( + module, + device_mesh, + partition_fn, + partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), + partial(self._prepare_output_fn, self.output_layouts, self.use_local_output), + ) + + +class RowwiseParallel(ParallelStyle): + """ + Partition a compatible nn.Module in a row-wise fashion. Currently supports nn.Linear and nn.Embedding. + Users can compose it with ColwiseParallel to achieve the sharding of more complicated modules. + (i.e. MLP, Attention) + + Keyword Args: + input_layouts (Placement, optional): + The DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to + become a DTensor. If not specified, we assume the input tensor to be sharded on the last dimension. + output_layouts (Placement, optional): + The DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module + with the user desired layout. 
If not specified, the output tensor is replicated. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True. + Returns: + A :class:`ParallelStyle` object that represents Rowwise sharding of the nn.Module. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, RowwiseParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> m = Model(...) # m is a nn.Module that contains a "w2" nn.Linear submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # By default, the input of the "w2" Linear will be converted to DTensor that shards on the last dim + >>> # and the output of "w2" will return a replicated :class:`torch.Tensor`. + >>> + >>> sharded_mod = parallelize_module(m, tp_mesh, {"w2": RowwiseParallel()}), + >>> ... + """ + + def __init__( + self, + *, + input_layouts: Optional[Placement] = None, + output_layouts: Optional[Placement] = None, + use_local_output: bool = True + ): + super().__init__() + self.input_layouts = (input_layouts or Shard(-1), ) + self.output_layouts = (output_layouts or Replicate(), ) + self.use_local_output = use_local_output + + @staticmethod + def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh): + input_tensor = inputs[0] + if not isinstance(input_tensor, DTensor): + input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False) + + if input_layouts != desired_input_layouts: + input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=True) + return input_tensor + + def _partition_linear_fn(self, name, module, device_mesh): + # Rowwise shard weight to Shard(1), bias to Replicate(), weight be Shard(1) + # means Rowwise as nn.Linear is input * weight^T + bias, where + # weight would become Shard(0) + module.register_parameter("weight", nn.Parameter( + distribute_tensor(module.weight, device_mesh, [Shard(1)]) + )) + if module.bias is not None: + module.register_parameter("bias", nn.Parameter( + distribute_tensor(module.bias, device_mesh, [Replicate()]) + )) + + def _partition_embedding_fn(self, name, module, device_mesh): + # rowwise shard embedding.weight is Shard(0) + for name, param in module.named_parameters(): + dist_param = nn.Parameter( + distribute_tensor(param, device_mesh, [Shard(0)]) + ) + module.register_parameter(name, dist_param) + + @staticmethod + def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh): + # Rowwise sharding produces partial output, depending on output layouts: + # 1. to replicate -> allreduce + # 2. to shard -> reduce_scatter + outputs = outputs.redistribute(placements=output_layouts, async_op=True) + # back to local tensor if use_local_output is True + return outputs.to_local() if use_local_output else outputs + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + if isinstance(module, nn.Linear): + partition_fn = self._partition_linear_fn + # rowwise linear runtime sharding requires input tensor shard on last dim + self.desired_input_layouts: Tuple[Placement, ...] 
= (Shard(-1), ) + elif isinstance(module, nn.Embedding): + partition_fn = self._partition_embedding_fn + # rowwise embedding runtime sharding requires input tensor replicated + self.desired_input_layouts = (Replicate(), ) + else: + raise NotImplementedError("RowwiseParallel currently only support nn.Linear and nn.Embedding!") + + return distribute_module( + module, + device_mesh, + partition_fn, + partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), + partial(self._prepare_output_fn, self.output_layouts, self.use_local_output), + ) + + +class SequenceParallel(ParallelStyle): + """ + SequenceParallel replicates a compatible ``nn.Module`` parameters and runs the sharded computation with + input sharded on the sequence dimension. This currently supports ``nn.LayerNorm``, ``nn.Dropout``, and the + `RMSNorm python implementation `__ + + This style implements the operation that is described in the paper + `Reducing Activation Recomputation in Large Transformer Models `__ + + Both the input and output of the ``nn.Module`` will be sharded on the sequence dimension. + + Keyword Args: + sequence_dim (int, optional): + The sequence dimension of the input tensor for the ``nn.Module``, this is used to annotate the input tensor to + become a DTensor that is sharded on the sequence dimension, default: 1. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: False. + Returns: + A :class:`ParallelStyle` object that represents Sequence Parallel of the ``nn.Module``. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, SequenceParallel + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> m = Model(...) # m is a nn.Module that contains a "norm" nn.LayerNorm submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # By default, the input of the "norm" will be converted to DTensor that shards on the sequence dim + >>> # and the output of "norm" will return a sharded on sequence dimension :class:`DTensor`. + >>> + >>> sharded_mod = parallelize_module(m, tp_mesh, {"norm": SequenceParallel()}), + >>> ... + + .. note:: SequenceParallel style assumes ones initialization if there are weights in the nn.Module (i.e. + ``nn.LayerNorm`` or ``RMSNorm``, and they by default have ones initialization). If you have custom + inits for the weights on those modules, you need to broadcast the weights before/after parallelizing + to ensure that they are replicated. 
+ """ + def __init__( + self, + *, + sequence_dim: int = 1, + use_local_output: bool = False + ): + super().__init__() + self.sequence_dim = sequence_dim + self.use_local_output = use_local_output + + def _replicate_module_fn(self, name: str, module: nn.Module, device_mesh: DeviceMesh): + for p_name, param in module.named_parameters(): + # simple replication with fixed ones_ init from LayerNorm/RMSNorm, which allow + # us to simply just use from_local + replicated_param = torch.nn.Parameter( + DTensor.from_local(param, device_mesh, [Replicate()], run_check=False) + ) + module.register_parameter(p_name, replicated_param) + + @staticmethod + def _prepare_input_fn(sequence_dim, mod, inputs, device_mesh): + input_tensor = inputs[0] + if isinstance(input_tensor, DTensor): + return inputs + elif isinstance(input_tensor, torch.Tensor): + return DTensor.from_local(input_tensor, device_mesh, [Shard(sequence_dim)], run_check=False) + else: + raise ValueError(f"expecting input of {mod} to be a torch.Tensor or DTensor, but got {input_tensor}") + + @staticmethod + def _prepare_output_fn(use_local_output, mod, outputs, device_mesh): + return outputs.to_local() if use_local_output else outputs + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + return distribute_module( + module, + device_mesh, + self._replicate_module_fn, + partial(self._prepare_input_fn, self.sequence_dim), + partial(self._prepare_output_fn, self.use_local_output), + ) + + +class PrepareModuleInput(ParallelStyle): + """ + Configure the nn.Module's inputs to convert the input tensors of the nn.Module to DTensors at runtime according to + ``input_layouts``, and perform layout redistribution according to the ``desired_input_layouts``. + + Keyword Args: + input_layouts (Union[Placement, Tuple[Placement]]): + The DTensor layouts of input tensors for the nn.Module, this is used to convert the input tensors to + DTensors. If some inputs are not torch.Tensor or no need to convert to DTensors, ``None`` need to be specified + as a placeholder. + desired_input_layouts (Union[Placement, Tuple[Placement]]): + The desired DTensor layout of input tensors for the nn.Module, this is used to ensure the inputs of the nn.Module + have the desired DTensor layouts. This argument needs to have the same length with ``input_layouts``. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module inputs, default: False. + Returns: + A :class:`ParallelStyle` object that prepares the sharding layouts of the nn.Module's inputs. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, PrepareModuleInput + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> block = TransformerBlock(...) # block is a nn.Module that contains an "attn" Attention submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # According to the style specified below, the first input of attn will be annotated to Sharded DTensor + >>> # and then redistributed to Replicated DTensor. + >>> parallelize_module( + >>> block, # this can be a submodule or module + >>> tp_mesh, + >>> parallelize_plan={ + >>> "attn": PrepareModuleInput( + >>> input_layouts=(Shard(0), None, None, ...), + >>> desired_input_layouts=(Replicate(), None, None, ...) 
+ >>> ), + >>> } + >>> ) + """ + + def __init__( + self, + *, + input_layouts: Union[Placement, Tuple[Placement]], + desired_input_layouts: Union[Placement, Tuple[Placement]], + use_local_output: bool = False + ): + self.input_layouts = (input_layouts,) if isinstance(input_layouts, Placement) else input_layouts + self.desired_input_layouts = \ + (desired_input_layouts,) if isinstance(desired_input_layouts, Placement) else desired_input_layouts + self.use_local_output = use_local_output + assert len(self.input_layouts) == len(self.desired_input_layouts), \ + "input_layouts and desired_input_layouts should have same length!" + + def _prepare_input_fn(self, inputs, device_mesh): + prepared_inputs = [] + if not isinstance(inputs, tuple): + inputs = (inputs,) + if len(inputs) != len(self.input_layouts): + raise ValueError("module inputs and input_layouts should have same length!") + + for inp, input_layout, desired_layout in zip(inputs, self.input_layouts, self.desired_input_layouts): + if input_layout is not None: + if isinstance(inp, DTensor): + # TODO: re-enable the check once we fix the compile path + # assert inp.placements[0] == input_layout + dt_inp = inp + else: + dt_inp = DTensor.from_local(inp, device_mesh, (input_layout,), run_check=False) + if input_layout != desired_layout: + dt_inp = dt_inp.redistribute(placements=(desired_layout,)) + prepared_inputs.append(dt_inp.to_local() if self.use_local_output else dt_inp) + else: + prepared_inputs.append(inp) + return tuple(prepared_inputs) + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + module.register_forward_pre_hook(lambda _, inputs: self._prepare_input_fn(inputs, device_mesh)) # type: ignore[misc, call-arg] + return module + + +class PrepareModuleOutput(ParallelStyle): + """ + Configure the nn.Module's outputs to convert the output tensors of the nn.Module to DTensors at runtime according to + ``output_layouts``, and perform layout redistribution according to the ``desired_output_layouts``. + + Keyword Args: + output_layouts (Union[Placement, Tuple[Placement]]): + The DTensor layouts of output tensors for the nn.Module, this is used to convert the output tensors to + DTensors if they are :class:`torch.Tensor`. If some outputs are not torch.Tensor or no need to convert to DTensors, + ``None`` need to be specified as a placeholder. + desired_output_layouts (Union[Placement, Tuple[Placement]]): + The desired DTensor layouts of output tensors for the nn.Module, this is used to ensure the outputs of the nn.Module + have the desired DTensor layouts. + use_local_output (bool, optional): + Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module outputs, default: True. + Returns: + A ParallelStyle object that prepares the sharding layouts of the nn.Module's outputs. + + Example:: + >>> # xdoctest: +SKIP(failing) + >>> from torch.distributed.tensor.parallel import parallelize_module, PrepareModuleOutput + >>> from torch.distributed.device_mesh import init_device_mesh + >>> ... + >>> block = TransformerBlock(...) # block is a nn.Module that contains an "attn" Attention submodule + >>> tp_mesh = init_device_mesh("cuda", (8,)) + >>> + >>> # According to the style specified below, the output of the TransformerBlock will be converted to Replicated DTensor + >>> # and then redistributed to Sharded DTensor. 
+ >>> parallelize_module( + >>> block, # this can be a submodule or module + >>> tp_mesh, + >>> parallelize_plan = PrepareModuleOutput( + >>> output_layouts=Replicate(), + >>> desired_output_layouts=Shard(0) + >>> ) + >>> ) + """ + def __init__( + self, + *, + output_layouts: Union[Placement, Tuple[Placement]], + desired_output_layouts: Union[Placement, Tuple[Placement]], + use_local_output: bool = True + ): + self.output_layouts = (output_layouts,) if isinstance(output_layouts, Placement) else output_layouts + self.desired_output_layouts = \ + (desired_output_layouts,) if isinstance(desired_output_layouts, Placement) else desired_output_layouts + self.use_local_output = use_local_output + assert len(self.output_layouts) == len(self.desired_output_layouts), \ + "output_layouts and desired_output_layouts should have same length!" + + def _prepare_out_fn(self, outputs, device_mesh): + prepared_outputs = [] + if not isinstance(outputs, tuple): + outputs = (outputs,) + if len(outputs) != len(self.output_layouts): + raise ValueError("module outputs and output_layouts should have same length!") + for out, out_layout, desired_out_layout in zip(outputs, self.output_layouts, self.desired_output_layouts): + if out_layout is not None: + if isinstance(out, DTensor): + # TODO: re-enable the check once we fix the compile path + # assert out.placements[0] == out_layout + dt_out = out + else: + dt_out = DTensor.from_local(out, device_mesh, (out_layout,), run_check=False) + + if out_layout != desired_out_layout: + dt_out = dt_out.redistribute(placements=(desired_out_layout,)) + prepared_outputs.append(dt_out.to_local() if self.use_local_output else dt_out) + else: + prepared_outputs.append(out) + if len(prepared_outputs) == 1: + return prepared_outputs[0] + else: + return tuple(prepared_outputs) + + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: + module.register_forward_hook(lambda _, inputs, outputs: self._prepare_out_fn(outputs, device_mesh)) # type: ignore[misc, call-arg] + return module
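Taken together, these styles are meant to be composed in a single ``parallelize_module`` plan, as the docstring examples above suggest. A hedged end-to-end sketch for a toy two-layer MLP (the module and submodule names are illustrative, and an initialized distributed run, e.g. via torchrun with 8 ranks, is assumed):

import torch
import torch.nn as nn
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    RowwiseParallel,
    parallelize_module,
)

class ToyMLP(nn.Module):
    def __init__(self, dim: int = 16):
        super().__init__()
        self.w1 = nn.Linear(dim, 4 * dim)
        self.w2 = nn.Linear(4 * dim, dim)

    def forward(self, x):
        return self.w2(torch.relu(self.w1(x)))

tp_mesh = init_device_mesh("cuda", (8,))
mlp = parallelize_module(
    ToyMLP().cuda(),
    tp_mesh,
    {"w1": ColwiseParallel(), "w2": RowwiseParallel()},
)
# w1 is column-sharded and w2 row-sharded; the row-wise output is reduced back
# to a replicated plain torch.Tensor (use_local_output defaults to True).
out = mlp(torch.randn(2, 16, device="cuda"))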