diff --git a/.gitattributes b/.gitattributes index d1f14715cd6a5b9652c2f757e42432b2c9aa7298..a905506009f1c55d1b26199e02e26ce087940a64 100644 --- a/.gitattributes +++ b/.gitattributes @@ -113,3 +113,4 @@ llmeval-env/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6978fcfa630ccdf2d81604a9a16fb0ee8bba2e03 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/join.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/join.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce32ea91f726516a183a97b1179a848d634a43da Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__pycache__/join.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e97bf6d17972131e6727c41201fc5a70f4aed160 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/checkpoint_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/checkpoint_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..530a290d885f0af97e771fe02e50d0f0b6de0c4e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/__pycache__/checkpoint_wrapper.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..364648f1a7f7e6d4de71baea18412b258f15bef7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py @@ -0,0 +1,314 @@ +import warnings +from enum 
import auto, Enum +from functools import partial +from typing import Any, Callable, Dict, Iterator, Optional, Tuple + +import torch +import torch.nn as nn +from torch.autograd.graph import save_on_cpu +from torch.distributed.utils import _pack_kwargs, _replace_by_prefix, _unpack_kwargs +from torch.utils.checkpoint import checkpoint as torch_utils_checkpoint + +_CHECKPOINT_WRAPPED_MODULE = "_checkpoint_wrapped_module" +_CHECKPOINT_PREFIX = _CHECKPOINT_WRAPPED_MODULE + "." + + +class CheckpointImpl(Enum): + REENTRANT = auto() + NO_REENTRANT = auto() + + +class ActivationWrapper(torch.nn.Module): + """ + Base class for Activation Checkpoint and Activation Offload. + + Not meant to be instantiated directly. + """ + + def __init__(self, mod): + super().__init__() + self._checkpoint_wrapped_module = mod + # state_dict post hook to remove prefix to allow loading into a + # non-checkpoint wrapped module. + self._register_state_dict_hook(self._post_state_dict_hook) + # load_state_dict pre-hook to allow loading back into + # checkpoint-wrapped module. + self._register_load_state_dict_pre_hook( + self._pre_load_state_dict_hook, with_module=True + ) + + def forward(self, *args, **kwargs): + raise ValueError("Subclasses should implement forward().") + + def __getattr__(self, name: str) -> Any: + """Forward missing attributes to wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self._checkpoint_wrapped_module, name) + + def __getitem__(self, key: int) -> Any: + """Forward indexing calls in case the module is a nn.Sequential.""" + return self._checkpoint_wrapped_module.__getitem__(key) # type: ignore[operator] + + def named_parameters( + self, + *args, + **kwargs, + ) -> Iterator[Tuple[str, torch.nn.Parameter]]: + """ + Override :meth:`named_parameters()` to intercept parameter names. + + remove all occurrences of ``_CHECKPOINT_PREFIX``. + """ + for param_name, param in super().named_parameters(*args, **kwargs): + yield param_name.replace(_CHECKPOINT_PREFIX, ""), param + + @staticmethod + def _post_state_dict_hook( + module: nn.Module, + state_dict: Dict[str, Any], + prefix: str, + *args: Any, + ) -> Dict[str, Any]: + """ + _post_state_dict_hook() is called after the state_dict() of this FSDP module is executed. + + For ``checkpoint_wrapper``, it will strip checkpoint-wrapped module prefix, + so that this module can be loaded into non-checkpointed modules. + It would still be able to be loaded into checkpoint-wrapped modules as this class, + adds the prefix back before loading the state_dict. + """ + _replace_by_prefix(state_dict, f"{prefix}{_CHECKPOINT_PREFIX}", prefix) + return state_dict + + @staticmethod + def _pre_load_state_dict_hook( + module: nn.Module, + state_dict: Dict[str, Any], + prefix: str, + *args: Any, + ) -> None: + """ + ``_pre_state_dict_hook` is called before ``self._load_from_state_dict()`` is called. + + For ``checkpoint_wrapper``, it will add back the module + prefix so that non-checkpointed modules can be loaded into + checkpoint_wrapper modules properly. + """ + _replace_by_prefix(state_dict, prefix, prefix + f"{_CHECKPOINT_PREFIX}") + + +class OffloadWrapper(ActivationWrapper): + def __init__(self, mod): + super().__init__(mod) + + def forward(self, *args, **kwargs): + with save_on_cpu(pin_memory=True): + return self._checkpoint_wrapped_module(*args, **kwargs) + + +class CheckpointWrapper(ActivationWrapper): + """ + An ``nn.Module`` that wraps another ``nn.Module`` with checkpointing. 
+ + Note that this module is not meant to be used directly but instead, + it is to be used through the ``checkpoint_wrapper`` function. + """ + + def __init__( + self, + mod: torch.nn.Module, + checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT, + checkpoint_fn=None, + **checkpoint_fn_kwargs, + ): + super().__init__(mod) + self.checkpoint_impl = checkpoint_impl + if checkpoint_fn is None: + # use torch.utils.checkpoint + self.checkpoint_fn = partial( + torch_utils_checkpoint, + use_reentrant=(self.checkpoint_impl == CheckpointImpl.REENTRANT), + **checkpoint_fn_kwargs, + ) + else: + # Construct user-specified checkpoint function. + self.checkpoint_fn = partial( + checkpoint_fn, + **checkpoint_fn_kwargs, + ) + + def forward(self, *args, **kwargs): + # Support keyword arguments for reentrant checkpoint. Note that this + # only works if user has specified self.checkpoint_impl and is not + # using their own custom checkpoint_fn. + if self.checkpoint_impl == CheckpointImpl.REENTRANT and kwargs != {}: + # Pack the args and kwargs + flat_args, kwarg_keys = _pack_kwargs(*args, **kwargs) + + # Function that only takes (packed) args, but can unpack them + # into the original args and kwargs for the checkpointed + # function, and runs that function. + def my_function(*inputs): + # unpack back into args and kwargs + unpacked_args, unpacked_kwargs = _unpack_kwargs(inputs, kwarg_keys) + # run original module + return self._checkpoint_wrapped_module( + *unpacked_args, **unpacked_kwargs + ) + + # Pass the function that only takes packed args into reentrant + # checkpoint API. + return self.checkpoint_fn( # type: ignore[misc] + my_function, + *flat_args, + ) + else: + return self.checkpoint_fn( # type: ignore[misc] + self._checkpoint_wrapped_module, *args, **kwargs + ) + + +def offload_wrapper(module: torch.nn.Module) -> torch.nn.Module: + """ + Wrap a module for activation offloading to CPU. + + Offloads intermediate activations to the CPU for modules wrapped with this function. + Wrappers with activation offload can be composed with ones that do recomputation-based + checkpoint to trade off increased compute versus increased CPU + memory usage and additional H2D transfers. + + Usage:: + offloaded_module = offload_wrapper(module) + outputs = checkpointed_module(inputs) + Args: + module (nn.Module): + The module to be wrapped + Returns: + (nn.Module): + Wrapped module + """ + return OffloadWrapper(module) + + +def checkpoint_wrapper( + module: torch.nn.Module, + checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT, + checkpoint_fn=None, + **checkpoint_fn_kwargs, +) -> torch.nn.Module: + """ + Wrap a module for activation checkpointing. + + If the module is wrapped with this function, all subsequent calls to the module will, + automatically perform checkpointing without the user having to explicitly call ``checkpoint`` function. + + Usage:: + checkpointed_module = checkpoint_wrapper(module) + outputs = checkpointed_module(inputs) + Args: + module (nn.Module): + The module to be wrapped + checkpoint_impl (Optional[CheckpointImpl]): + The checkpointing implementation to use. Note that this will only + be passed into the ``torch.utils.checkpoint.checkpoint`` + implementation, and is ignored if a custom ``checkpoint_fn`` is + specified. Note that for implementations using reentrant checkpoint + from ``torch.utils.checkpoint``, keyword arguments will only be + supported if ``checkpoint_impl`` is passed as ``CheckpointImpl.REENTRANT`. 
+ checkpoint_fn (Optional[Callable]): + Functional checkpoint implementation to use. If this is specified, + it will be used over the default ``torch.utils.checkpoint.checkpoint`` + implementation and the `checkpoint_impl` argument will be ignored. + **checkpoint_fn_kwargs: (Dict[str, Any]): Keyword arguments to pass into `checkpoint_fn`. + + Returns: + (nn.Module): + Wrapped module + """ + + if checkpoint_impl == CheckpointImpl.REENTRANT: + warnings.warn( + f"Please specify {CheckpointImpl.NO_REENTRANT} as " + f"{CheckpointImpl.REENTRANT} will soon be removed as " + "the default and eventually deprecated.", + stacklevel=1, + ) + return CheckpointWrapper( + module, + checkpoint_impl, + checkpoint_fn, + **checkpoint_fn_kwargs, + ) + + +def apply_activation_checkpointing( + model, + checkpoint_wrapper_fn=checkpoint_wrapper, + check_fn=lambda _: True, + auto_wrap_policy: Optional[Callable[[nn.Module, bool, int], bool]] = None, +): + """ + Apply :func:`checkpoint_wrapper` to modules within `model` based on a user-defined configuration. + + For each module within `model`, the `check_fn` is used to decide + whether `module` should be wrapped with :func:`checkpoint_wrapper` or not. + + Note:: + This function modifies `model` in place and replaces appropriate layers with + their checkpoint-wrapped modules. + Note:: + This function will not wrap the overall root module. If this is needed, please directly use + :func:`checkpoint_wrapper` or :func:`offload_wrapper`. + Usage:: + model = nn.Sequential( + nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10) + ) + check_fn = lambda l: isinstance(l, nn.Linear) + # checkpoint activations + apply_activation_checkpointing(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn) + # Or offload activations to CPU + apply_activation_checkpointing(model, checkpoint_wrapper_fn=offload_wrapper, check_fn=check_fn) + Args: + model (nn.Module): + The model whose submodules should be wrapped with activation checkpointing. + checkpoint_wrapper_fn (Optional[Callable[nn.Module]]) + A ``Callable`` which will wrap modules + check_fn (Optional[Callable[nn.Module, nn.Module]]) + A lambda function which will be passed each child submodule of ``model`` and returns + ``True`` or ``False`` depending on whether the submodule should be wrapped. + auto_wrap_policy (Optional[Callable[[nn.Module, bool, int], bool]]): A policy to wrap model's + submodules with AC. Note that if this is specified, it takes precedence over ``check_fn``. + Returns: None (`model` is modified inplace) + """ + # TODO: Importing inside function to avoid circular import issue between FSDP and + # checkpoint_wrapper. This can be resolved once wrap() APIs are decoupled from FSDP code. 
+ from torch.distributed.fsdp.wrap import _recursive_wrap, lambda_auto_wrap_policy, _Policy + from torch.distributed.fsdp._wrap_utils import _construct_wrap_fn, _post_order_apply + + policy = ( + auto_wrap_policy + if auto_wrap_policy is not None + else partial(lambda_auto_wrap_policy, lambda_fn=check_fn) + ) + if not callable(policy): + if not isinstance(policy, _Policy): + raise ValueError( + f"Expected {policy} to be callable or be a pre-defined wrap policy" + ) + target_module_to_kwargs = policy._run_policy( + model, ignored_modules=set(), root_kwargs={} + ) + wrap_fn = _construct_wrap_fn(model, target_module_to_kwargs, checkpoint_wrapper_fn) + _post_order_apply(model, wrap_fn) + return + + _recursive_wrap( + module=model, + auto_wrap_policy=policy, # type: ignore[arg-type] + wrapper_cls=checkpoint_wrapper_fn, + ignored_modules=set(), + ignored_params=set(), + only_wrap_children=True, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d07adc17247b71fc3d0496ff193b32bf7fb8ce52 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__init__.py @@ -0,0 +1,7 @@ + +from . import default_hooks as default + +LOW_PRECISION_HOOKS = [ + default.fp16_compress_hook, + default.bf16_compress_hook, +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4623a02104dc8533a0b3549faff1fe578fcb7521 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/default_hooks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/default_hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0488f55e6dd59b556bcb6afeabc75521e899aae0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/__pycache__/default_hooks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/default_hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/default_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..419b883418c607ae068000370ae980f6901a5582 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_comm_hooks/default_hooks.py @@ -0,0 +1,165 @@ +import functools +import torch +import torch.distributed as dist +from typing import Optional + + +class DefaultState: + r""" + Stores state needed to perform the default communication algorithm within a communication hook. + + Args: + process_group (ProcessGroup): The process group to be used. 
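+
+    A minimal usage sketch; ``fsdp_model`` and ``process_group`` are placeholder
+    names for an already-constructed FSDP instance and its process group, and the
+    registration entry point is assumed to be ``FullyShardedDataParallel.register_comm_hook``:
+
+    Example::
+        >>> # xdoctest: +SKIP
+        >>> state = DefaultState(process_group=process_group)
+        >>> fsdp_model.register_comm_hook(state, allreduce_hook)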
+ """ + + __slots__ = [ + "process_group", + "world_size", + "gradient_predivide_factor", + "gradient_postdivide_factor" + ] + + def __init__( + self, + process_group: dist.ProcessGroup + ): + if process_group is None: + raise ValueError(f"Expected to pass in an explicit ProcessGroup to {self}.") + self.process_group = process_group + self.world_size = dist.get_world_size(process_group) + # Setting two factors `self.gradient_predivide_factor` + # and `self.gradient_postdivide_factor` to avoid underflow and overflow + self.gradient_predivide_factor = self._get_gradient_predivide_factor( + self.world_size + ) + self.gradient_postdivide_factor = self.world_size / self.gradient_predivide_factor + + @staticmethod + def _get_gradient_predivide_factor(world_size: int) -> float: + factor: int = 1 + while world_size % factor == 0 and world_size / factor > factor: + factor *= 2 + return float(factor) + +class LowPrecisionState(DefaultState): + r""" + Stores state needed to perform gradient communication in a lower precision within a communication hook. + + Communication hook will cast gradients back to the original + parameter precision specified by ``parameter_type`` (default: torch.float32). + Builds on top of the :class:`DefaultState`. + + Args: + parameter_type (torch.dtype): The precision of model's parameters. + Required for a hook to cast gradients back to a parameter's precision. + """ + + __slots__ = [ + "parameter_type", + ] + + def __init__( + self, + process_group, + parameter_type=torch.float32, + ): + super().__init__(process_group) + self.parameter_type = parameter_type + + +def _decompress(state: LowPrecisionState, grad: torch.Tensor): + """ + Casts gradients back to full parameter precision so that further computation happens in full precision. + """ + orig_grad_data = grad.data + grad.data = grad.data.to(state.parameter_type) + # Don't let this memory get reused until after the transfer. + orig_grad_data.record_stream(torch.cuda.current_stream()) # type: ignore[arg-type] + +def allreduce_hook(state: DefaultState, grad: torch.Tensor): + r""" + Implement the FSDP communication hook for ``all_reduce`` algorithm and a necessary pre- and post-division of gradients. + + Args: + state (DefaultState): State information, configures pre- and post-division factors. + grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks. + """ + # Average grad by pre-division factor. Together pre- and post-division factors + # lead to an overall averaging by world_size, required for consistency with PyTorch DDP. + # This is a two-step process to avoid potential underflow and overflow. + if state.gradient_predivide_factor > 1: + grad.div_(state.gradient_predivide_factor) + dist.all_reduce(grad, group=state.process_group) + # Average grad by post-division factor. + if state.gradient_postdivide_factor > 1: + grad.div_(state.gradient_postdivide_factor) + +def reduce_scatter_hook(state: DefaultState, grad: torch.Tensor, output: torch.Tensor): + r""" + Implement the FSDP communication hook for ``reduce_scatter`` algorithm. + + For sharded FSDP strategies and a necessary pre- and post-division of gradients. + + Args: + state (DefaultState): State information, configures pre- and post-division factors. + grad (torch.Tensor): An unsharded gradient for the local batch that needs to be + communicated across ranks. + output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``. + """ + # Average grad by pre-division factor. 
+ if state.gradient_predivide_factor > 1: + grad.div_(state.gradient_predivide_factor) + dist.reduce_scatter_tensor( + output, grad, group=state.process_group + ) + # Average grad's shard by post-division factor. + if state.gradient_postdivide_factor > 1: + output.div_(state.gradient_postdivide_factor) + +def _low_precision_hook(prec: torch.dtype, state: LowPrecisionState, grad: torch.Tensor, output: torch.Tensor): + if grad.dtype != prec: + grad.data = grad.data.to(prec) + if output is not None: + if output.dtype != prec: + output.data = output.data.to(prec) + reduce_scatter_hook(state, grad, output) + _decompress(state, output) + else: + allreduce_hook(state, grad) + _decompress(state, grad) + +def fp16_compress_hook(state: LowPrecisionState, grad: torch.Tensor, output: Optional[torch.Tensor] = None): + r""" + Implement FSDP communication hook for a simple gradient compression approach. + Casts ``grad`` to half-precision floating-point format (``torch.float16``). + + It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a + ``state.gradient_predivide_factor``, and after a communication step (``all_reduce`` or ``reduce_scatter``) + gradients are averaged by a ``state.gradient_postdivide_factor``. + Once post-division is done, compressed gradients are casted back to parameters' precision. + + Args: + state (LowPrecisionState): State information, configures pre- and post-division factors, parameters' precision. + grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision. + output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``. + """ + fp16_hook = functools.partial(_low_precision_hook, torch.float16) + return fp16_hook(state, grad, output) + +def bf16_compress_hook(state: LowPrecisionState, grad: torch.Tensor, output: Optional[torch.Tensor] = None): + r""" + Implement FSDP communication hook for a simple gradient compression approach . + Casts ``grad`` to half-precision floating-point format. + + It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a + ``state.gradient_predivide_factor``, and after a communication step (``all_reduce`` or ``reduce_scatter``) + gradients are averaged by a ``state.gradient_postdivide_factor``. + Once post-division is done, compressed gradients are casted back to parameters' precision. + + Args: + state (LowPrecisionState): State information, configures pre- and post-division factors, parameters' precision. + grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision. + output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``. 
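+
+    A usage sketch, assuming ``fsdp_model`` and ``process_group`` are placeholders
+    for an existing FSDP instance and its process group:
+
+    Example::
+        >>> # xdoctest: +SKIP
+        >>> state = LowPrecisionState(process_group, parameter_type=torch.float32)
+        >>> fsdp_model.register_comm_hook(state, bf16_compress_hook)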
+ """ + bf16_hook = functools.partial(_low_precision_hook, torch.bfloat16) + return bf16_hook(state, grad, output) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba62bfb68f42a136dcfa27bcf378d3892cf6751a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__init__.py @@ -0,0 +1 @@ +from .optimizer_overlap import _as_overlapped_optim diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e71339860d3d93974daa0084d4d6d15b381b3fd8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/optimizer_overlap.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/optimizer_overlap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a5c2a235941b40b7bc122fbc7b9d8fab01d66fb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/__pycache__/optimizer_overlap.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py new file mode 100644 index 0000000000000000000000000000000000000000..8044557e71dc199657ee42776e329b69dec6dfaa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py @@ -0,0 +1,93 @@ +from abc import ABC, abstractmethod +import inspect +from typing import Dict, Type + +from torch.distributed.fsdp import FullyShardedDataParallel +from torch.nn.parallel import DistributedDataParallel +from torch.optim import Optimizer +from torch.distributed.optim import as_functional_optim + +from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook + +from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import ( + _OptimizerHookState, + _hook_then_optimizer +) + +# Contains the mappings between the regular and overlapped optimizer types. +_registered_overlapped_optims: Dict[Type, Type] = {} + + +def register_overlapped(optim_cls): + def decorator(target_overlapped_optim_cls): + if target_overlapped_optim_cls in _registered_overlapped_optims: + raise ValueError( + f"{target_overlapped_optim_cls} already registered with optim_cls " + f"{_registered_overlapped_optims[optim_cls]} {optim_cls}, trying to" + f"re-register it for {optim_cls} is not supported." + ) + _registered_overlapped_optims[optim_cls] = target_overlapped_optim_cls + return target_overlapped_optim_cls + return decorator + + +class OverlappedOptimizer(ABC): + def __init__(self, optim_cls: Type) -> None: + """ + Initialize the OverlappedOptimizer. 
+ + Overlappedoptimizer is a base class that child classes can implement to + specify how different optimizers will register themselves with DDP. + """ + self.optim_cls = optim_cls + + @abstractmethod + def register_ddp(self, ddp: DistributedDataParallel) -> None: + """Registers the overlapped optimizer with DDP.""" + raise NotImplementedError( + f"{self.__class__.__name__} does not support overlapped DDP." + ) + + @abstractmethod + def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None: + """Registers the overlapped optimizer with FSDP.""" + raise NotImplementedError( + f"{self.__class__.__name__} does not support overlapped FSDP." + ) + + +@register_overlapped(Optimizer) +class _OverlappedStandardOptimizer(OverlappedOptimizer): + """Overlaps a regular ``Optimizer``.""" + + def __init__(self, optim_cls: Type, params, *optim_args, **optim_kwargs) -> None: + super().__init__(optim_cls) + f_optim = as_functional_optim(self.optim_cls, *optim_args, **optim_kwargs) + self._opt_hook_state = _OptimizerHookState(f_optim, params) + + def register_ddp(self, ddp_inst: DistributedDataParallel): + # NOTE: using a custom communication hook and fused optimizer is not + # yet supported. + ddp_inst.register_comm_hook( # type: ignore[operator] + None, # wrapped hook state + _hook_then_optimizer(allreduce_hook, self._opt_hook_state) + ) + + # TODO: register_fsdp once FSDP supports communication hook. + def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None: + """Register the overlapped optimizer with FSDP.""" + raise NotImplementedError( + f"{self.__class__.__name__} does not support overlapped FSDP." + ) + +def _as_overlapped_optim(optim_cls: Type, params, *args, **kwargs): + """Return a new ``OverlappedOptimizer`` instance that supports ``optim_cls``.""" + for clz in inspect.getmro(optim_cls): + try: + return _registered_overlapped_optims[clz](optim_cls, params, *args, **kwargs) + except KeyError: + pass + + # Fallback to standard overlapped optimizer, which will raise errors if user + # is attempting to use an unsupported optimizer. 
+ return _OverlappedStandardOptimizer(optim_cls, params, *args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ba01060f925d354eeabe8a8265c7cdf746ba5b2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/quantization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/quantization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d17663ac61bac051306d001421e4d4dbb04b41e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/__pycache__/quantization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/quantization.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..911cc8255ee5c970ee8cb2db0464695b1b401a2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/_quantization/quantization.py @@ -0,0 +1,144 @@ +import functools +import torch +import torch.distributed as dist + + +from enum import Enum + + +TORCH_HALF_MIN = torch.finfo(torch.float16).min +TORCH_HALF_MAX = torch.finfo(torch.float16).max + +class DQuantType(Enum): + """ + Different quantization methods for auto_quantize API are identified here. + + auto_quantize API currently supports fp16 and bfp16 methods. 
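+
+    A usage sketch for the :func:`auto_quantize` wrapper defined below;
+    ``tensor_list``, ``tensor`` and ``group`` are placeholder names:
+
+    Example::
+        >>> # xdoctest: +SKIP
+        >>> quantized_all_gather = auto_quantize(dist.all_gather, DQuantType.FP16)
+        >>> quantized_all_gather(tensor_list, tensor, group=group)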
+ """ + FP16 = "fp16", + BFP16 = "bfp16" + + def __str__(self) -> str: + return self.value + + +def _fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor: + return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half() + +def _quantize_tensor(tensor, qtype): + if not isinstance(tensor, torch.Tensor): + raise RuntimeError( + f"_quantize_tensor expecting torch.Tensor as input but found {type(tensor)}" + ) + if qtype == DQuantType.FP16: + return _fp32_to_fp16_with_clamp(tensor) + elif qtype == DQuantType.BFP16: + return torch.ops.quantization._FloatToBfloat16Quantized(tensor) + else: + raise RuntimeError( + f'Quantization type {qtype} is not supported' + ) + +def _quantize_tensor_list(tensor_list, qtype): + if not isinstance(tensor_list, list) or not all( + isinstance(p, torch.Tensor) for p in tensor_list + ): + raise RuntimeError( + f"_quantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}" + ) + quantized_tensor_list = [_quantize_tensor(t, qtype) for t in tensor_list] + return quantized_tensor_list + +def _dequantize_tensor(tensor, qtype, quant_loss=None): + if not isinstance(tensor, torch.Tensor): + raise RuntimeError( + f"_dequantize_tensor expecting torch.Tensor as input but found {type(tensor)}" + ) + if qtype == DQuantType.FP16: + if tensor.dtype != torch.float16: + raise RuntimeError( + f"tensor dtype is {tensor.dtype} while expected to be FP16." + ) + elif tensor.dtype == torch.float16 and quant_loss is None: + return tensor.float() + else: + return tensor.float() / quant_loss + elif qtype == DQuantType.BFP16: + if tensor.dtype != torch.float16: + raise RuntimeError( + f"tensor dtype is {tensor.dtype} while expected to be FP16." + ) + else: + return torch.ops.quantization._Bfloat16QuantizedToFloat(tensor) + else: + raise RuntimeError( + f'Quantization type {qtype} is not supported' + ) + + +def _dequantize_tensor_list(tensor_list, qtype, quant_loss=None): + if not isinstance(tensor_list, list) or not all( + isinstance(p, torch.Tensor) for p in tensor_list + ): + raise RuntimeError( + f"_dequantize_tensor_list expecting list of torch.Tensor as input but found {type(tensor_list)}" + ) + dequantized_tensor_list = [_dequantize_tensor(t, qtype) for t in tensor_list] + return dequantized_tensor_list + + +def auto_quantize(func, qtype, quant_loss=None): + """ + Quantize the input tensors, choose the precision types, and pass other necessary arguments and then dequantizes the output. + + Currently it only supports: + . FP16 and BFP16 quantization method supported for gloo and nccl backends + . all_gather, all_to_all collective ops + Note: BFP16 only supports 2D tensors. + Args: + func (Callable): A function representing collective operations. + qtype (QuantType): Quantization method + quant_loss (float, optional): This can be used to improve accuracy in the dequantization. + Returns: + (Callable): the same collective as func but enables automatic quantization/dequantization. + """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + group = kwargs.get('group', None) + async_op = kwargs.get('async_op', False) + if async_op is True: + raise RuntimeError( + 'The async_op=True mode is not supported yet.' 
+ ) + if func == dist.all_gather: + tensors = args[0] + input_tensors = _quantize_tensor(args[1], qtype) + out_tensors = _quantize_tensor_list(tensors, qtype) + dist.all_gather(out_tensors, input_tensors, group=group, async_op=async_op) + for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)): + tensors[i] = t + + elif func == dist.all_to_all: + tensors = args[0] + input_tensors = _quantize_tensor_list(args[1], qtype) + out_tensors = _quantize_tensor_list(tensors, qtype) + dist.all_to_all(out_tensors, input_tensors, group=group, async_op=async_op) + for i, t in enumerate(_dequantize_tensor_list(out_tensors, qtype, quant_loss=quant_loss)): + tensors[i] = t + + elif func == dist.all_to_all_single: + tensors = args[0] + out_splits = kwargs.get('out_splits', None) + in_splits = kwargs.get('in_splits', None) + # Quantizing the input/output tensor + input_tensors = _quantize_tensor(args[1], qtype) + out_tensors = _quantize_tensor(tensors, qtype) + dist.all_to_all_single(out_tensors, input_tensors, out_splits, in_splits, group=group) + for i, t in enumerate(_dequantize_tensor(out_tensors, qtype, quant_loss=quant_loss)): + tensors[i] = t + else: + raise RuntimeError( + f"The collective op {func} is not supported yet" + ) + + return wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..570aa34cf02ee41ca24dce90ec2a2c5aee843e7d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__init__.py @@ -0,0 +1,108 @@ +from enum import Enum +from functools import partial + +import torch.distributed as dist + +from . import ( + debugging_hooks as debugging, + default_hooks as default, + powerSGD_hook as powerSGD, + quantization_hooks as quantization, + optimizer_overlap_hooks as optimizer_overlap, +) + +__all__ = ['DDPCommHookType', 'register_ddp_comm_hook'] + +def _ddp_comm_hook_wrapper(comm_hook, model, state): + model.register_comm_hook(state, comm_hook) + + +def _powerSGD_comm_hook_wrapper( + comm_hook, + model, + state, + matrix_approximation_rank, + start_powerSGD_iter=1_000, +): + """ + Wrap PowerSGD communication hook. + + To be consistent with the wrappers of other DDP comm hooks, the input state only needs to be a process group, + which will be wrapped up with other state info. + """ + powerSGD_state = powerSGD.PowerSGDState( + process_group=state, + matrix_approximation_rank=matrix_approximation_rank, + start_powerSGD_iter=start_powerSGD_iter, + ) + model.register_comm_hook(powerSGD_state, comm_hook) + + +class DDPCommHookType(Enum): + """ + Enumerate ``ddp_comm_hooks`` and ``ddp_comm_hook_wrapper`` communucation hook types. + + DDPCommHookType enumerates the hooks of ``torch.distributed.algorithms.ddp_comm_hooks`` + as names and ``ddp_comm_hook_wrapper`` partials with hook specified. As an example, + you can register allreduce hook by + ``DDPCommHookType.ALLREDUCE.value(model=model, state=process_group)``. 
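+
+    A concrete sketch of the registration call described above; ``model`` and
+    ``process_group`` are placeholders for a DDP-wrapped model and its process group:
+
+    Example::
+        >>> # xdoctest: +SKIP
+        >>> DDPCommHookType.FP16_COMPRESS.value(model=model, state=process_group)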
+ """ + + ALLREDUCE = partial(_ddp_comm_hook_wrapper, comm_hook=default.allreduce_hook) + FP16_COMPRESS = partial( + _ddp_comm_hook_wrapper, comm_hook=default.fp16_compress_hook + ) + BF16_COMPRESS = partial( + _ddp_comm_hook_wrapper, comm_hook=default.bf16_compress_hook + ) + QUANTIZE_PER_TENSOR = partial( + _ddp_comm_hook_wrapper, comm_hook=quantization.quantization_pertensor_hook + ) + QUANTIZE_PER_CHANNEL = partial( + _ddp_comm_hook_wrapper, comm_hook=quantization.quantization_perchannel_hook + ) + POWER_SGD = partial( + _powerSGD_comm_hook_wrapper, + comm_hook=powerSGD.powerSGD_hook, + matrix_approximation_rank=1, + ) + # Rank-2 PowerSGD can give a higher accuracy than the default rank-1 version, + # but it runs slower and consumes more memory. + POWER_SGD_RANK2 = partial( + _powerSGD_comm_hook_wrapper, + comm_hook=powerSGD.powerSGD_hook, + matrix_approximation_rank=2, + ) + # Batching can lead to a faster training at the cost of accuracy. + BATCHED_POWER_SGD = partial( + _powerSGD_comm_hook_wrapper, + comm_hook=powerSGD.batched_powerSGD_hook, + matrix_approximation_rank=1, + ) + BATCHED_POWER_SGD_RANK2 = partial( + _powerSGD_comm_hook_wrapper, + comm_hook=powerSGD.batched_powerSGD_hook, + matrix_approximation_rank=2, + ) + NOOP = partial( + _ddp_comm_hook_wrapper, comm_hook=debugging.noop_hook, + ) + + +def register_ddp_comm_hook( + comm_hook_type: DDPCommHookType, model, state=None +): + """ + Register ``ddp_comm_hooks`` to DDP model. + + Registers the hooks of ``torch.distributed.algorithms.ddp_comm_hooks`` + to the DDP model. User can specify the type of hook as an enum + ``DDPCommHookType`` type using ``comm_hook_type`` input. State input will + be passed to the model. + Uses Python comm hook implementations. + + Example:: + >>> # xdoctest: +SKIP + >>> register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, model, state) + """ + comm_hook_type.value(model=model, state=state) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9a1e1a276e2d9b059f8ad10ccf7c72e0a10dd20 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/ddp_zero_hook.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/ddp_zero_hook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1448908e5844f9e453e0a394f2cd225fa56a3ce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/ddp_zero_hook.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/debugging_hooks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/debugging_hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd4704272b69fb2c7d73b2fc7e7ded14932fab34 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/debugging_hooks.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/default_hooks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/default_hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a688154a08c6b5fcd0bf44c8b6b7b844bc3e594 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/default_hooks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/mixed_precision_hooks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/mixed_precision_hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86cebc95af2bfc566449d1ca53fa9a874bd0cc53 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/mixed_precision_hooks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/optimizer_overlap_hooks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/optimizer_overlap_hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa0073f0a0806058faf9bb2eb821e87de643312a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/optimizer_overlap_hooks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/post_localSGD_hook.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/post_localSGD_hook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b079c99cf8cb902cd893cd58f3c65b55f6337f8f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/post_localSGD_hook.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/powerSGD_hook.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/powerSGD_hook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8955c240bb42069e6fec5ae79755f51fc5961070 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/powerSGD_hook.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/quantization_hooks.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/quantization_hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de6bddb6dafe125bed985293b14fde7739dd76cd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/__pycache__/quantization_hooks.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py new file 
mode 100644 index 0000000000000000000000000000000000000000..52f9b419ab14718b97e0da5dcb62f7cbd66685c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py @@ -0,0 +1,448 @@ +import weakref +from typing import Any, Callable, List, Optional + +import torch +import torch.distributed as dist +from torch.distributed.optim import ZeroRedundancyOptimizer +from torch.distributed.optim.zero_redundancy_optimizer import ( + _OverlapStatus, +) +from torch.nn.parallel.distributed import DistributedDataParallel + +__all__ = ["hook_with_zero_step", "hook_with_zero_step_interleaved"] + +# Functional optimizers require passing a list of gradients to their `step()` +# method, and ZeRO requires a functional optimizer to overlap with DDP +# Passing a `None` instead of an actual gradient indicates to the optimizer +# to not update the corresponding parameter +_NO_PARAM_UPDATE: None = None + + +def _perform_local_step( + bucket: dist.GradBucket, + zero: ZeroRedundancyOptimizer, + rank: int, +): + r""" + Perform a local optimizer step using the gradients provided by ``bucket``. + + Arguments: + bucket (dist.GradBucket): the bucket providing the gradients. + zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer` + instance to perform the :meth:`_local_step`. + rank (int): the calling process's rank. + + .. warning:: + This function assumes that appropriate synchronization has taken place + so that the bucket's gradients can be used. + """ + overlap_info = zero._overlap_info + bucket_index = bucket.index() + assert len(zero.optim.param_groups) == 1, \ + "Overlapping DDP with ZeRO only supports a single parameter group" + + # Construct the `gradients` input for the local optimizer step, which + # expects `None` in a list position to indicate that the corresponding + # parameter should not be updated + num_local_optim_params = len(zero.optim.param_groups[0]["params"]) + gradients: List[Optional[torch.Tensor]] = \ + [_NO_PARAM_UPDATE for _ in range(num_local_optim_params)] + assert bucket_index in overlap_info.offsets, \ + f"Bucket index {bucket_index} was not assigned to rank {rank}" + gradients_offset = overlap_info.offsets[bucket_index] + bucket_assignment = zero._bucket_assignments_per_rank[rank][bucket_index] + bucket_offset = bucket_assignment.offset + length = len(bucket_assignment.parameters) + bucket_gradients = bucket.gradients()[bucket_offset:bucket_offset + length] + for i, grad in enumerate(bucket_gradients): + gradients[gradients_offset + i] = grad + + zero._local_step(gradients) + + +def _broadcast_bucket( + bucket_index: int, + zero: ZeroRedundancyOptimizer, +): + r""" + Broadcasts a bucket's parameters. + + Arguments: + bucket_index (int): the index of the bucket corresponding to the + parameters to broadcast. + zero (ZeroRedundancyOptimizer): the calling process's + :class:`ZeroRedundancyOptimizer` instance. 
+ """ + overlap_info = zero._overlap_info + assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, \ + "`assigned_ranks_per_bucket` is not fully constructed" + # Sort to ensure the same ordering across ranks + assigned_ranks = sorted(overlap_info.assigned_ranks_per_bucket[bucket_index]) + assert len(assigned_ranks) > 0, f"Bucket {bucket_index} should be " \ + "assigned to at least one rank" + for assigned_rank in assigned_ranks: + bucket_assignments = zero._bucket_assignments_per_rank[assigned_rank] + if bucket_index in bucket_assignments: + overlap_info.broadcast_handles.append( + dist.broadcast( + bucket_assignments[bucket_index].tensor, + src=dist.get_global_rank(zero.process_group, assigned_rank), + group=zero.process_group, + async_op=True, + ) + ) + + +def _save_ddp_bucket_info( + bucket: dist.GradBucket, + zero: ZeroRedundancyOptimizer, +): + r""" + Save :class:`DistributedDataParallel` gradient bucket information for :class:`ZeroRedundancyOptimizer` instance ``zero``. + + In particular, this function is meant to be called upon seeing each + gradient bucket to use when overlapping, meaning it does not save or compute any global + information. + + Arguments: + bucket (dist.GradBucket): the current gradient bucket. + zero (ZeroRedundancyOptimizer): the calling process's + :class:`ZeroRedundancyOptimizer` instance. + """ + overlap_info = zero._overlap_info + bucket_params = bucket.parameters() + assert len(bucket_params) > 0, "Empty bucket" + + # Save the parameters in the bucket + overlap_info.params_per_bucket.append(bucket_params) + if overlap_info.shard_buckets: + # Additionally save the bucket size for the assignment heuristic to use + bucket_size = 0 + for param in bucket_params: + bucket_size += param.numel() + assert overlap_info.total_size is not None + overlap_info.total_size += bucket_size + + +def _hook_with_zero_step_setup( + ddp_ref: weakref.ReferenceType, + zero: ZeroRedundancyOptimizer, + bucket: dist.GradBucket, +): + r""" + Encapsulate the setup logic for :func:`hook_with_zero_step` and :func:`hook_with_zero_step_interleaved`. + + This means the logic to run in the + hook before the backward pass and optimizer step can actually be + overlapped. This is factored out since it is common to both + :func:`hook_with_zero_step` and :func:`hook_with_zero_step_interleaved`. + + Arguments: + ddp_ref (weakref.ReferenceType): weak reference to the process's + :class:`DistributedDataParallel` instance. + zero (ZeroRedundancyOptimizer): the calling process's + :class:`ZeroRedundancyOptimizer` instance. + bucket (dist.GradBucket): the current gradient bucket. 
+ """ + # Proceed as normal until the DDP buckets have been rebuilt + if not ddp_ref()._has_rebuilt_buckets: # type: ignore[union-attr] + assert zero._overlap_info.status == _OverlapStatus.UNINITIALIZED + return + + bucket_index = bucket.index() + overlap_info = zero._overlap_info + if overlap_info.status == _OverlapStatus.UNINITIALIZED: + overlap_info.status = _OverlapStatus.DDP_HAS_REBUILT_BUCKETS + + if overlap_info.status == _OverlapStatus.DDP_HAS_REBUILT_BUCKETS: + if bucket_index == 0 and len(overlap_info.params_per_bucket) > 0: + # This corresponds to the first bucket of the backward pass + # immediately after all information has been saved, so we + # can perform the delayed ZeRO initialization + zero._init_zero_for_overlap() + else: + # Once DDP buckets have been rebuilt but ZeRO has not been + # properly initialized yet, save the information needed + _save_ddp_bucket_info(bucket, zero) + + +def hook_with_zero_step( + hook: Callable[[Any, dist.GradBucket], torch.futures.Future], + ddp: DistributedDataParallel, + zero: ZeroRedundancyOptimizer, + shard_buckets: bool = False, +) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: + r""" + Modify ``hook`` to overlap :class:`ZeroRedundancyOptimizer` optimizer step with :class:`DistributedDataParallel` backward pass. + + This approach overlaps the optimizer computation and communication with the + backward communication. In particular, the backward computation proceeds + contiguously, and the optimizer computation follows, overlapping with + outstanding backward communication (i.e. all-reduces) and possibly other + optimizer communication (i.e. broadcasts). + The optimizer step computation begins after the last gradient bucket computation has finished. + + This approach may be preferred over :meth:`hook_with_zero_step_interleaved` + if communication is relatively slow compared to computation. + + Arguments: + hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook + to modify. + ddp (DistributedDataParallel): the :class:`DistributedDataParallel` + instance to use. + zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer` + instance to use. + shard_buckets (bool): if ``True``, then the assignment of each + :class:`DistributedDataParallel` bucket is partitioned across + possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e. + across possibly multiple ranks) to approximate uniformity; if + ``False``, then each bucket is wholly assigned to a single + :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank). + + Returns: + The modified hook. + + Raises: + ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``. + RuntimeError: if using any backend other than NCCL/HCCL since currently + Gloo may hang. + + .. warning:: + Given the way that overlapping :class:`DistributedDataParallel` with + :class:`ZeroRedundancyOptimizer` is currently implemented, the first + two or three training iterations do not perform parameter updates in + the optimizer step, depending on if ``static_graph=False`` or + ``static_graph=True``, respectively. This is because it needs + information about the gradient bucketing strategy used by + :class:`DistributedDataParallel`, which is not finalized until the + second forward pass if ``static_graph=False`` or until the third + forward pass if ``static_graph=True``. 
+ """ + if not zero._overlap_with_ddp: + raise ValueError( + "ZeroRedundancyOptimizer must be constructed with " + "`overlap_with_ddp=True` to use this hook properly" + ) + ddp_ref = weakref.ref(ddp) + + # NOTE: Gloo may hang with this overlapping approach, so we require + # NCCL/HCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300 + pg = dist.get_backend(ddp_ref().process_group) # type: ignore[union-attr] + if ((pg != dist.Backend.NCCL) and (pg != 'hccl')): + raise RuntimeError( + "Overlapping DDP with ZeRO using this approach currently requires " + "NCCL/HCCL backend to avoid hangs" + ) + + if shard_buckets: + zero._overlap_info.shard_buckets = True + zero._overlap_info.total_size = 0 + + def hook_with_zero_fn( + state: Any, + bucket: dist.GradBucket, + ) -> torch.futures.Future[torch.Tensor]: + r""" + Return :class:`Future` that runs the optimizer step if this corresponds to the last gradient bucket. + + Perform equivalent of :class:`ZeroRedundancyOptimizer` :meth:`step` if ``bucket`` is last gradient bucket. + The function gives a gradient bucket tensor and + performs additional computation on the iteration that + the :class:`DistributedDataParallel` buckets are rebuilt to collect + information used to implement the modified hook. + + Arguments: + state (Any): any state for the hook. + bucket (dist.GradBucket): the :class:`DistributedDataParallel` + gradient bucket. + """ + fut = hook(state, bucket) + _hook_with_zero_step_setup(ddp_ref, zero, bucket) + if zero._overlap_info.status != _OverlapStatus.INITIALIZED: + return fut + + overlap_info = zero._overlap_info + bucket_index = bucket.index() + rank = zero.global_rank + + assert overlap_info.status == _OverlapStatus.INITIALIZED + assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, \ + "`assigned_ranks_per_bucket` is not fully constructed" + assigned_to_bucket = rank in overlap_info.assigned_ranks_per_bucket[bucket_index] + + # Save the bucket reference and all-reduce future for the final bucket + if assigned_to_bucket: + overlap_info.bucket_index_to_bucket[bucket_index] = bucket + overlap_info.bucket_index_to_future[bucket_index] = fut + + # Check that buckets are indexed incrementally starting from 0 in the + # order of their autograd hooks firing + if len(overlap_info.bucket_indices_seen) > 0: + assert overlap_info.bucket_indices_seen[-1] == bucket_index - 1, \ + "Bucket indices are not in incremental order" + else: + assert bucket_index == 0, "Bucket indices do not start from 0" + overlap_info.bucket_indices_seen.append(bucket_index) + + # Directly return the future without any optimizer computation if this + # is not the last bucket + num_buckets = len(overlap_info.params_per_bucket) + is_last_bucket = bucket_index == num_buckets - 1 + if not is_last_bucket: + return fut + + # Perform partial optimizer step on all buckets after the final + # bucket has been computed + # NOTE: This should not be chained as a callback to the last bucket's + # all-reduce future since that would add synchronization that delays + # all optimizer computation to wait for that last all-reduce + for bucket_index in range(num_buckets): + assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index] + if rank in assigned_ranks: + # Wait on the bucket's all-reduce future to ensure correct + # gradients + assert bucket_index in overlap_info.bucket_index_to_future, \ + f"All-reduce future for bucket {bucket_index} not saved " \ + f"on rank {rank}" + allreduce_future = overlap_info.bucket_index_to_future[bucket_index] + 
allreduce_future.wait() + + # Perform the partial optimizer step + curr_bucket = overlap_info.bucket_index_to_bucket[bucket_index] + _perform_local_step(curr_bucket, zero, rank) + + _broadcast_bucket(bucket_index, zero) + + # Ensure that all parameter updates are finished before the + # next forward pass + overlap_info.wait_for_broadcasts() + overlap_info.clear_per_iter_info() + + return fut + + return hook_with_zero_fn + + +def hook_with_zero_step_interleaved( + hook: Callable[[Any, dist.GradBucket], torch.futures.Future], + ddp: DistributedDataParallel, + zero: ZeroRedundancyOptimizer, + shard_buckets: bool = False, +) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: + r""" + Modify ``hook`` to overlap :class:`ZeroRedundancyOptimizer` optimizer step with :class:`DistributedDataParallel` backward pass + + This approach overlaps the optimizer computation and communication with the + backward computation and communication. In particular, once a bucket's + gradients have been computed, the optimizer computation using those + gradients is launched (though the actual computation must wait for the + bucket's all-reduce to complete). This yields an interleaving of all- + reduces and broadcasts in the communication stream. + + This approach may be preferred over :meth:`hook_with_zero_step` if + communication is relatively fast compared to computation. + + Arguments: + hook (Any * dist.GradBucket -> torch.futures.Future): the hook to + modify. + ddp (DistributedDataParallel): the :class:`DistributedDataParallel` + instance to use. + zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer` + instance to use. + shard_buckets (bool): if ``True``, then the assignment of each + :class:`DistributedDataParallel` bucket is partitioned across + possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e. + across possibly multiple ranks) to approximate uniformity; if + ``False``, then each bucket is wholly assigned to a single + :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank). + + Returns: + The modified hook. + + Raises: + ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``. + RuntimeError: if using any backend other than NCCL since currently + Gloo may hang. + + .. warning:: + Given the way that overlapping :class:`DistributedDataParallel` with + :class:`ZeroRedundancyOptimizer` is currently implemented, the first + two or three training iterations do not perform parameter updates in + the optimizer step, depending on if ``static_graph=False`` or + ``static_graph=True``, respectively. This is because it needs + information about the gradient bucketing strategy used by + :class:`DistributedDataParallel`, which is not finalized until the + second forward pass if ``static_graph=False`` or until the third + forward pass if ``static_graph=True``. 
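+
+    A usage sketch mirroring :func:`hook_with_zero_step`; ``ddp_model``,
+    ``allreduce_hook`` and ``zero`` are placeholders for a DDP instance, the
+    default DDP hook, and a :class:`ZeroRedundancyOptimizer` constructed with
+    ``overlap_with_ddp=True``:
+
+    Example::
+        >>> # xdoctest: +SKIP
+        >>> ddp_model.register_comm_hook(
+        >>>     None, hook_with_zero_step_interleaved(allreduce_hook, ddp_model, zero)
+        >>> )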
+ """ + if not zero._overlap_with_ddp: + raise ValueError( + "ZeroRedundancyOptimizer must be constructed with " + "`overlap_with_ddp=True` to use this hook properly" + ) + ddp_ref = weakref.ref(ddp) + + # NOTE: Gloo may hang with this overlapping approach, so we require + # NCCL/HCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300 + pg = dist.get_backend(ddp_ref().process_group) # type: ignore[union-attr] + if ((pg != dist.Backend.NCCL) and (pg != 'hccl')): + raise RuntimeError( + "Overlapping DDP with ZeRO using this approach currently requires " + "NCCL/HCCL backend to avoid hangs" + ) + + if shard_buckets: + zero._overlap_info.shard_buckets = True + zero._overlap_info.total_size = 0 + + def hook_with_zero_interleaved_fn( + state, + bucket: dist.GradBucket, + ) -> torch.futures.Future[torch.Tensor]: + r""" + Return :class:`Future` that gives gradient bucket tensor and performs partial :class:`ZeroRedundancyOptimizer` :meth:`step`. + + This function uses the gradients in gradient in given bucket to perform a partial + :class:`ZeroRedundancyOptimizer` :meth:`step` + + Arguments: + state: any state for the hook. + bucket (dist.GradBucket): the :class:`DistributedDataParallel` + gradient bucket. + """ + fut = hook(state, bucket) + _hook_with_zero_step_setup(ddp_ref, zero, bucket) + if zero._overlap_info.status != _OverlapStatus.INITIALIZED: + return fut + + def zero_step(fut: torch.futures.Future) -> torch.Tensor: + r""" + Perform partial :class:`ZeroRedundancyOptimizer` :meth:`step` using gradients in the :class:`DistributedDataParallel`. + + Returns: + A :class:`torch.Tensor` representing the contents of the + gradient bucket. + """ + overlap_info = zero._overlap_info + bucket_index = bucket.index() + rank = zero.global_rank + + assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index] + overlap_info.bucket_indices_seen.append(bucket_index) + if rank in assigned_ranks: + _perform_local_step(bucket, zero, rank) + + _broadcast_bucket(bucket_index, zero) + + num_buckets = len(overlap_info.params_per_bucket) + if len(overlap_info.bucket_indices_seen) == num_buckets: + # Ensure that all parameter updates are finished before the + # next forward pass + overlap_info.wait_for_broadcasts() + overlap_info.clear_per_iter_info() + + return bucket.buffer() + + return fut.then(zero_step) + + return hook_with_zero_interleaved_fn diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..a552f9a359f7edbab147c9802e3fd8e7299d4ab2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py @@ -0,0 +1,28 @@ +from typing import Any + +import torch +from torch.distributed import GradBucket + +__all__ = ["noop_hook"] + + +def noop_hook(_: Any, bucket: GradBucket) -> torch.futures.Future[torch.Tensor]: + """ + Return a future that wraps the input, so it is a no-op that does not incur any communication overheads. + + This hook should **only** be used for headroom analysis of allreduce optimization, + instead of the normal gradient synchronization. + For example, if only less than 10% speedup of training time can be observed after this hook is registered, + it usually implies that allreduce is not a performance bottleneck for this case. 
+ Such instrumentation can be particularly useful + if GPU traces cannot be easily retrieved or the trace analysis is complicated + some factors such as the overlap between allreduce and computation or the desynchronization across ranks. + + Example:: + >>> # xdoctest: +SKIP + >>> ddp_model.register_comm_hook(None, noop_hook) + """ + fut: torch.futures.Future[torch.Tensor] = torch.futures.Future() + fut.set_result(bucket.buffer()) + + return fut diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..bff55327e84744dbd564e4067a69f081f5bc972d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py @@ -0,0 +1,223 @@ +from typing import Any, Callable, cast, Tuple + +import torch +import torch.distributed as dist + +__all__ = [ + "allreduce_hook", + "fp16_compress_hook", + "bf16_compress_hook", + "fp16_compress_wrapper", + "bf16_compress_wrapper", +] + + +def _allreduce_fut( + process_group: dist.ProcessGroup, tensor: torch.Tensor +) -> torch.futures.Future[torch.Tensor]: + """Average the input gradient tensor by allreduce and returns a future.""" + group_to_use = process_group if process_group is not None else dist.group.WORLD + + # Apply the division first to avoid overflow, especially for FP16. + tensor.div_(group_to_use.size()) + + return ( + dist.all_reduce(tensor, group=group_to_use, async_op=True) + .get_future() + .then(lambda fut: fut.value()[0]) + ) + + +def allreduce_hook( + process_group: dist.ProcessGroup, bucket: dist.GradBucket +) -> torch.futures.Future[torch.Tensor]: + """ + Call ``allreduce`` using ``GradBucket`` tensors. + + Once gradient tensors are aggregated across all workers, its ``then`` + callback takes the mean and returns the result. + + If user registers this DDP communication hook, + DDP results is expected to be same as the case where no hook was registered. + Hence, this won't change behavior of DDP and user can use this as a reference + or modify this hook to log useful information or any other purposes while + unaffecting DDP behavior. + + Example:: + >>> # xdoctest: +SKIP + >>> ddp_model.register_comm_hook(process_group, allreduce_hook) + """ + return _allreduce_fut(process_group, bucket.buffer()) + + +def fp16_compress_hook( + process_group: dist.ProcessGroup, + bucket: dist.GradBucket, +) -> torch.futures.Future[torch.Tensor]: + """ + Compress by casting ``GradBucket`` to ``torch.float16`` divided by process group size. + + This DDP communication hook implements a simple gradient compression + approach that casts ``GradBucket`` tensor to half-precision floating-point format (``torch.float16``) + and then divides it by the process group size. + It allreduces those ``float16`` gradient tensors. Once compressed gradient + tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``). 
+ + Example:: + >>> # xdoctest: +SKIP + >>> ddp_model.register_comm_hook(process_group, fp16_compress_hook) + """ + group_to_use = process_group if process_group is not None else dist.group.WORLD + world_size = group_to_use.size() + + buffer = ( + cast(Tuple[torch.Tensor, ...], bucket)[0] + if isinstance(bucket, tuple) + else bucket.buffer() + ) + compressed_tensor = buffer.to(torch.float16).div_(world_size) + + def decompress(fut): + decompressed_tensor = buffer + # Decompress in place to reduce the peak memory. + # See: https://github.com/pytorch/pytorch/issues/45968 + value = fut if isinstance(fut, torch.Tensor) else fut.value()[0] + decompressed_tensor.copy_(value) + return decompressed_tensor + + if torch._utils.is_compiling(): + grad = dist._functional_collectives.all_reduce( + compressed_tensor, "sum", group_to_use + ) + return decompress(grad) + else: + fut = dist.all_reduce( + compressed_tensor, group=group_to_use, async_op=True + ).get_future() + return fut.then(decompress) + + +# TODO: create an internal helper function and extract the duplicate code in FP16_compress and BF16_compress. +def bf16_compress_hook( + process_group: dist.ProcessGroup, + bucket: dist.GradBucket, +) -> torch.futures.Future[torch.Tensor]: + """ + Warning: This API is experimental, and it requires NCCL version later than 2.9.6. + + This DDP communication hook implements a simple gradient compression + approach that casts ``GradBucket`` tensor to half-precision + `Brain floating point format `_ (``torch.bfloat16``) + and then divides it by the process group size. + It allreduces those ``bfloat16`` gradient tensors. Once compressed gradient + tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``). + + Example:: + >>> # xdoctest: +SKIP + >>> ddp_model.register_comm_hook(process_group, bf16_compress_hook) + """ + group_to_use = process_group if process_group is not None else dist.group.WORLD + world_size = group_to_use.size() + + buffer = ( + cast(Tuple[torch.Tensor, ...], bucket)[0] + if isinstance(bucket, tuple) + else bucket.buffer() + ) + compressed_tensor = buffer.to(torch.bfloat16).div_(world_size) + + def decompress(fut): + decompressed_tensor = buffer + # Decompress in place to reduce the peak memory. + # See: https://github.com/pytorch/pytorch/issues/45968 + value = fut if isinstance(fut, torch.Tensor) else fut.value()[0] + decompressed_tensor.copy_(value) + return decompressed_tensor + + if torch._utils.is_compiling(): + grad = dist._functional_collectives.all_reduce( + compressed_tensor, "sum", group_to_use + ) + return decompress(grad) + else: + fut = dist.all_reduce( + compressed_tensor, group=group_to_use, async_op=True + ).get_future() + return fut.then(decompress) + + +def fp16_compress_wrapper( + hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]] +) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: + """ + Cast input tensor to ``torch.float16``, cast result of hook back to input dtype. + + This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision + floating point format (``torch.float16``), and casts the resulting tensor of the given hook back to + the input data type, such as ``float32``. + Therefore, ``fp16_compress_hook`` is equivalent to ``fp16_compress_wrapper(allreduce_hook)``. 
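    For instance (an illustrative registration; ``ddp_model`` and ``process_group`` are assumed
    to exist), wrapping the plain allreduce hook behaves the same as registering
    ``fp16_compress_hook`` directly::

        >>> # xdoctest: +SKIP
        >>> ddp_model.register_comm_hook(process_group, fp16_compress_wrapper(allreduce_hook))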
+ + Example:: + >>> # xdoctest: +SKIP + >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10) + >>> ddp_model.register_comm_hook(state, fp16_compress_wrapper(powerSGD_hook)) + """ + + def fp16_compress_wrapper_hook( + hook_state, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + # Cast bucket tensor to FP16. + bucket.set_buffer(bucket.buffer().to(torch.float16)) + + fut = hook(hook_state, bucket) + + def decompress(fut): + decompressed_tensor = bucket.buffer() + # Decompress in place to reduce the peak memory. + # See: https://github.com/pytorch/pytorch/issues/45968 + decompressed_tensor.copy_(fut.value()) + return decompressed_tensor + + # Decompress after hook has run. + return fut.then(decompress) + + return fp16_compress_wrapper_hook + + +def bf16_compress_wrapper( + hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]] +) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: + """ + Warning: This API is experimental, and it requires NCCL version later than 2.9.6. + + This wrapper casts the input gradient tensor of a given DDP communication hook to half-precision + `Brain floating point format `_ (``torch.bfloat16``), + and casts the resulting tensor of the given hook back to the input data type, such as ``float32``. + + Therefore, ``bf16_compress_hook`` is equivalent to ``bf16_compress_wrapper(allreduce_hook)``. + + Example:: + >>> # xdoctest: +SKIP + >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, start_powerSGD_iter=10) + >>> ddp_model.register_comm_hook(state, bf16_compress_wrapper(powerSGD_hook)) + """ + + def bf16_compress_wrapper_hook( + hook_state, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + # Cast bucket tensor to BF16. + bucket.set_buffer(bucket.buffer().to(torch.bfloat16)) + + fut = hook(hook_state, bucket) + + def decompress(fut): + decompressed_tensor = bucket.buffer() + # Decompress in place to reduce the peak memory. + # See: https://github.com/pytorch/pytorch/issues/45968 + decompressed_tensor.copy_(fut.value()) + return decompressed_tensor + + # Decompress after hook has run. + return fut.then(decompress) + + return bf16_compress_wrapper_hook diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..31b243d44e0fd2ef88be9c7c2e2f7c1305a0a40c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py @@ -0,0 +1,85 @@ +import torch +import torch.distributed as dist +from torch.autograd import Variable + +from dataclasses import dataclass +from typing import Any, no_type_check +from torch.distributed.utils import _free_storage + +@dataclass +class _AllreduceUpcastHookState: + """ + State to manage DDP mixed precision in backward / gradient communication. + + This contains a weakref to the DDP module for access to reducer and process + group, and a stream to run parameter and gradient upcasts. 
+ """ + + ddp_weakref: Any + upcast_stream: torch.cuda.Stream + wait_for_stream_enqueued: bool = False + +@no_type_check +def _reducer_allreduce_and_upcast_hook( + hook_state: _AllreduceUpcastHookState, bucket: dist.GradBucket +) -> torch.futures.Future[torch.Tensor]: + """ + Perform allreduce in precision ``reduce_dtype``, upcast to prepare for optimizer. + + Performs allreduce in the reduced precision given by DDP's mixed precision + reduce_dtype, and upcasts parameters and gradients to fp32 in preparation + to run the optimizer. + """ + ddp_weakref = hook_state.ddp_weakref + reducer, process_group = ddp_weakref().reducer, ddp_weakref().process_group + gradient_is_bucket_view = ddp_weakref().gradient_as_bucket_view + # Cast bucket if different than param_dtype. + if ( + ddp_weakref().mixed_precision.param_dtype != ddp_weakref().mixed_precision.reduce_dtype + ): + # Cast bucket tensor to reduce_dtype + bucket.set_buffer(bucket.buffer().to(ddp_weakref().mixed_precision.reduce_dtype)) + fut = reducer._run_allreduce_hook(bucket) + ret_fut = torch.futures.Future() + stream = hook_state.upcast_stream + with torch.cuda.stream(stream): + fut.wait() + bucket.buffer().div_(process_group.size()) + ret_fut.set_result(bucket.buffer()) + + # Upcast parameters and gradients so optimizer step can run in fp32. + params, grads = bucket.parameters(), bucket.gradients() + for p, g in zip(params, grads): + p.data = p._fp_param + # free storage for mp param as it will be allocated again in next + # forward pass. + _free_storage(p._mp_param) + p.grad.data = p.grad.to(p.data.dtype) + + # enqueue a callback to wait for this stream at end of backward + def wait_for_stream_cb(): + torch.cuda.current_stream().wait_stream(stream) + # Remove post-backward hooks since they are re-installed in next + # iteration, similar to FSDP. + # Parameters that don't require grad still needed to be casted since + # they may participate in computation. However, they would not be recast + # by hook above as they don't have a grad hook installed, so cast them + # back here. + for n, p in ddp_weakref().module.named_parameters(): + if hasattr(p, '_ddp_mp_hook_state'): + p._ddp_mp_hook_state[1].remove() + delattr(p, '_ddp_mp_hook_state') + if not p.requires_grad and not hasattr(p, '_ddp_ignored'): + p.data = p._fp_param + + # reset for next backward pass + hook_state.wait_for_stream_enqueued = False + + if not hook_state.wait_for_stream_enqueued: + Variable._execution_engine.queue_callback( + wait_for_stream_cb + ) + # mark that the callback is enqueued + hook_state.wait_for_stream_enqueued = True + + return ret_fut diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..dc7e5ee2fdc5f2f3f6e4d95a0613de582f06debb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py @@ -0,0 +1,154 @@ +from typing import Any, Callable, List, no_type_check + +import torch +import torch.distributed as dist +from torch.autograd import Variable +from functools import partial +from dataclasses import dataclass + +__all__: List[str] = [] + +_FUNCTIONAL_OPTIM_STEP_METHOD_NAME = "step_param" + +class _OptimizerHookState: + """ + Holds state for running optimizer in-line after DDP communication hook. 
+ + Currently contains only optimizer class which must have a method `step_param`. + """ + + __slots__ = ["functional_optimizer", "params_to_optimize"] + + def __init__(self, functional_optim, params=None): + self.functional_optimizer = functional_optim + self._check_valid_functional_optim() + self._set_params_to_optimize(params) + + def _set_params_to_optimize(self, params): + if params is not None: + self.params_to_optimize = set(params) + + def _check_valid_functional_optim(self): + if not hasattr(self.functional_optimizer, _FUNCTIONAL_OPTIM_STEP_METHOD_NAME): + raise ValueError( + f"Class {type(self.functional_optimizer)} must implement method " + f"{_FUNCTIONAL_OPTIM_STEP_METHOD_NAME}." + ) + + +@dataclass +class _OptimInBackwardHookState: + optim_stream: torch.cuda.Stream + wait_for_optim_stream_enqueued: bool + +@no_type_check +def _apply_optim_in_backward_hook( + gradient_is_bucket_view: bool +) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: + r""" + Register hook to apply the optimizer in backward. + + If torch.distributed.optim._apply_optimizer_in_backward is used to overlap + optimizer with backward pass, DDP will run the below hook to run optimizer + step for parameters after gradient communication has taken place. + """ + optim_in_bwd_state = _OptimInBackwardHookState( + optim_stream=torch.cuda.Stream(), + wait_for_optim_stream_enqueued=False, + ) + + def apply_optim_in_backward_hook( + hook_state: Any, bucket: dist.GradBucket, optim_stream_state, + ) -> torch.futures.Future[torch.Tensor]: + # Run original hook + ddp_weakref = hook_state + ddp_inst = ddp_weakref() + reducer, process_group = ddp_inst.reducer, ddp_inst.process_group + fut = reducer._run_allreduce_hook(bucket) + optimizer_stream = optim_stream_state.optim_stream + with torch.cuda.stream(optimizer_stream): + fut.wait() + # Apply gradient division since C++ side only allreduces and does + # not average. TODO: (rohan-varma) the div factor may be different + # when running with join hook + bucket.buffer().div_(process_group.size()) + model_params = bucket.parameters() + grads = bucket.gradients() + # TODO (rohan-varma): upcast as needed for DDP mixed precision, + # once optimizer in backward + DDP mixed precision is supported. + for p, g in zip(model_params, grads): + if hasattr(p, '_in_backward_optimizers'): + # Note: need to set grad to the bucket's grad, because + # running allreduce results in the bucket's grad being + # reduced, but not grad field. + if not gradient_is_bucket_view: + p.grad = g + for optim in p._in_backward_optimizers: + optim.step() + + # Need to return a Future[Tensor] to obey comm hook API contract. + ret_fut = torch.futures.Future() + ret_fut.set_result(bucket.buffer()) + + # enqueue a callback to wait for this optimizer stream at the end of + # backward and set all DDP managed grads to None. 
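        # (This mirrors the stream-synchronization pattern of the mixed-precision hook: the
        # callback is queued at most once per backward pass via
        # Variable._execution_engine.queue_callback, guarded by wait_for_optim_stream_enqueued.)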
+ def wait_for_optim_stream_callback(): + torch.cuda.current_stream().wait_stream( + optim_stream_state.optim_stream + ) + # Set DDP managed grads to None + for param in ddp_inst._get_data_parallel_params(ddp_inst.module): + if hasattr(param, '_in_backward_optimizers'): + param.grad = None + + # reset for the next backwards pass + optim_stream_state.wait_for_optim_stream_enqueued = False + + if not optim_stream_state.wait_for_optim_stream_enqueued: + Variable._execution_engine.queue_callback( + wait_for_optim_stream_callback + ) + # mark that the callback is enqueued + optim_stream_state.wait_for_optim_stream_enqueued = True + + return ret_fut + + comm_hook = partial( + apply_optim_in_backward_hook, optim_stream_state=optim_in_bwd_state + ) + # These are needed for DDP's logging of comm hooks + comm_hook.__name__ = apply_optim_in_backward_hook.__name__ + comm_hook.__qualname__ = apply_optim_in_backward_hook.__qualname__ + + return comm_hook + +def _hook_then_optimizer( + hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]], + optimizer_state: _OptimizerHookState, +) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: + r"""Run optimizer in a functional fashion after DDP communication hook.""" + has_set_params = ( + hasattr(optimizer_state, 'params_to_optimize') + and optimizer_state.params_to_optimize is not None + ) + + def hook_then_optimizer_wrapper( + hook_state, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + # Run original hook + fut = hook(hook_state, bucket) + + def optimizer_step(fut): + gradient_tensors = bucket.gradients() + model_params = bucket.parameters() + for grad_tensor, model_param in zip(gradient_tensors, model_params): + if not has_set_params or model_param in optimizer_state.params_to_optimize: + optimizer_state.functional_optimizer.step_param( + model_param, + grad_tensor, + ) + return bucket.buffer() + + return fut.then(optimizer_step) + + return hook_then_optimizer_wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..218ee08dbd46f5097ec0d6ea4acd3775e065f0ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py @@ -0,0 +1,123 @@ +import logging + +import torch +import torch.distributed as dist + +from . import default_hooks as default + +logger = logging.getLogger(__name__) + + +class PostLocalSGDState: + r""" + Store state for all-reducing gradients globally until given step, then locally after. + + Stores the state for all-reducing gradients globally using ``process_group`` until step ``start_localSGD_iter``, + and all-reducing gradients locally using ``subgroup`` afterwards. + + If ``process_group`` is ``None``, the global process group will be used. + If ``subgroup`` is ``None``, the intra-node process group on each machine will be used. + + Additionally, ``post_local_gradient_allreduce`` may be worth tuning, + because both true and false may give a faster convergence. 
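    A construction sketch (illustrative; ``ddp_model`` and an initialized default process group
    are assumed, and the final averaging call runs inside the training loop after
    ``optimizer.step()``):

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch.distributed.algorithms.model_averaging.averagers as averagers
        >>> subgroup, _ = dist.new_subgroups()
        >>> state = PostLocalSGDState(process_group=None, subgroup=subgroup, start_localSGD_iter=100)
        >>> ddp_model.register_comm_hook(state, post_localSGD_hook)
        >>> averager = averagers.PeriodicModelAverager(period=4, warmup_steps=100)
        >>> # after optimizer.step() in each training iteration:
        >>> averager.average_parameters(ddp_model.parameters())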
+ """ + + __slots__ = [ + "process_group", + "subgroup", + "start_localSGD_iter", + "post_local_gradient_allreduce", + "iter", + ] + + def __init__( + self, + process_group, + subgroup, + start_localSGD_iter, + post_local_gradient_allreduce=True, + ): + """Initialize state object with given parameters and log when localSGD start.""" + logger.info( + "Local SGD will be started after %s iterations", start_localSGD_iter + ) + + # The group used for all-reducing gradients globally. + self.process_group = process_group + # The group used for all-reducing gradients locally. + self.subgroup = subgroup + self.start_localSGD_iter = start_localSGD_iter + # Allreduce gradients locally since iteration `start_localSGD_iter`. + # This may help with the convergence efficiency at the cost of relatively cheap intra-subgroup communication. + self.post_local_gradient_allreduce = post_local_gradient_allreduce + # Iteration/step in the training loop. + self.iter = 0 + + def maybe_increase_iter(self, bucket): + """Track iterations and trigger log message at start of local SGD.""" + # Since bucket 0 is the last bucket to allreduce in an iteration. + # Only increase `iter` when bucket 0 is processed. + if bucket.is_last(): + self.iter += 1 + + if self.iter == self.start_localSGD_iter: + logger.info( + "Start to apply local SGD after %s iterations.", self.iter + ) + +def post_localSGD_hook( + state: PostLocalSGDState, bucket: dist.GradBucket +) -> torch.futures.Future[torch.Tensor]: + """ + Run post-localSGD algorithm. + + This DDP communication hook is used for running post-localSGD algorithm, + by combining with a model averaging component (e.g., + :class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager`) + that runs after the optimizer step. + + Args: + state (PostLocalSGDState): State information to run post-localSGD. + Users mainly need to tune ``start_localSGD_iter`` to determine when to start local SGD. + bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors. + Note that since DDP comm hook only supports single process single device mode, + only exactly one tensor is stored in this bucket. + + Returns: + Future handler of the communication, which updates the gradients in place. + + Example:: + >>> # xdoctest: +SKIP + >>> state = PostLocalSGDState(process_group=process_group, subgroup=subgroup, + start_localSGD_iter=10) + >>> ddp_model.register_comm_hook(state, post_localSGD_hook) + >>> # Also need to establish a model averaging module and run model averaging after ``optimizer.step()``. + >>> # Please refer to the examples in ``torch.distributed.algorithms.model_averaging.averagers`` module. + """ + global_group_to_use = ( + state.process_group if state.process_group is not None else dist.group.WORLD + ) + + # The input tensor is a flattened 1D tensor. + input_tensor = bucket.buffer() + + # Run allreduce using `global_group_to_use` in the first `start_localSGD_iter` iterations. + if state.iter < state.start_localSGD_iter: + state.maybe_increase_iter(bucket) + return default._allreduce_fut(global_group_to_use, input_tensor) + + # If `post_local_gradient_allreduce` is not set, + # then no gradient synchronization after the first `start_localSGD_iter` iterations. + if not state.post_local_gradient_allreduce: + fut: torch.futures.Future[torch.Tensor] = torch.futures.Future() + fut.set_result(input_tensor) + return fut + + # Run allreduce using `subgroup` after the first `start_localSGD_iter` iterations. 
+ # Note that by default, a separate subgroup for each node is created which + # causes an intra-node allreduce to be done at each training step. + # From this moment, model averaging should run after the optimizer step, + # to globally allreduce all the parameters. + if state.subgroup is None: + state.subgroup, _ = dist.new_subgroups() + return default._allreduce_fut(state.subgroup, input_tensor) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..9d2d5649f745663a7ebf6b9d483a06e9464f906d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py @@ -0,0 +1,850 @@ +from collections import defaultdict +import logging +import math +from typing import Dict + +import torch +import torch.distributed as dist + +from . import default_hooks as default +from torch.distributed import distributed_c10d + +__all__ = [ + "PowerSGDState", "powerSGD_hook", "batched_powerSGD_hook" +] + +logger = logging.getLogger(__name__) + + +def _orthogonalize(matrices, epsilon=0): + """ + Decide between Gram-Schmidt or QR factorization to orthogonalize a batch of matrices. + + QR factorization doesn't work with half-precision, but it is usually faster with a rank > 2. + """ + assert len(matrices.shape) == 3 and matrices.shape[2] <= matrices.shape[1] + + num_matrices = matrices.shape[0] + rank = matrices.shape[2] + dtype = matrices.dtype + if rank <= 2 or dtype in [torch.float16, torch.bfloat16]: + _orthogonalize_gram_schmidt(matrices, epsilon=epsilon) + else: + torch.linalg.qr( + matrices, + out=( + matrices, + torch.empty(num_matrices, rank, rank, device=matrices.device, dtype=dtype) + ) + ) + +def _orthogonalize_gram_schmidt(matrices, epsilon=0): + """ + Apply Gram-Schmidt procedure to orthogonalize a batch of matrices. + + If epsilon is 0, this is equivalent to `torch.qr(matrices, out=(matrices, _))`, + """ + num_cols = matrices.shape[2] + for i in range(num_cols): + # Normalize the i'th column. + col = matrices[:, :, i : i + 1] + # If no epsilon is added here, division by zero may be caused by vanishing gradients. + # This epsilon is not needed if the input batch of matrices covers the gradients of at least one entire layer + # in the neural network. + if epsilon == 0: + # Note that col ** 2 can underflow/overflow if we use FP16. + # May need to consider multiplying a scaling factor and dividing it later, or using bfloat16 instead. + try: + col /= torch.norm(col, dim=1, keepdim=True) + except ZeroDivisionError: + logger.error( + "The matrices to be orthogonalized has at least a column of all 0s. Please set a small value such as 1e-8 " + "as `orthogonalization_epsilon` in PowerSGD state." + ) + # Recover the values from NaNs to 0s. + col.fill_(0.0) + else: + col /= torch.norm(col, dim=1, keepdim=True) + epsilon + # Project it on the rest and remove it. + if i + 1 < num_cols: + rest = matrices[:, :, i + 1 :] + rest -= torch.sum(col * rest, dim=1, keepdim=True) * col + + +def _should_compress( + num_rows, num_cols, matrix_approximation_rank, min_compression_rate +): + """ + Recommend if tensor given is worth compressing. + + Returns a recommendation as to whether the 2D tensor described by the arguments is worth compressing, + including statistics describing the expected savings from compression. 
We consider a tensor worth + compressing when ``min_compression_rate`` < uncompressed size / compressed size, where + uncompressed size = ``num_rows`` * ``num_cols``, + and compressed size = (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``. + + The result of this function is a tuple of the form (compression_recommendation, uncompressed_el_count, compressed_el_count), where: + + compression_recommendation is true if the tensor is worth compressing, and false otherwise (see above); + + uncompressed_el_count is the uncompressed element count, i.e. ``num_rows`` * ``num_cols``; and, + + compress_el_count is the element count after compression, i.e. (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``. + """ # noqa: B950 + uncompressed_size = num_rows * num_cols + compressed_size = (num_rows + num_cols) * matrix_approximation_rank + return ( + compressed_size * min_compression_rate < uncompressed_size, + uncompressed_size, + compressed_size, + ) + + +def _report_compression_stats(bucket, state): + """Report compression stats at frequency of ``compression_stats_logging_frequency`` specified in PowerSGD state.""" + if ( + bucket.is_last() + and state.iter >= state.next_stats_report + ): + stats = state.compression_stats() + logger.info( + "Compression stats: iter %s, total before compression %s, total after compression %s, " + "rate %s", state.iter, stats[1], stats[2], stats[0] + ) + state.next_stats_report = state.iter + state.compression_stats_logging_frequency + + +class PowerSGDState: + r""" + Store both the algorithm's hyperparameters and internal state for all gradients during training. + + Particularly, ``matrix_approximation_rank`` and ``start_powerSGD_iter`` are the main hyperparameters that should be tuned by the user. + For performance, we suggest to keep binary hyperparameters ``use_error_feedback`` and ``warm_start`` on. + + 1. ``matrix_approximation_rank`` controls the size of compressed low-rank tensors, which determines the compression rate. The lower the rank, the stronger the compression. + + 1.1. If ``matrix_approximation_rank`` is too low, the full model quality will need more training steps to reach or will never reach and yield loss in accuracy. + + 1.2. The increase of ``matrix_approximation_rank`` can substantially increase the computation costs of the compression, and the accuracy may not be further improved beyond a certain ``matrix_approximation_rank`` threshold. + + To tune ``matrix_approximation_rank``, we suggest to start from 1 and increase by factors of 2 (like an exponential grid search, 1, 2, 4, ...), until a satisfactory accuracy is reached. Typically only a small value 1-4 is used. For some NLP tasks (as shown in Appendix D of the original paper), this value has been increased to 32. + + 2. ``start_powerSGD_iter`` defers PowerSGD compression until step ``start_powerSGD_iter``, and vanilla allreduce runs prior to step ``start_powerSGD_iter``. This hybrid scheme of **vanilla allreduce + PowerSGD** can effectively improve the accuracy, even a relatively small ``matrix_approximation_rank`` is used. This is because that, the beginning of training phase is usually very sensitive to inaccurate gradients, and compressing gradients too early may make the training quickly take a suboptimal trajectory, which can result in an irrecoverable impact on the accuracy. + + To tune ``start_powerSGD_iter``, we suggest to start with 10% of total training steps, and increase it until a satisfactory accuracy is reached. 
If there is a warm-up stage in the training, ``start_powerSGD_iter`` typically should be no less than the number of warm-up steps. + + 3. ``min_compression_rate`` is the minimum compression rate required when a layer is compressed. Due to the computation overheads incurred by the compression, a tensor is worth compressing only if there can be sufficient saving in bandwidth, where ``(num_rows + num_cols) * matrix_approximation_rank * min_compression_rate < num_rows * num_cols``. If the specified compression rate threshold cannot be satisfied, the tensor will be directly allreduced without compression. + + Compression statistics are logged every ``compression_stats_logging_frequency`` iterations once PowerSGD compression starts. + + 4. ``orthogonalization_epsilon`` can be a very small value (e.g., 1e-8) added to every normalized matrix column in orthogonalization step, to prevent div-by-zero error if any column has all 0s. If this can already be prevented (e.g., by batch normalization), an epsilon of 0 is recommended for accuracy. + + 5. ``batch_tensors_with_same_shape`` controls whether to compress and decompress tensors with same shape in a batched operation to achieve higher parallelism. Note that you should also increase the bucket size (i.e., ``bucket_cap_mb`` arg in DDP constructor) to make more same-shaped tensors appear in the same bucket, however this may reduce the overlap between computation and communication, and increase the memory footprint due to stacking the tensors of the same shape. Set to ``True`` if the compression / decompression computation is a bottleneck. + + .. warning :: + If error feedback or warm-up is enabled, the minimum value of ``start_powerSGD_iter`` allowed in DDP is 2. + This is because there is another internal optimization that rebuilds buckets at iteration 1 in DDP, + and this can conflict with any tensor memorized before the rebuild process. + """ # noqa: B950 + + __slots__ = [ + "process_group", + # The fields below are the hyperparameters that often need to be tuned by the user. + "matrix_approximation_rank", + "start_powerSGD_iter", + # The fields below are the hyperparameters that seldom need be tuned by the user. + "min_compression_rate", + "orthogonalization_epsilon", + # The fields below are the binary hyperparameters recommended to be turned on for performance and accuracy. + "use_error_feedback", + "warm_start", + "batch_tensors_with_same_shape", + # The fields below are internal state. + "rng", + "error_dict", + "p_memory_dict", + "q_memory_dict", + "iter", + # The fields below are for recording compression stats. 
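        # (These counters are reported by `_report_compression_stats` every
        # `compression_stats_logging_frequency` iterations once compression starts.)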
+ "total_numel_before_compression", + "total_numel_after_compression", + "compression_stats_logging_frequency", + "next_stats_report", + ] + + def __init__( + self, + process_group, + matrix_approximation_rank=1, + start_powerSGD_iter=1_000, + min_compression_rate=2, + use_error_feedback=True, + warm_start=True, + orthogonalization_epsilon=0, + random_seed=0, + compression_stats_logging_frequency=10_000, + batch_tensors_with_same_shape: bool = False, + ): + logger.info( + "PowerSGD config: matrix_approximation_rank = %s; start_powerSGD_iter = %s; " + "min_compression_rate = %s; orthogonalization_epsilon = %s; use_error_feedback = %s; warm_start = %s; " + "random_seed = %s; compression_stats_logging_frequency = %s; batch_tensors_with_same_shape = %s", + matrix_approximation_rank, + start_powerSGD_iter, + min_compression_rate, + orthogonalization_epsilon, + use_error_feedback, + warm_start, + random_seed, + compression_stats_logging_frequency, + batch_tensors_with_same_shape, + ) + + self.process_group = process_group + self.matrix_approximation_rank = matrix_approximation_rank + # Deferring PowerSGD compression util step 'start_powerSGD_iter' can have two advantages: + # 1) It turns out that PowerSGD may lead to a non-trivial accuracy loss, + # even if the matrix approximation rank is increased to a large value. + # To mitigate the accuracy loss, a simple yet effective way is mixing vanilla allreduce + # (or a more conservative compression such as FP16 compression) with PowerSGD. + # 2) There is an internal optimization of rebuilding buckets process in DDP, + # in order to save the memory space. + # This step takes place after the first iteration. + # However, this means that the shape of input bucketized tensors is subject to change, + # which will complicate the implementations of error feedback and warm-up. + # Running vanilla allreduce in the first few iterations can avoid this complexity. + if (use_error_feedback or warm_start) and start_powerSGD_iter <= 1: + raise ValueError( + "Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, " + "because PowerSGD can only be applied after the first two iterations in DDP." + ) + self.start_powerSGD_iter = start_powerSGD_iter + self.min_compression_rate = min_compression_rate + # Error feedback is usually crucial for both for convergence and generalization, + # because PowerSGD is a biased compressor, + # i.e., compressing and decompressing a random gradient does not yield the original in expectation. + # This mechanism requires a temporary copy of the input gradients, + # so it increases the peak memory consumption by the size of the gradient tensor. + # However, if the target matrices are known to be exactly low-ranked (instead of just low stable rank), + # sometimes it is possible to converge to the optima without error feedback. + # See: http://proceedings.mlr.press/v54/yurtsever17a/yurtsever17a.pdf + self.use_error_feedback = use_error_feedback + # Warm-start reuses P(s) and Q(s) from the previous iteration. + # This can improve the approximation quality and hence improve the accuracy. + # Additionally, by avoiding the initialization of these low-rank tensors at every step, + # this can also accelerate training. + # However, this is at the cost of extra memory. + self.warm_start = warm_start + # Can use a very small value to prevent div-by-zero error caused by orthogonalization of vanishing gradients. 
+ self.orthogonalization_epsilon = orthogonalization_epsilon + # The purpose of this RNG is to generate different random seeds for initializing Q across iterations, + # but in the same order for all the DDP replicas. + # Different random seeds across iterations indicate different 'projections' of the gradients at different SGD steps. + # If the same random projection is used, + # there will be differences between the gradients that are never synchronized. + import numpy as np + self.rng = np.random.RandomState(random_seed) + # Since there is only a single state instance for all the input buckets, + # need to maintain a dictionary that maps each bucket index to the local error. + self.error_dict: Dict[int, torch.Tensor] = {} + self.p_memory_dict: Dict[int, torch.Tensor] = {} + self.q_memory_dict: Dict[int, torch.Tensor] = {} + # Iteration/step in the training loop. + self.iter = 0 + # Compression stats accumulators + self.total_numel_before_compression = 0 + self.total_numel_after_compression = 0 + # We'll report compression stats every 'compression_stats_logging_frequency' iterations + # Note that we always report compression stats at least once. + self.compression_stats_logging_frequency = max( + 1, compression_stats_logging_frequency + ) + self.next_stats_report = 0 + # Batching tensors with same shape can increase parallelism in compression / decompression computation. + # This requires a larger bucket size to make more same-shaped tensor to appear in one bucket, however + # this may reduce the overlap between computation and communication, and increase the memory footprint + # due to stacking tensors. + # Turn on if compression / decompression computation is a bottleneck. + self.batch_tensors_with_same_shape = batch_tensors_with_same_shape + + def __getstate__(self): + r""" + Return a ``Dict[str, Any]`` which will be pickled and saved. + + ``process_group`` is not serializable and excluded from + a returned state. + """ + logger.warning( + "NOTE: Process group is not serializable and excluded from a saved state." + ) + return { + slot: getattr(self, slot) + for slot in self.__slots__ if slot != "process_group" + } + + def __setstate__(self, state): + r""" + Take a provided ``state`` and set to this ``PowerSGDState`` instance. + + ``process_group`` is set to default. + """ + self.process_group = distributed_c10d._get_default_group() + logger.warning( + "NOTE: Process group will be set to a default group (i.e. the world size).\ + If a different group is desired, please set `self.process_group` after PowerSGD state is loaded." + ) + for slot, value in state.items(): + setattr(self, slot, value) + + def maybe_increase_iter(self, bucket): + """Track iterations and trigger log message at start of local SGD.""" + # Since bucket 0 is the last bucket to allreduce in an iteration. + # Only increase `iter` when bucket 0 is processed. + if bucket.is_last(): + self.iter += 1 + + if self.iter == self.start_powerSGD_iter: + logger.info( + "Start to apply PowerSGD after %s iterations.", self.iter + ) + + def compression_stats(self): + r""" + Return latest compression statistics as tuple. + + Returns tuple of form (compress_rate, numel_before_compression, numel_after_compression) where: + + compress_rate is the effective compression rate i.e. 
(number of elements before compression) / (number of elements after compression); + + numel_before_compression is the total number of elements before compression was applied; and, + + numel_after_compression is the total number of elements after compression was applied. + """ # noqa: B950 + compress_rate = ( + self.total_numel_before_compression / self.total_numel_after_compression + if self.total_numel_after_compression > 0 + else 0 + ) + return ( + compress_rate, + self.total_numel_before_compression, + self.total_numel_after_compression, + ) + + +def powerSGD_hook( + state: PowerSGDState, bucket: dist.GradBucket +) -> torch.futures.Future[torch.Tensor]: + r""" + Implement PowerSGD algorithm. + + This DDP communication hook implements PowerSGD gradient compression + algorithm described in the `paper `_. + Once gradient tensors are aggregated across all workers, this hook applies + compression as follows: + + 1. Views the input flattened 1D gradient tensor as a list of per-parameter tensors, and divides all the tensors into two groups: + + 1.1 The tensors that should be compressed before allreduce, because the compression can give enough saving in bandwidth. + + 1.2 Rest of the tensors will be directly allreduced without compression, including all the vector tensors (for biases). + + 2. Handles uncompressed tensors: + + 2.1. Allocate contiguous memory for those uncompressed tensors, and allreduces all the uncompressed tensors as a batch, without compression; + + 2.2. Copies the individual uncompressed tensors from the contiguous memory back to the input tensor. + + 3. Handles the tensors that should be compressed by PowerSGD compression: + + 3.1. For each tensor M, creates two low-rank tensors P and Q for decomposing M, + such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized; + + 3.2. Computes each P in Ps, which is equal to MQ; + + 3.3. Allreduces Ps as a batch; + + 3.4. Orthogonalizes each P in Ps; + + 3.5. Computes each Q in Qs, which is approximately equal to M^TP; + + 3.6. Allreduces Qs as a batch; + + 3.7. Computes each M among all the compressed tensors, which is approximately equal to PQ^T. + + Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations. + This not only gives the user more control over the tradeoff between speedup and accuracy, + but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers. + + Args: + state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc. + To tune the compression configs, mainly need to tune ``matrix_approximation_rank``, ``start_powerSGD_iter`` + and ``min_compression_rate``. + bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors. + Note that since DDP comm hook only supports single process single device mode, + only exactly one tensor is stored in this bucket. + + Returns: + Future handler of the communication, which updates the gradients in place. 
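    As a rough sizing illustration (the numbers are chosen here purely for exposition), a
    1024 x 1024 gradient compressed with ``matrix_approximation_rank=4`` communicates
    (1024 + 1024) * 4 = 8192 elements instead of 1024 * 1024 = 1048576, i.e. roughly a 128x
    reduction, which is why even a small rank can satisfy ``min_compression_rate``.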
+ + Example:: + >>> # xdoctest: +SKIP + >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1, + start_powerSGD_iter=10, min_compression_rate=0.5) + >>> ddp_model.register_comm_hook(state, powerSGD_hook) + """ # noqa: B950 + process_group = state.process_group + group_to_use = process_group if process_group is not None else dist.group.WORLD + world_size = group_to_use.size() + + # The input tensor is a flattened 1D tensor. + input_tensor = bucket.buffer() + + # Run vanilla allreduce in the first `start_powerSGD_iter` iterations. + if state.iter < state.start_powerSGD_iter: + state.maybe_increase_iter(bucket) + return default._allreduce_fut(group_to_use, input_tensor) + + # Apply PowerSGD after `start_powerSGD_iter` iterations. + device = input_tensor.device + dtype = input_tensor.dtype + + # Incorporate the error from the previous state into the gradients. + bucket_index = bucket.index() + input_tensor_cp = None + total_length = input_tensor.shape[0] + if state.use_error_feedback: + if bucket_index in state.error_dict: + input_tensor.add_(state.error_dict[bucket_index]) + else: + logger.info( + "A zero tensor of length %s that represents local error is created.", + total_length + ) + state.error_dict[bucket_index] = torch.zeros( + total_length, device=device, dtype=dtype + ) + + # Keep a copy of the input tensor, + # so that we can compute the local error caused by compression later, + # by comparing this copy and the input tensor updated after decompression. + input_tensor_cp = torch.clone(input_tensor).detach() + + # Unflatten the input tensor into per-parameter tensors, for layer-wise compression. + tensors = bucket.gradients() + + # Step I: Divide all the tensors into two groups, + # one will be compressed before allreduce and the other will be directly allreduced without compression. + tensors_to_compress, uncompressed_tensors = [], [] + total_Ps_size = 0 + total_Qs_size = 0 + for tensor in tensors: + matrix = tensor.view(tensor.shape[0], -1) + n, m = matrix.shape + matrix_approximation_rank = min(n, m, state.matrix_approximation_rank) + compress_test = _should_compress( + n, m, matrix_approximation_rank, state.min_compression_rate + ) + state.total_numel_before_compression += compress_test[1] + if compress_test[0]: + tensors_to_compress.append(matrix) + total_Ps_size += n * matrix_approximation_rank + total_Qs_size += m * matrix_approximation_rank + state.total_numel_after_compression += compress_test[2] + else: + uncompressed_tensors.append(tensor) + state.total_numel_after_compression += compress_test[1] + + _report_compression_stats(bucket, state) + + # Step II: Handle uncompressed tensors. + # Allocate contiguous memory for these tensors to allreduce efficiently. + uncompressed_tensors_memory = ( + torch.cat([tensor.view(-1) for tensor in uncompressed_tensors]) + if uncompressed_tensors + else torch.tensor([], device=device, dtype=dtype) + ) + + # Step III: Handle the tensors that should be compressed. + # Allocate contiguous memory for Ps and Qs to allreduce efficiently. + # If warm-start is enabled, reuse Ps and Qs from the previous iteration if possible. + # The memory spaces of Ps and Qs need to be allocated in the first iteration when PowerSGD is applied. + need_randomize_qs = False + if not state.warm_start or bucket_index not in state.p_memory_dict: + need_randomize_qs = True + # If warm-start is disabled, low-rank tensors will be initialized at every step. + # Only log this if warm-start to avoid spamming. 
+ if state.warm_start: + logger.info( + "Allocating contiguous memory of length %s for Ps, and of length %s for Qs, respectively.", + total_Ps_size, total_Qs_size + ) + state.p_memory_dict[bucket_index] = torch.empty( + total_Ps_size, device=device, dtype=dtype + ) + state.q_memory_dict[bucket_index] = torch.empty( + total_Qs_size, device=device, dtype=dtype + ) + + # Batch tensors to compress by shape. + shape_to_tensors = defaultdict(list) + for tensor in tensors_to_compress: + shape_to_tensors[tensor.shape].append(tensor) + + # This function decides whether to batch tensors with same shape or not according to the argument, + # so the following process could share the same code. + def maybe_batched_tensors_to_compress(): + for tensors in shape_to_tensors.values(): + if state.batch_tensors_with_same_shape: + batch_size = len(tensors) + if batch_size == 1: + # Use the original tensor to avoid copy. + yield tensors[0].unsqueeze(0) + else: + yield torch.stack(tensors) + else: + for tensor in tensors: + yield tensor.unsqueeze(0) + + # Create Ps and Qs that point to the allocated memory. + tensors_to_compress = [] + ps = [] + qs = [] + p_idx = 0 + q_idx = 0 + for tensor in maybe_batched_tensors_to_compress(): + batch_size, n, m = tensor.shape + matrix_approximation_rank = min(n, m, state.matrix_approximation_rank) + tensors_to_compress.append(tensor) + ps.append( + state.p_memory_dict[bucket_index][ + p_idx : p_idx + batch_size * n * matrix_approximation_rank + ].view(batch_size, n, matrix_approximation_rank) + ) + qs.append( + state.q_memory_dict[bucket_index][ + q_idx : q_idx + batch_size * m * matrix_approximation_rank + ].view(batch_size, m, matrix_approximation_rank) + ) + p_idx += batch_size * n * matrix_approximation_rank + q_idx += batch_size * m * matrix_approximation_rank + + # If warm-start is enabled, reuse Qs from the previous iteration if possible and skip filling random values. + # The exception is the first iteration when PowerSGD is applied. + if not need_randomize_qs: + for q in qs: + _orthogonalize(q, state.orthogonalization_epsilon) + else: + with torch.random.fork_rng(devices=[]): + # Fork this RNG to avoid changing the seed globally and affecting the random sampling anywhere else in the training. + # The seed makes sure that the initial random values are the same across all the DDP replicas. + # This seed should differ at every step. + # Since it is very slow to fork RNG state across all the CUDA devices, + # only fork on CPU and then move the generated tensor to the CUDA device (by overwriting q). + torch.manual_seed(state.rng.randint(1_000_000_000)) + for q in qs: + q.copy_( + torch.randn( + *q.shape, + device="cpu", + dtype=dtype, + ) + ) + _orthogonalize(q, state.orthogonalization_epsilon) + + # Compute Ps. + for tensor, q, p in zip(tensors_to_compress, qs, ps): + torch.bmm(tensor, q, out=p) + + # This allreduce is only applied to uncompressed tensors, + # so it should have been kicked off before the above computation on the compressed tensors to hide more communication costs. + # However, this somehow requires a separate future chain at this time. 
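    # The resulting chain below is:
    #   allreduce(uncompressed memory) -> unpack uncompressed tensors + allreduce(Ps)
    #   -> orthogonalize Ps, compute Qs, allreduce(Qs) -> decompress (tensor ~= P Q^T).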
+ allreduce_contiguous_uncompressed_tensors_fut = dist.all_reduce( + uncompressed_tensors_memory, group=group_to_use, async_op=True + ).get_future() + + def unpack_uncompressed_tensors_and_allreduce_ps(fut): + uncompressed_tensors_memory = fut.value()[0].div_(world_size) + idx = 0 + for tensor in uncompressed_tensors: + tensor.copy_( + uncompressed_tensors_memory[idx : idx + tensor.numel()].view_as(tensor) + ) + idx += tensor.numel() + + # Since these Ps will be orthogonalized later, no need to divide them by world size. + return ( + dist.all_reduce( + state.p_memory_dict[bucket_index], group=group_to_use, async_op=True + ) + .get_future() + .wait()[0] + ) + + def compute_qs(fut): + state.p_memory_dict[bucket_index] = fut.value() + for p in ps: + _orthogonalize(p, state.orthogonalization_epsilon) + + # Compute Qs. + for tensor, p, q in zip(tensors_to_compress, ps, qs): + torch.bmm(tensor.transpose(1, 2), p, out=q) + + # TODO: The above procedure does two matmul+allreduce steps per iteration -- + # one left multiplication and one right multiplication. + # For warm-start, can take one such step at a time, and alternate between them. + + # Allreduce Qs. + return ( + dist.all_reduce( + state.q_memory_dict[bucket_index], group=group_to_use, async_op=True + ) + .get_future() + .wait()[0] + ) + + def decompress(fut): + state.q_memory_dict[bucket_index] = fut.value().div_(world_size) + + for p, q, tensor in zip(ps, qs, tensors_to_compress): + torch.bmm(p, q.transpose(1, 2), out=tensor) + + # Copy batched tensors back to original buffer. + if state.batch_tensors_with_same_shape: + for tensor in tensors_to_compress: + if tensor.shape[0] == 1: + # Skip tensor with batch_size == 1 since itself is the original tensor. + continue + original_tensors = shape_to_tensors[tensor.shape[1:]] + for i, original_tensor in enumerate(original_tensors): + original_tensor.copy_(tensor[i]) + + if torch.cuda.is_available(): + torch.cuda.synchronize(device) + + if state.use_error_feedback: + # Memorize the local errors. + state.error_dict[bucket_index] = input_tensor_cp - input_tensor + if not state.warm_start: + state.p_memory_dict.clear() + state.q_memory_dict.clear() + + state.maybe_increase_iter(bucket) + + return input_tensor + + return ( + allreduce_contiguous_uncompressed_tensors_fut.then( + unpack_uncompressed_tensors_and_allreduce_ps + ) + .then(compute_qs) + .then(decompress) + ) + + +def batched_powerSGD_hook( + state: PowerSGDState, bucket: dist.GradBucket +) -> torch.futures.Future[torch.Tensor]: + r""" + Implement simplified PowerSGD algorithm. + + This DDP communication hook implements a simplified PowerSGD gradient compression + algorithm described in the `paper `_. + This variant does not compress the gradients layer by layer, + but instead compresses the flattened input tensor that batches all the gradients. + Therefore, it is **faster** than :meth:`powerSGD_hook`, + but usually results in a **much lower accuracy**, unless ``matrix_approximation_rank`` is 1. + + .. warning :: + Increasing ``matrix_approximation_rank`` here may not necessarily increase the accuracy, + because batching per-parameter tensors without column/row alignment can destroy low-rank structure. + Therefore, the user should always consider :meth:`powerSGD_hook` first, + and only consider this variant when a satisfactory accuracy can be achieved when ``matrix_approximation_rank`` is 1. + + Once gradient tensors are aggregated across all workers, this hook applies + compression as follows: + + 1. 
Views the input flattened 1D gradient tensor as a square-shaped tensor M with 0 paddings; + + 2. Creates two low-rank tensors P and Q for decomposing M, such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized; + + 3. Computes P, which is equal to MQ; + + 4. Allreduces P; + + 5. Orthogonalizes P; + + 6. Computes Q, which is approximately equal to M^TP; + + 7. Allreduces Q; + + 8. Computes M, which is approximately equal to PQ^T. + + 9. Truncates the input tensor to the original length. + + Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations. + This not only gives the user more control over the tradeoff between speedup and accuracy, + but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers. + + Args: + state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc. + To tune the compression configs, mainly need to tune ``matrix_approximation_rank`` and ``start_powerSGD_iter``. + bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors. + Note that since DDP comm hook only supports single process single device mode, + only exactly one tensor is stored in this bucket. + + Returns: + Future handler of the communication, which updates the gradients in place. + + Example:: + >>> # xdoctest: +SKIP + >>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1) + >>> ddp_model.register_comm_hook(state, batched_powerSGD_hook) + """ # noqa: B950 + process_group = state.process_group + group_to_use = process_group if process_group is not None else dist.group.WORLD + world_size = group_to_use.size() + + # The input tensor is a flattened 1D tensor. + input_tensor = bucket.buffer() + + # Run vanilla allreduce in the first `start_powerSGD_iter` iterations. + if state.iter < state.start_powerSGD_iter: + state.maybe_increase_iter(bucket) + return default._allreduce_fut(group_to_use, input_tensor) + + # Apply PowerSGD after `start_powerSGD_iter` iterations. + device = input_tensor.device + total_length = input_tensor.shape[0] + state.total_numel_before_compression += total_length + + # View the input tensor as a 2D square-shape tensor, and pad 0s if necessary. + square_side_length = math.ceil(math.sqrt(total_length)) + state.total_numel_after_compression += ( + square_side_length * state.matrix_approximation_rank * 2 + ) + padded_total_length = square_side_length ** 2 + input_tensor.resize_(padded_total_length) + input_tensor[total_length:padded_total_length].fill_(0) + + _report_compression_stats(bucket, state) + + # Incorporate the error from the previous state into the gradients. + bucket_index = bucket.index() + input_tensor_cp = None + if state.use_error_feedback: + if bucket_index in state.error_dict: + input_tensor.add_(state.error_dict[bucket_index]) + else: + logger.info( + "A zero tensor of length %s that represents local error is created.", + padded_total_length + ) + state.error_dict[bucket_index] = torch.zeros( + padded_total_length, device=device, dtype=input_tensor.dtype + ) + + # Keep a copy of the input tensor, + # so that we can compute the local error caused by compression later, + # by comparing this copy and the input tensor updated after decompression. 
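        # (Concretely, `decompress` below stores error_dict[bucket_index] = input_tensor_cp - input_tensor,
        # and that stored error is added back onto the gradients at the start of the next iteration.)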
+ input_tensor_cp = torch.clone(input_tensor).detach() + matrix = input_tensor.view(square_side_length, square_side_length) + + # Reuse P and Q from the previous iteration if possible. + # The memory spaces of P and Q need to be allocated in the first iteration when PowerSGD is applied. + if not state.warm_start or bucket_index not in state.p_memory_dict: + # If warm-start is disabled, low-rank tensors will be initialized at every step. + # Only log this if warm-start to avoid spamming. + if state.warm_start: + logger.info( + "Initializing low-rank tensors P and Q, each of which has a shape of %s x %s.", + square_side_length, state.matrix_approximation_rank + ) + + def create_low_rank_tensor(fill_random_values, rng): + """Return a low-rank 2D tensor of square_side_length * matrix_approximation_rank.""" + if fill_random_values: + with torch.random.fork_rng(devices=[]): + # Fork this RNG to avoid changing the seed globally and affecting the random sampling + # anywhere else in the training. + # The seed makes sure that the initial random values are the same across all the DDP replicas. + # This seed should differ at every step. + # Since it is very slow to fork RNG state across all the CUDA devices, + # only fork on CPU and then move the generated tensor to the CUDA device. + torch.manual_seed(rng.randint(1_000_000_000)) + return torch.randn( + square_side_length, + state.matrix_approximation_rank, + device="cpu", + dtype=input_tensor.dtype, + ).to(device) + else: + return torch.empty( + square_side_length, + state.matrix_approximation_rank, + device=device, + dtype=input_tensor.dtype, + ) + + state.p_memory_dict[bucket_index] = create_low_rank_tensor( + fill_random_values=False, rng=state.rng + ) + state.q_memory_dict[bucket_index] = create_low_rank_tensor( + fill_random_values=True, rng=state.rng + ) + _orthogonalize(state.q_memory_dict[bucket_index]) + + torch.matmul( + matrix, state.q_memory_dict[bucket_index], out=state.p_memory_dict[bucket_index] + ) + allreduce_p_fut = dist.all_reduce( + state.p_memory_dict[bucket_index], group=group_to_use, async_op=True + ).get_future() + + def compute_q(fut): + state.p_memory_dict[bucket_index] = fut.value()[0] + _orthogonalize(state.p_memory_dict[bucket_index]) + + torch.matmul( + matrix.t(), + state.p_memory_dict[bucket_index], + out=state.q_memory_dict[bucket_index], + ) + + # TODO: The above procedure does two matmul+allreduce steps per iteration -- + # one left multiplication and one right multiplication. + # For warm-start, can take one such step at a time, and alternate between them. + + return ( + dist.all_reduce( + state.q_memory_dict[bucket_index], group=group_to_use, async_op=True + ) + .get_future() + .wait()[0] + ) + + def decompress(fut): + state.q_memory_dict[bucket_index] = fut.value().div_(world_size) + torch.matmul( + state.p_memory_dict[bucket_index], + state.q_memory_dict[bucket_index].t(), + out=matrix, + ) + + if state.use_error_feedback: + # Memorize the local errors. + state.error_dict[bucket_index] = input_tensor_cp - input_tensor + # Removing this seemingly unnecessary sync somehow may cause failures. 
+ # See: https://github.com/pytorch/pytorch/pull/54838 + if torch.cuda.is_available(): + torch.cuda.synchronize(device) + if not state.warm_start: + state.p_memory_dict.clear() + state.q_memory_dict.clear() + ret = input_tensor.resize_(total_length) + + state.maybe_increase_iter(bucket) + + return ret + + return allreduce_p_fut.then(compute_q).then(decompress) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..9d5cd573eed681b3faf6bef0af3bef92148bd40e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py @@ -0,0 +1,217 @@ +import torch +import torch.distributed as dist +from torch import nn + + +def _quantize_per_tensor_cuda(x, scale, zero_point): + y = torch.round(x / scale) + zero_point + y = torch.clamp(y, 0, 255).to(torch.uint8) + return y + + +def _dequantize_per_tensor_cuda(y, scale, zero_point): + x = scale * (y.to(torch.float32) - zero_point) + return x + + +def _quantize_per_channel_cuda(x, scale, zero_point): + y = torch.zeros(x.size(), device=x.device) + for i in range(x.size()[0]): + y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i] + y = torch.clamp(y, 0, 255).to(torch.uint8) + return y + + +def _dequantize_per_channel_cuda(y, scale, zero_point): + y = y.to(torch.float32).cuda(y.device) + x = torch.zeros_like(y, device=y.device) + for i in range(x.size()[0]): + x[i, :] = scale[i] * (y[i, :] - zero_point[i]) + return x + + +def _get_allgather_out_list(all_gather_in_list, world_size): + out_list = [ + torch.zeros_like( + all_gather_in_list, + device=all_gather_in_list.device, + dtype=all_gather_in_list.dtype, + ) + for _ in range(world_size) + ] + return out_list + + +def quantization_pertensor_hook( + process_group: dist.ProcessGroup, bucket: dist.GradBucket +) -> torch.futures.Future[torch.Tensor]: + """ + Apply ``torch.quantize_per_tensor`` logic to DDP using ``allgather`` protocol. + + Workers first allgather the scale and zero point of their own + ``GradBucket`` prior to the quantization. After all workers have that information, + the first ``then`` callback called ``quantize_and_allgather`` quantizes worker's + own gradient tensor, and uses ``allgather`` to communicate these across all workers. + The final ``then`` callback called ``dequantize_and_aggregate``, dequantizes and + aggregates each quantized gradient tensor locally and returns the mean. + + .. warning :: + This is experimental, and uses ``allgather`` protocol which is considerably slower than + ``allreduce`` protocol. It works only with flattened grads. + + Example:: + >>> # xdoctest: +SKIP + >>> ddp_model.register_comm_hook(process_group, quantization_pertensor_hook) + """ + group_to_use = process_group if process_group is not None else dist.group.WORLD + rank = process_group.rank() if process_group is not None else dist.get_rank() + world_size = group_to_use.size() + + tensor = bucket.buffer() + + myObserver = torch.ao.quantization.MinMaxObserver().cuda(tensor.device) + myObserver(tensor) + + s, z = myObserver.calculate_qparams() + s_and_z = torch.FloatTensor([s, z]).cuda(tensor.device) + + all_ranks_s_and_z = _get_allgather_out_list(s_and_z, world_size) + + # First, allgather scale and zeros. 
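+    # (Each rank needs every other rank's ``(scale, zero_point)`` pair so that it
+    # can dequantize the quantized tensors received from the second allgather below.)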
+ fut = dist.all_gather( + all_ranks_s_and_z, s_and_z, group=group_to_use, async_op=True + ).get_future() + + def quantize_and_allgather(fut): + # Store scale and zeros across all workers. + all_ranks_s_and_z = fut.wait()[0] + # All workers quantize their own ``GradBucket`` tensors. + quantized_tensor = _quantize_per_tensor_cuda( + tensor, all_ranks_s_and_z[rank][0], all_ranks_s_and_z[rank][1] + ) + # Allgather quantized tensors. + fut = dist.all_gather( + _get_allgather_out_list(quantized_tensor, world_size), + quantized_tensor, + group=group_to_use, + async_op=True, + ).get_future() + + return fut.wait() + + def dequantize_and_aggregate(fut): + all_ranks_quantized_tensor = fut.wait()[0] + + aggregated_dequantized_tensor = torch.zeros_like( + all_ranks_quantized_tensor[0], device=tensor.device, dtype=torch.float32 + ) + # Using previously allgathered scales and zeros, dequantize gradient tensors + # locally and then aggregate them. + for r, quantized_tensor in enumerate(all_ranks_quantized_tensor): + aggregated_dequantized_tensor += _dequantize_per_tensor_cuda( + quantized_tensor, all_ranks_s_and_z[r][0], all_ranks_s_and_z[r][1] + ) + + return aggregated_dequantized_tensor / world_size + + return fut.then(quantize_and_allgather).then(dequantize_and_aggregate) + + +def quantization_perchannel_hook( + process_group: dist.ProcessGroup, bucket: dist.GradBucket, bucket_size=512 +) -> torch.futures.Future[torch.Tensor]: + """ + Apply``torch.quantize_per_channel`` logic to DDP using ``allgather`` protocol. + + Compared to per-tensor, the main motivation of per-channel is + for considerably large tensors such as a tensor that contains 6 million + elements quantizing per a bucket size of 512 (or 128) elements may significantly + increase the resolution. + + It first splits ``GradBucket`` tensor into multiple chunks (channels) of ``bucket_size`` + elements. Then, workers allgather the scales and zero points of their own + ``GradBucket`` prior to the quantization. After all workers have that information, + the first ``then`` callback called ``quantize_and_allgather`` quantizes worker's + own gradient tensor, and uses ``allgather`` to communicate these across all workers. + The final ``then`` callback called ``dequantize_and_aggregate``, dequantizes, flattens, and + aggregates each quantized gradient tensor locally and returns the mean. + + .. warning :: + This is experimental, and uses ``allgather`` protocol which is considerably slower than + ``allreduce`` protocol. It works only with flattened grads. + + Example:: + >>> # xdoctest: +SKIP + >>> ddp_model.register_comm_hook(process_group, quantization_perchannel_hook) + """ + group_to_use = process_group if process_group is not None else dist.group.WORLD + rank = process_group.rank() if process_group is not None else dist.get_rank() + world_size = group_to_use.size() + + tensor = bucket.buffer() + + tensor_in_channels = ( + nn.functional.pad( + input=tensor, + pad=(0, bucket_size - len(tensor) % bucket_size), + mode="constant", + value=0, + ) + .view(-1, bucket_size) + .cuda(tensor.device) + ) + + myPerChannelObserver = torch.ao.quantization.PerChannelMinMaxObserver().cuda( + tensor.device + ) + myPerChannelObserver(tensor_in_channels) + + s_ch, z_ch = myPerChannelObserver.calculate_qparams() + s_and_z = torch.stack((s_ch, z_ch)).cuda(tensor.device) + + all_ranks_s_and_z = _get_allgather_out_list(s_and_z, world_size) + # First, allgather scale and zeros. 
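+    # (As in the per-tensor hook, the per-channel scales and zero points are shared
+    # first so that each rank can later dequantize every other rank's chunks locally.)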
+ fut = dist.all_gather( + all_ranks_s_and_z, s_and_z, group=group_to_use, async_op=True + ).get_future() + + def quantize_and_allgather(fut): + # Store scale and zeros across all workers. + all_ranks_s_and_z = fut.wait()[0] + # All workers quantize their corresponding ``GradBucket`` tensors. + quantized_tensor = _quantize_per_channel_cuda( + tensor_in_channels, + all_ranks_s_and_z[rank, 0, :], + all_ranks_s_and_z[rank, 1, :], + ) + # Allgather quantized tensors. + fut = dist.all_gather( + _get_allgather_out_list(quantized_tensor, world_size), + quantized_tensor, + group=group_to_use, + async_op=True, + ).get_future() + + return fut.wait() + + def dequantize_and_aggregate(fut): + all_ranks_quantized_tensor = fut.wait()[0] + + aggregated_dequantized_tensor = torch.zeros_like( + all_ranks_quantized_tensor[0], device=tensor.device, dtype=torch.float32 + ) + # Using previously allgathered scales and zeros, dequantize gradient tensors + # locally and then aggregate them. + for r, quantized_tensor in enumerate(all_ranks_quantized_tensor): + aggregated_dequantized_tensor += _dequantize_per_channel_cuda( + quantized_tensor, all_ranks_s_and_z[r][0], all_ranks_s_and_z[r][1] + ) + + return ( + torch.flatten(aggregated_dequantized_tensor).cuda(tensor.device)[ + : tensor.size()[0] + ] + / world_size + ) + + return fut.then(quantize_and_allgather).then(dequantize_and_aggregate) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e830fd550a6ad48c82bf9e01e2a84413bfcc9825 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/averagers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/averagers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d8b680c4c65cdf3c1dbbefe6ac105d162189485 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/averagers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/hierarchical_model_averager.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/hierarchical_model_averager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fde1a4684e4ebf91da4c2a262e8aadc176b8557 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/hierarchical_model_averager.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/utils.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..016730732aa6ceaa71188fe377091d9430246aea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py new file mode 100644 index 0000000000000000000000000000000000000000..637ae144b379ea01a206fd08d340827cea40d4b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py @@ -0,0 +1,167 @@ +# Copyright 2022 Cruise LLC +import logging +import warnings +from collections import OrderedDict +from typing import Union, Iterable, Dict + +import torch +import torch.distributed as dist +import torch.distributed.algorithms.model_averaging.averagers as averagers +import torch.distributed.algorithms.model_averaging.utils as utils + +logger = logging.getLogger(__name__) + + +class HierarchicalModelAverager(averagers.ModelAverager): + r""" + Runs hierarchical model averaging (`hierarchical SGD `_). + + Process groups of different sizes are organized in a hierarchy, and they average parameters + by using different periods concurrently after the warm-up stage. + This is an extension of :class:`~torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager` + that supports `post-local SGD `_, which essentially only supports + a two-level hierarchy: the intra-machine level and the global level, where the intra-machine + level is usually embedded in :meth:`~torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook`. + Similarly, the process groups within this class do not have such an intra-machine process + subgroup, which should be embedded by the post-local SGD communication hook instead. + + Args: + period_group_size_dict: An ordered dict mapping keys of model averaging period to + process group size, used for initializing process groups of + different sizes in a hierarchy to average parameters concurrently. + Particularly, at each iteration, there will be at most a single + process group that runs averaging -- the period of such group should + have the largest period which the current step can be divided by. + For example, if the dict has three keys: 2, 4, and 8, + then this means totally three process groups will be created to + average parameters every 2, 4, and 8 iterations, respectively. + At the 4th iteration, only the second process group will run + averaging, because the first process group should be a + subset of the second process group, and no need to execute the first + process group redundantly. + On the other hand, the third process group can only be triggered + every 8 iterations, so it will not be triggered at the 4th iteration. + warmup_steps (int): The number of warm-up steps. During this stage, model averaging is skipped. + process_group (ProcessGroup, optional): The overall process group containing all the processes that runs model averaging. + If ``None``, the default process group, which is created + by :func:`torch.distributed.init_process_group`, will be used. 
+ (default: ``None``) + + Example:: + >>> # xdoctest: +SKIP('undefined rank') + >>> from collections import OrderedDict + >>> import torch + >>> import torch.distributed as dist + >>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import ( + >>> PostLocalSGDState, + >>> post_localSGD_hook, + >>> ) + >>> import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD + >>> import torch.nn as nn + >>> + >>> dist.init_process_group("nccl", rank=rank, world_size=16) + >>> torch.cuda.set_device(rank) + >>> module = nn.Linear(1, 1, bias=False).to(rank) + >>> model = nn.parallel.DistributedDataParallel( + >>> module, device_ids=[rank], output_device=rank + >>> ) + >>> # Register a post-localSGD communication hook. + >>> # Assume that each machine has 4 GPUs, then each intra-machine subgroup has a size of 4. + >>> subgroup, _ = dist.new_subgroups() + >>> state = PostLocalSGDState(process_group=None, subgroup=subgroup, start_localSGD_iter=100) + >>> model.register_comm_hook(state, post_localSGD_hook) + >>> + >>> # Average parameters among each group of 8 processes every 4 iterations, and among all + >>> # the 16 processes every 16 iterations. + >>> averager = hierarchicalSGD.HierarchicalModelAverager( + >>> period_group_size_dict=OrderedDict([(4, 8), (16, 16)]), warmup_steps=100) + >>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``. + >>> # In the first 100 steps, run global gradient averaging like normal DDP at every step. + >>> # After 100 steps, run model averaging at two levels. + >>> for step in range(0, 200): + >>> optimizer.zero_grad() + >>> loss = loss_fn(output, labels) + >>> loss.backward() + >>> optimizer.step() + >>> # Average parameters after ``optimizer.step()``. + >>> # Thus, the inter-node communication only occurs periodically after ``warmup_steps``. + >>> averager.average_parameters(model.parameters()) + + .. warning :: + The last group size in the dict must be the size of the provided ``process_group``, + which indicates model averaging at the highest level of the hierarchy. + If ``process_group`` is not provided, then the last group size should be equal to the world size. + + .. warning :: + `HierarchicalModelAverager` is experimental and subject to change. + """ + + def __init__(self, period_group_size_dict=None, warmup_steps=0, process_group=None): + super().__init__(process_group) + if not period_group_size_dict: + raise ValueError("Arg ``period_group_size_dict`` must not be empty.") + self._periods = list(period_group_size_dict.keys()) + if self._periods[0] <= 0: + raise ValueError("The minimum period in arg ``period_group_size_dict`` must be a positive value.") + elif self._periods[-1] == 1: + warnings.warn( + "When the maximum period in arg ``period_group_size_dict`` is 1, " + "no need to use model averaging because the communication cost " + "of all-reducing parameters will be no less than the cost of all-reducing gradients " + "by DistributedDataParallel in the backward pass. Therefore, only " + "DistributedDataParallel should be used for this case." + ) + overall_group_size = dist.get_world_size(group=self.process_group) + if list(period_group_size_dict.values())[-1] != overall_group_size: + raise ValueError( + f"The last value in arg ``period_process_group_dict`` {list(period_group_size_dict.values())[-1]} " + f"must be equal to the size of arg ``process_group`` {overall_group_size}." 
+ ) + + self.period_process_group_dict = OrderedDict() + logger.info("Model averaging hierarchy:") + for period, group_size in period_group_size_dict.items(): + logger.info( + "\tEach group that has %s processes average parameters every %s iterations, " + "if no higher-level averaging.", group_size, period) + if group_size != overall_group_size: + self.period_process_group_dict[period], _ = dist.new_subgroups( + group_size=group_size, group=self.process_group) + else: + self.period_process_group_dict[period] = self.process_group + + if warmup_steps < 0: + raise ValueError("Arg ``warmup_steps`` must be a non-negative number.") + self.warmup_steps = warmup_steps + + def _find_process_group(self): + """ + Return a process group as the value of an ``period_process_group_dict`` entry. + + If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``, + then the returned process group is the one corresponding to the largest period, + since this process group will be used for averaging parameters at this ``step``. + Returns ``None`` if not found. + """ + for period in reversed(self._periods): + if self.step % period == 0: + return self.period_process_group_dict[period] + return None + + def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]): + """ + Averages parameters or parameter groups of an optimizer. + + Averaging only occurs if ``step`` is no less than ``warmup_steps`` + and it can be divided by a period in the keys of ``period_process_group_dict``, + where ``step`` is increased by 1 at each iteration in the training loop. + If ``step`` can be divided by multiple periods in the keys of ``period_process_group_dict``, + only the largest period is used, and the corresponding process group is used for averaging parameters. + Args: + params: The parameters of a model or parameter groups of an optimizer. 
+ """ + if self.step >= self.warmup_steps: + group = self._find_process_group() + if group is not None: + utils.average_parameters_or_parameter_groups(params, group) + self.step += 1 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b50157739ba45e1cbb50bde1063bf3f5001bdad6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdbaf6dadf8a17d5b35cfd7d103c90e9073ae0d9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc20cfc17bf2113fdb1b977dbfa7fa8344a24dab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90702e3eb2a6d517fcae50b1316ffcd2d5bf3537 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db6cb639ef1c6efd200f2f415e965a3a84f6d5b9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py @@ -0,0 +1,134 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Module contains events processing mechanisms that are integrated with the standard python logging. 
+ +Example of usage: + +:: + + from torch.distributed.elastic import events + event = events.Event(name="test_event", source=events.EventSource.WORKER, metadata={...}) + events.get_logging_handler(destination="console").info(event) + +""" + +import inspect +import logging +import os +import socket +import traceback +from enum import Enum +from typing import Dict, Optional + +from torch.distributed.elastic.events.handlers import get_logging_handler + +from .api import ( # noqa: F401 + Event, + EventMetadataValue, + EventSource, + NodeState, + RdzvEvent, +) + +_events_loggers: Dict[str, logging.Logger] = {} + +def _get_or_create_logger(destination: str = "null") -> logging.Logger: + """ + Construct python logger based on the destination type or extends if provided. + + Available destination could be found in ``handlers.py`` file. + The constructed logger does not propagate messages to the upper level loggers, + e.g. root logger. This makes sure that a single event can be processed once. + + Args: + destination: The string representation of the event handler. + Available handlers found in ``handlers`` module + """ + global _events_loggers + + if destination not in _events_loggers: + _events_logger = logging.getLogger(f"torchelastic-events-{destination}") + _events_logger.setLevel(os.environ.get("LOGLEVEL", "INFO")) + # Do not propagate message to the root logger + _events_logger.propagate = False + + logging_handler = get_logging_handler(destination) + _events_logger.addHandler(logging_handler) + + # Add the logger to the global dictionary + _events_loggers[destination] = _events_logger + + return _events_loggers[destination] + + +def record(event: Event, destination: str = "null") -> None: + _get_or_create_logger(destination).info(event.serialize()) + +def record_rdzv_event(event: RdzvEvent) -> None: + _get_or_create_logger("dynamic_rendezvous").info(event.serialize()) + + +def construct_and_record_rdzv_event( + run_id: str, + message: str, + node_state: NodeState, + name: str = "", + hostname: str = "", + pid: Optional[int] = None, + master_endpoint: str = "", + local_id: Optional[int] = None, + rank: Optional[int] = None, +) -> None: + # We don't want to perform an extra computation if not needed. + if isinstance(get_logging_handler("dynamic_rendezvous"), logging.NullHandler): + return + + # Set up parameters. + if not hostname: + hostname = socket.getfqdn() + if not pid: + pid = os.getpid() + + # Determines which file called this function. + callstack = inspect.stack() + filename = "no_file" + if len(callstack) > 1: + stack_depth_1 = callstack[1] + filename = os.path.basename(stack_depth_1.filename) + if not name: + name = stack_depth_1.function + + # Delete the callstack variable. If kept, this can mess with python's + # garbage collector as we are holding on to stack frame information in + # the inspect module. + del callstack + + # Set up error trace if this is an exception + if node_state == NodeState.FAILED: + error_trace = traceback.format_exc() + else: + error_trace = "" + + # Initialize event object + event = RdzvEvent( + name=f"{filename}:{name}", + run_id=run_id, + message=message, + hostname=hostname, + pid=pid, + node_state=node_state, + master_endpoint=master_endpoint, + rank=rank, + local_id=local_id, + error_trace=error_trace, + ) + + # Finally, record the event. 
+ record_rdzv_event(event) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7c16e3fd808db47d257158cedb4aad185d41e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import Dict + + +_log_handlers: Dict[str, logging.Handler] = { + "console": logging.StreamHandler(), + "dynamic_rendezvous": logging.NullHandler(), + "null": logging.NullHandler(), +} + + +def get_logging_handler(destination: str = "null") -> logging.Handler: + global _log_handlers + return _log_handlers[destination] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..767abcc1d60b233049f082afc35d047cee1ca742 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py @@ -0,0 +1,163 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +"""Metrics API. + +**Overview**: + +The metrics API in torchelastic is used to publish telemetry metrics. +It is designed to be used by torchelastic's internal modules to +publish metrics for the end user with the goal of increasing visibility +and helping with debugging. However you may use the same API in your +jobs to publish metrics to the same metrics ``sink``. + +A ``metric`` can be thought of as timeseries data +and is uniquely identified by the string-valued tuple +``(metric_group, metric_name)``. + +torchelastic makes no assumptions about what a ``metric_group`` is +and what relationship it has with ``metric_name``. It is totally up +to the user to use these two fields to uniquely identify a metric. + +.. note:: The metric group ``torchelastic`` is reserved by torchelastic for + platform level metrics that it produces. + For instance torchelastic may output the latency (in milliseconds) + of a re-rendezvous operation from the agent as + ``(torchelastic, agent.rendezvous.duration.ms)`` + +A sensible way to use metric groups is to map them to a stage or module +in your job. You may also encode certain high level properties +the job such as the region or stage (dev vs prod). + +**Publish Metrics**: + +Using torchelastic's metrics API is similar to using python's logging +framework. You first have to configure a metrics handler before +trying to add metric data. + +The example below measures the latency for the ``calculate()`` function. 
+ +:: + + import time + import torch.distributed.elastic.metrics as metrics + + # makes all metrics other than the one from "my_module" to go /dev/null + metrics.configure(metrics.NullMetricsHandler()) + metrics.configure(metrics.ConsoleMetricsHandler(), "my_module") + + def my_method(): + start = time.time() + calculate() + end = time.time() + metrics.put_metric("calculate_latency", int(end-start), "my_module") + +You may also use the torch.distributed.elastic.metrics.prof` decorator +to conveniently and succinctly profile functions + +:: + + # -- in module examples.foobar -- + + import torch.distributed.elastic.metrics as metrics + + metrics.configure(metrics.ConsoleMetricsHandler(), "foobar") + metrics.configure(metrics.ConsoleMetricsHandler(), "Bar") + + @metrics.prof + def foo(): + pass + + class Bar(): + + @metrics.prof + def baz(): + pass + +``@metrics.prof`` will publish the following metrics +:: + + .success - 1 if the function finished successfully + .failure - 1 if the function threw an exception + .duration.ms - function duration in milliseconds + +**Configuring Metrics Handler**: + +`torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting +the added metric values to a particular destination. Metric groups can be +configured with different metric handlers. + +By default torchelastic emits all metrics to ``/dev/null``. +By adding the following configuration metrics, +``torchelastic`` and ``my_app`` metric groups will be printed out to +console. + +:: + + import torch.distributed.elastic.metrics as metrics + + metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic") + metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app") + +**Writing a Custom Metric Handler**: + +If you want your metrics to be emitted to a custom location, implement +the `torch.distributed.elastic.metrics.MetricHandler` interface +and configure your job to use your custom metric handler. 
+ +Below is a toy example that prints the metrics to ``stdout`` + +:: + + import torch.distributed.elastic.metrics as metrics + + class StdoutMetricHandler(metrics.MetricHandler): + def emit(self, metric_data): + ts = metric_data.timestamp + group = metric_data.group_name + name = metric_data.name + value = metric_data.value + print(f"[{ts}][{group}]: {name}={value}") + + metrics.configure(StdoutMetricHandler(), group="my_app") + +Now all metrics in the group ``my_app`` will be printed to stdout as: + +:: + + [1574213883.4182858][my_app]: my_metric= + [1574213940.5237644][my_app]: my_metric= + +""" + +from typing import Optional + +from .api import ( # noqa: F401 + ConsoleMetricHandler, + MetricData, + MetricHandler, + MetricsConfig, + NullMetricHandler, + configure, + get_elapsed_time_ms, + getStream, + prof, + profile, + publish_metric, + put_metric, +) + + +def initialize_metrics(cfg: Optional[MetricsConfig] = None): + pass + + +try: + from torch.distributed.elastic.metrics.static_init import * # type: ignore[import] # noqa: F401 F403 +except ModuleNotFoundError: + pass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b25fec6e37caa1d0419364e2086f429164c314b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e17a128acfe639ed120e3da8ddeae49f6b841646 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py new file mode 100644 index 0000000000000000000000000000000000000000..1499943c78d24d0fdaac31526318c3067743c79c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import abc +import time +import warnings +from collections import namedtuple +from functools import wraps +from typing import Dict, Optional + +__all__ = ['MetricsConfig', 'MetricHandler', 'ConsoleMetricHandler', 'NullMetricHandler', 'MetricStream', + 'configure', 'getStream', 'prof', 'profile', 'put_metric', 'publish_metric', 'get_elapsed_time_ms', + 'MetricData'] + +MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"]) + + +class MetricsConfig: + __slots__ = ["params"] + + def __init__(self, params: Optional[Dict[str, str]] = None): + self.params = params + if self.params is None: + self.params = {} + + +class MetricHandler(abc.ABC): + @abc.abstractmethod + def emit(self, metric_data: MetricData): + pass + + +class ConsoleMetricHandler(MetricHandler): + def emit(self, metric_data: MetricData): + print( + f"[{metric_data.timestamp}][{metric_data.group_name}]: {metric_data.name}={metric_data.value}" + ) + + +class NullMetricHandler(MetricHandler): + def emit(self, metric_data: MetricData): + pass + + +class MetricStream: + def __init__(self, group_name: str, handler: MetricHandler): + self.group_name = group_name + self.handler = handler + + def add_value(self, metric_name: str, metric_value: int): + self.handler.emit( + MetricData(time.time(), self.group_name, metric_name, metric_value) + ) + + +_metrics_map: Dict[str, MetricHandler] = {} +_default_metrics_handler: MetricHandler = NullMetricHandler() + + +# pyre-fixme[9]: group has type `str`; used as `None`. +def configure(handler: MetricHandler, group: Optional[str] = None): + if group is None: + global _default_metrics_handler + # pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used + # as `MetricHandler`. + _default_metrics_handler = handler + else: + _metrics_map[group] = handler + + +def getStream(group: str): + if group in _metrics_map: + handler = _metrics_map[group] + else: + handler = _default_metrics_handler + return MetricStream(group, handler) + + +def _get_metric_name(fn): + qualname = fn.__qualname__ + split = qualname.split(".") + if len(split) == 1: + module = fn.__module__ + if module: + return module.split(".")[-1] + "." + split[0] + else: + return split[0] + else: + return qualname + + +def prof(fn=None, group: str = "torchelastic"): + r""" + @profile decorator publishes duration.ms, count, success, failure metrics for the function that it decorates. + + The metric name defaults to the qualified name (``class_name.def_name``) of the function. + If the function does not belong to a class, it uses the leaf module name instead. + + Usage + + :: + + @metrics.prof + def x(): + pass + + @metrics.prof(group="agent") + def y(): + pass + """ + + def wrap(f): + @wraps(f) + def wrapper(*args, **kwargs): + key = _get_metric_name(f) + try: + start = time.time() + result = f(*args, **kwargs) + put_metric(f"{key}.success", 1, group) + except Exception: + put_metric(f"{key}.failure", 1, group) + raise + finally: + put_metric(f"{key}.duration.ms", get_elapsed_time_ms(start), group) # type: ignore[possibly-undefined] + return result + + return wrapper + + if fn: + return wrap(fn) + else: + return wrap + + +def profile(group=None): + """ + @profile decorator adds latency and success/failure metrics to any given function. 
+ + Usage + + :: + + @metrics.profile("my_metric_group") + def some_function(): + """ + warnings.warn("Deprecated, use @prof instead", DeprecationWarning) + + def wrap(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + start_time = time.time() + result = func(*args, **kwargs) + publish_metric(group, f"{func.__name__}.success", 1) + except Exception: + publish_metric(group, f"{func.__name__}.failure", 1) + raise + finally: + publish_metric( + group, + f"{func.__name__}.duration.ms", + get_elapsed_time_ms(start_time), # type: ignore[possibly-undefined] + ) + return result + + return wrapper + + return wrap + + +def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchelastic"): + """ + Publish a metric data point. + + Usage + + :: + + put_metric("metric_name", 1) + put_metric("metric_name", 1, "metric_group_name") + """ + getStream(metric_group).add_value(metric_name, metric_value) + + +def publish_metric(metric_group: str, metric_name: str, metric_value: int): + warnings.warn( + "Deprecated, use put_metric(metric_group)(metric_name, metric_value) instead" + ) + metric_stream = getStream(metric_group) + metric_stream.add_value(metric_name, metric_value) + + +def get_elapsed_time_ms(start_time_in_seconds: float): + """Return the elapsed time in millis from the given start time.""" + end_time = time.time() + return int((end_time - start_time_in_seconds) * 1000) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..668849fa6d2078a9da89c5049b0706ac072984a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py @@ -0,0 +1,150 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +In the context of Torch Distributed Elastic we use the term *rendezvous* to +refer to a particular functionality that combines a **distributed +synchronization** primitive with **peer discovery**. + +It is used by Torch Distributed Elastic to gather participants of a training +job (i.e. nodes) such that they all agree on the same list of participants and +everyone's roles, as well as make a consistent collective decision on when +training can begin/resume. + +Torch Distributed Elastic rendezvous provides the following critical +functionalities: + +**Barrier**: + +Nodes performing rendezvous will all block until the rendezvous is considered +complete - this happens when at least ``min`` total number of nodes have joined +the rendezvous barrier (for the same job). This also implies the barrier is not +necessarily of fixed size. + +There's an additional small waiting time after reaching ``min`` number of +nodes - this is used to ensure the rendezvous is not completed "too quickly" +(which could potentially exclude additional nodes attempting to join at +approximately the same time). + +If ``max`` number of nodes is gathered at the barrier, the rendezvous is +completed immediately. 
+ +There's also an overall timeout which causes the rendezvous to fail if ``min`` +number of nodes is never reached - this is meant to be a simple fail-safe to +help release partially allocated job resources, in case there's a problem with +the resource manager, and is meant to be interpreted as non-retryable. + +**Exclusivity**: + +A simple distributed barrier would not be sufficient, as we also need to ensure +that only one group of nodes exists at any given time (for a given job). In +other words, new nodes (i.e. joining late) should not be able to form a parallel +independent group of workers for the same job. + +Torch Distributed Elastic rendezvous ensures that if a group of nodes has +already completed a rendezvous (and hence might already be training), then +additional "late" nodes attempting to rendezvous will only announce themselves +as waiting, and will have to wait until the (previously completed) existing +rendezvous is destroyed first. + +**Consistency**: + +When a rendezvous is completed, all its members will agree on the job membership +and everyone's role in it. This role is represented using an integer, called +rank, that is between between 0 and world size. + +Note that ranks are *not stable*, in the sense that the same node can be +assigned a different rank in the next (re-)rendezvous. + +**Fault-tolerance**: + +Torch Distributed Elastic rendezvous is designed to tolerate node failures +during the rendezvous process. Should a process crash (or lose network +connectivity, etc), between joining the rendezvous and it being completed, then +a re-rendezvous with remaining healthy nodes will happen automatically. + +A node can also fail *after* it has completed (or *has been observered* by other +nodes to have completed) the rendezvous - this scenario will be handled by the +Torch Distributed Elastic ``train_loop`` instead (where it will also trigger a +re-rendezvous). + +**Shared key-value store**: + +When the rendezvous is completed, a shared key-value store is created and +returned. This store implements a ``torch.distributed.Store`` API (see +`distributed communication docs +`__). + +This store is only shared by the members of the completed rendezvous. It +is intended to be used by Torch Distributed Elastic to exchange information +necessary to initialize job control and data-planes. + +**Waiting workers and rendezvous closing**: + +Torch Distributed Elastic rendezvous handler object provides additional +functionalities, which are technically not part of the rendezvous process: + +1. Querying how many workers arrived late at the barrier, who can participate in + *next* rendezvous. + +2. Setting the rendezvous *closed* to signal all nodes not to participate in + next rendezvous. + +**DynamicRendezvousHandler**: + +Torch Distributed Elastic comes with the :py:class:`.DynamicRendezvousHandler` +class that implements the rendezvous mechanism described above. It is a backend- +agnostic type that expects a particular :py:class:`.RendezvousBackend` instance +to be specified during construction. + +Torch distributed users can either implement their own backend type or use one +of the following implementations that come with PyTorch: + +- :py:class:`.C10dRendezvousBackend`: Uses a C10d store (by default + ``TCPStore``) as the rendezvous backend. The main advantage of using a C10d + store is that it requires no 3rd-party dependency (such as etcd) to establish + a rendezvous. +- :py:class:`.EtcdRendezvousBackend`: Supersedes the legacy + :py:class:`.EtcdRendezvousHandler` class. 
Passing an + :py:class:`.EtcdRendezvousBackend` instance to + :py:class:`.DynamicRendezvousHandler` is functionally equivalent to + instantiating an :py:class:`.EtcdRendezvousHandler`. + + :: + + store = TCPStore("localhost") + + backend = C10dRendezvousBackend(store, "my_run_id") + + rdzv_handler = DynamicRendezvousHandler.from_backend( + run_id="my_run_id", + store=store, + backend=backend, + min_nodes=2, + max_nodes=4 + ) +""" + +from .api import * # noqa: F403 +from .registry import _register_default_handlers + + +_register_default_handlers() + + +__all__ = [ + "RendezvousClosedError", + "RendezvousConnectionError", + "RendezvousError", + "RendezvousGracefulExitError", + "RendezvousHandler", + "RendezvousHandlerCreator", + "RendezvousHandlerRegistry", + "RendezvousParameters", + "RendezvousStateError", + "RendezvousTimeoutError", + "rendezvous_handler_registry", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8f6d738f13ea316702e2de43c8ec7516afb0c96 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eedc4bc41d72318ca07d271d140bc384f3db3f47 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb33fe807ea9a29ab69c3bf84d3ae1e284ecf183 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/dynamic_rendezvous.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9c59655ee4ae323f4b71fa9c4296803b1f62e6d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4539c131e1cea3e476b0b560ac309d120307d8b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_rendezvous_backend.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80d04c9f3d7562e1f5f23c7e0edaa789fd8ca4e6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_store.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/static_tcp_rendezvous.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/static_tcp_rendezvous.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..990169b9eed2b86ebcd4680aa090da7945c2f30c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/static_tcp_rendezvous.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d6105edd6c638e90679e0a460ee2eaba8def8a4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/api.py new file mode 100644 index 0000000000000000000000000000000000000000..9e096c4ee409028cb7bb0ad6796ab6295d35ef22 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/api.py @@ -0,0 +1,277 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict, Optional, Tuple + +from torch.distributed import Store + + +class RendezvousError(Exception): + """Represents the base type for rendezvous errors.""" + + +class RendezvousClosedError(RendezvousError): + """Raised when a rendezvous is closed.""" + + +class RendezvousTimeoutError(RendezvousError): + """Raised when a rendezvous did not complete on time.""" + + +class RendezvousConnectionError(RendezvousError): + """Raised when the connection to a rendezvous backend has failed.""" + + +class RendezvousStateError(RendezvousError): + """Raised when the state of a rendezvous is corrupt.""" + +class RendezvousGracefulExitError(RendezvousError): + """Raised when node wasn't not included in rendezvous and gracefully exits. + + Exception is a mechanism to exit the stack, however does not mean a failure. + """ + +class RendezvousHandler(ABC): + """Main rendezvous interface. + + Note: + Distributed Torch users normally **do not** need to implement their own + ``RendezvousHandler``. An implementation based on C10d Store is already + provided, and is recommended for most users. 
+ """ + + @abstractmethod + def get_backend(self) -> str: + """Return the name of the rendezvous backend.""" + + @abstractmethod + def next_rendezvous( + self, + ) -> Tuple[Store, int, int]: + """Main entry-point into the rendezvous barrier. + + Blocks until the rendezvous is complete and the current process is + included in the formed worker group, or a timeout occurs, or the + rendezvous was marked closed. + + Returns: + A tuple of :py:class:`torch.distributed.Store`, ``rank``, and + ``world size``. + + Raises: + RendezvousClosedError: + The rendezvous is closed. + RendezvousConnectionError: + The connection to the rendezvous backend has failed. + RendezvousStateError: + The rendezvous state is corrupt. + RendezvousTimeoutError: + The rendezvous did not complete on time. + """ + + @abstractmethod + def is_closed(self) -> bool: + """Check whether the rendezvous has been closed. + + A closed rendezvous means all future attempts to re-rendezvous within + same job will fail. + + ``is_closed()`` and :py:meth:`set_closed` have semantics of eventual + propagation and should not be used for synchronization. The intention is + that if at least one node decides the job is finished, it will close the + rendezvous, and other nodes will soon observe this and stop running as + well. + """ + + @abstractmethod + def set_closed(self): + """Mark the rendezvous as closed.""" + + @abstractmethod + def num_nodes_waiting(self) -> int: + """Return the number of nodes who arrived late at the rendezvous + barrier, hence were not included in the current worker group. + + Callers should periodically call this method to check whether new + nodes are waiting to join the job and if so admit them by calling + :py:meth:`next_rendezvous()` (re-rendezvous). + """ + + @abstractmethod + def get_run_id(self) -> str: + """Return the run id of the rendezvous. + + The run id is a user-defined id that uniquely identifies an instance of + a distributed application. It typically maps to a job id and is used to + allow nodes to join the correct distributed application. + """ + + @abstractmethod + def shutdown(self) -> bool: + """Close all resources that were open for the rendezvous. + + Example:: + + rdzv_handler = ... + try: + store, rank, world_size = rdzv_handler.next_rendezvous() + finally: + rdzv_handler.shutdown() + """ + + +class RendezvousParameters: + """Hold the parameters to construct a :py:class:`RendezvousHandler`. + + Args: + backend: + The name of the backend to use to handle the rendezvous. + endpoint: + The endpoint of the rendezvous, usually in form [:]. + run_id: + The id of the rendezvous. + min_nodes: + The minimum number of nodes to admit to the rendezvous. + max_nodes: + The maximum number of nodes to admit to the rendezvous. + local_addr: + The address of the local node. + **kwargs: + Additional parameters for the specified backend. + """ + + def __init__( + self, + backend: str, + endpoint: str, + run_id: str, + min_nodes: int, + max_nodes: int, + local_addr: Optional[str] = None, + **kwargs, + ): + if not backend: + raise ValueError("The rendezvous backend name must be a non-empty string.") + + if min_nodes < 1: + raise ValueError( + f"The minimum number of rendezvous nodes ({min_nodes}) must be greater than zero." + ) + if max_nodes < min_nodes: + raise ValueError( + f"The maximum number of rendezvous nodes ({max_nodes}) must be greater than or " + f"equal to the minimum number of rendezvous nodes ({min_nodes})." 
+ ) + + self.backend = backend + self.endpoint = endpoint + self.run_id = run_id + self.min_nodes = min_nodes + self.max_nodes = max_nodes + self.config = kwargs + self.local_addr = local_addr + + def get(self, key: str, default: Any = None) -> Any: + """Return the value for ``key`` if ``key`` exists, else ``default``.""" + return self.config.get(key, default) + + def get_as_bool(self, key: str, default: Optional[bool] = None) -> Optional[bool]: + """Return the value for ``key`` as a ``bool``.""" + value = self.get(key, default) + if value is None or isinstance(value, bool): + return value + if isinstance(value, int): + if value == 1: + return True + if value == 0: + return False + elif isinstance(value, str): + if value.lower() in ["1", "true", "t", "yes", "y"]: + return True + if value.lower() in ["0", "false", "f", "no", "n"]: + return False + raise ValueError( + f"The rendezvous configuration option '{key}' does not represent a valid boolean value." + ) + + def get_as_int(self, key: str, default: Optional[int] = None) -> Optional[int]: + """Return the value for ``key`` as an ``int``.""" + value = self.get(key, default) + if value is None: + return value + try: + return int(value) + except ValueError as e: + raise ValueError( + f"The rendezvous configuration option '{key}' does not represent a valid integer " + "value." + ) from e + + +RendezvousHandlerCreator = Callable[[RendezvousParameters], RendezvousHandler] + + +class RendezvousHandlerRegistry: + """Represent a registry of :py:class:`RendezvousHandler` backends.""" + + _registry: Dict[str, RendezvousHandlerCreator] + + def __init__(self) -> None: + self._registry = {} + + def register(self, backend: str, creator: RendezvousHandlerCreator) -> None: + """Register a new rendezvous backend. + + Args: + backend: + The name of the backend. + creator: + The callback to invoke to construct the + :py:class:`RendezvousHandler`. + """ + if not backend: + raise ValueError("The rendezvous backend name must be a non-empty string.") + + current_creator: Optional[RendezvousHandlerCreator] + try: + current_creator = self._registry[backend] + except KeyError: + current_creator = None + + if current_creator is not None and current_creator != creator: + raise ValueError( + f"The rendezvous backend '{backend}' cannot be registered with '{creator}' as it " + f"is already registered with '{current_creator}'." + ) + + self._registry[backend] = creator + + def create_handler(self, params: RendezvousParameters) -> RendezvousHandler: + """Create a new :py:class:`RendezvousHandler`.""" + try: + creator = self._registry[params.backend] + except KeyError as e: + raise ValueError( + f"The rendezvous backend '{params.backend}' is not registered. Did you forget " + f"to call `{self.register.__name__}`?" + ) from e + + handler = creator(params) + + # Do some sanity check. + if handler.get_backend() != params.backend: + raise RuntimeError( + f"The rendezvous backend '{handler.get_backend()}' does not match the requested " + f"backend '{params.backend}'." + ) + + return handler + + +# The default global registry instance used by launcher scripts to instantiate +# rendezvous handlers. 
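+# (For illustration only: a custom backend would typically be registered via
+# ``rendezvous_handler_registry.register("my_backend", my_creator)`` and later
+# instantiated through ``create_handler`` with a ``RendezvousParameters`` whose
+# ``backend`` field is ``"my_backend"``; ``my_backend`` and ``my_creator`` are
+# hypothetical names.)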
+rendezvous_handler_registry = RendezvousHandlerRegistry()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
new file mode 100644
index 0000000000000000000000000000000000000000..dab767925037bea5b94e8292fbbd6cb791951011
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
@@ -0,0 +1,1045 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import json
+import logging
+import sys
+import threading
+import time
+from typing import Optional
+
+import etcd  # type: ignore[import]
+from torch.distributed.elastic.rendezvous import (
+    RendezvousClosedError,
+    RendezvousError,
+    RendezvousHandler,
+    RendezvousParameters,
+    RendezvousTimeoutError,
+)
+
+from .utils import parse_rendezvous_endpoint
+from .etcd_store import EtcdStore, cas_delay
+
+
+_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
+_log_handler = logging.StreamHandler(sys.stderr)
+_log_handler.setFormatter(_log_fmt)
+
+log = logging.getLogger(__name__)
+log.propagate = False
+log.setLevel(logging.INFO)
+log.addHandler(_log_handler)
+
+
+# A retryable failure exception means that we were too late to make
+# a desired state transition (e.g. because of a race condition),
+# and should now restart from the beginning.
+# A small delay is recommended to avoid spamming Etcd.
+class EtcdRendezvousRetryableFailure(Exception):
+    pass
+
+
+# Similar to retryable failure, but the new state we observed suggests we
+# can re-try immediately, i.e. without a need for a "safety delay".
+class EtcdRendezvousRetryImmediately(Exception):
+    pass
+
+
+# Default timeout for the rendezvous.
+_DEFAULT_TIMEOUT: int = 600  # 10 minutes
+
+# Additional waiting time after reaching the minimum number of nodes
+# in case the rendezvous is elastic (min != max).
+_DEFAULT_LAST_CALL_TIMEOUT: int = 30  # 30 seconds
+
+# Various constants used internally in EtcdRendezvous
+CONST_ETCD_SETUP_TTL = 5
+CONST_ETCD_FROZEN_TTL = 10
+CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
+
+# Ephemeral node TTL for the worker's keep-alive key:
+CONST_WORKER_KEEPALIVE_TTL = 10
+
+# TTL for the ephemeral run_id-specific directory. All rendezvous state data
+# for a specific run_id (job instance) is contained within this directory.
+# Its only role is to clean up rendezvous data from old runs (for the case when
+# the etcd server is persistent). It has no effect on correctness, but it should
+# be larger than any timeout that a worker process is expected to survive:
+CONST_RUNID_SUBROOT_TTL = 7200  # 2 hours
+
+
+class EtcdRendezvousHandler(RendezvousHandler):
+    """
+    Implements a
+    :py:class:`torch.distributed.elastic.rendezvous.RendezvousHandler` interface
+    backed by
+    :py:class:`torch.distributed.elastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
+    ``EtcdRendezvousHandler`` uses a URL to configure the type of rendezvous to
+    use and to pass implementation-specific configuration to the rendezvous
+    module. The basic etcd rendezvous configuration URL looks like the following
+    ::
+
+     etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers>  # noqa: W605
+
+     -- example --
+
+     etcd://localhost:2379/1234?min_workers=1&max_workers=3
+
+    The URL above is interpreted as follows:
+
+    1.
Use the rendezvous handler that is registered with the ``etcd`` + scheme + 2. The ``etcd`` endpoint to use is ``localhost:2379`` + 3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to + share a common etcd server for multiple jobs so long as the + ``job_ids`` are guaranteed to be unique). Note that the job id can be + any string (e.g. does not need to be a number) as long as it is + unique. + 4. ``min_workers=1`` and ``max_workers=3`` specifies a range for + membership size - Torch Distributed Elastic starts running the job as + long as the cluster size is greater than or equal to ``min_workers`` + and admits up to ``max_workers`` into the cluster. + + Below are a full list of the parameters that can be passed to etcd + rendezvous: + + +--------------------------------------------+--------------------------+ + | Parameter | Description | + +============================================+==========================+ + | min_workers | minimum number of | + | | workers for the | + | | rendezvous to be valid | + +--------------------------------------------+--------------------------+ + | max_workers | maximum number of | + | | workers to admit | + +--------------------------------------------+--------------------------+ + | timeout | total timeout within | + | | which next_rendezvous is | + | | expected to succeed | + | | (default 600s) | + +--------------------------------------------+--------------------------+ + | last_call_timeout | additional wait amount | + | | (“last call”) after min | + | | number of workers has | + | | been reached (defaults | + | | to 30s) | + +--------------------------------------------+--------------------------+ + | etcd_prefix | path prefix (from etcd | + | | root), inside which all | + | | etcd nodes will be | + | | created (defaults to | + | | ``/torchelastic/p2p``) | + +--------------------------------------------+--------------------------+ + """ + + def __init__(self, rdzv_impl): + self._rdzv_impl = rdzv_impl + + def __del__(self): + # TODO: look into using weakref here instead. + del self._rdzv_impl + + def get_backend(self) -> str: + return "etcd" + + def next_rendezvous(self): + rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier() + + log.info("Creating EtcdStore as the c10d::Store implementation") + store = self._rdzv_impl.setup_kv_store(rdzv_version) + + return store, rank, world_size + + def is_closed(self): + try: + _, state = self._rdzv_impl.get_rdzv_state() + return state["status"] == "closed" + except etcd.EtcdKeyNotFound: + # No rendezvous state, so it cannot be closed. + return False + + def set_closed(self): + self._rdzv_impl.set_closed() + + def num_nodes_waiting(self): + try: + _, state = self._rdzv_impl.get_rdzv_state() + if state["status"] == "final": + return state["num_workers_waiting"] + except etcd.EtcdKeyNotFound: + pass + return 0 + + def get_run_id(self) -> str: + return self._rdzv_impl._run_id + + def shutdown(self) -> bool: + try: + self.set_closed() + return True + except BaseException as e: + log.warning("Shutdown failed. Error occurred: %s", str(e)) + return False + + +# TODO: we should probably handle a few additional errors, +# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are +# only relevant for multi-node Etcd ensemble. A simple retry would work, +# but is verbose to add everywhere. Consider wrapping the client calls +# into auto-retry for these errors? 
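+#
+# A rough sketch of what such a retry wrapper could look like (illustrative
+# only; `_with_retries` is a hypothetical helper, not part of this module):
+#
+#   def _with_retries(fn, *args, retries=3, delay=0.1, **kwargs):
+#       for attempt in range(retries):
+#           try:
+#               return fn(*args, **kwargs)
+#           except (etcd.EtcdLeaderElectionInProgress, etcd.EtcdWatcherCleared):
+#               if attempt == retries - 1:
+#                   raise
+#               time.sleep(delay)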
+# +class EtcdRendezvous: + """A rendezvous implementation that uses `etcd `__ as the backend store.""" + + def __init__( + self, + client, + prefix, + run_id, + num_min_workers, + num_max_workers, + timeout, + last_call_timeout, + ): + self.client = client + log.info("Etcd machines: %s", self.client.machines) + + self._prefix = prefix + self._run_id = run_id + self._num_min_workers = num_min_workers + self._num_max_workers = num_max_workers + self._timeout = timeout + self._last_call_timeout = last_call_timeout + + # For cleaning up TTL refresher threads (for ephemeral keys) + self._lease_run_id_stop = None + self._lease_this_rank_stop = None + + if not self._prefix.endswith("/"): + self._prefix += "/" + + # Setup a permanent prefix dir, if didn't exist + if self._prefix != "/": + self.create_path_if_not_exists(self._prefix) + + # Lease a "sub-root" node specific to this job instance (run_id) + self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL) + self._lease_run_id_stop = self.setup_lease_renewal( + self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL + ) + + # Subdir for all rendezvous work + self.create_path_if_not_exists(self.get_path("/rdzv")) + + # Create a rendezvous version counter, if doesn't exist + try: + self.client.write( + key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False + ) + except etcd.EtcdAlreadyExist: + pass + + def __del__(self): + # TODO: look into using weakref here instead. + if self._lease_run_id_stop is not None: + self._lease_run_id_stop.set() + + if self._lease_this_rank_stop is not None: + self._lease_this_rank_stop.set() + + def rendezvous_barrier(self): + """ + Main entry point for next rendezvous. + + This method is blocking until rendezvous succeeds or a timeout occurs. + + Returns: + ``(rdzv_version, rank, world_size)`` + + Raises: + RendezvousTimeoutError - timeout waiting for rendezvous + RendezvousClosedError - rendezvous is or was closed while waiting + RendezvousError - other persistent errors that + render the rendezvous non-retryable + """ + self._rendezvous_deadline = time.time() + self._timeout + while True: + if time.time() > self._rendezvous_deadline: + raise RendezvousTimeoutError() + + log.info("Attempting to join next rendezvous") + try: + # Dis-own our lease in the previous rendezvous, if exists + if self._lease_this_rank_stop is not None: + self._lease_this_rank_stop.set() + + return self.init_phase() + + except EtcdRendezvousRetryImmediately: + # The type of failure suggests we can retry without delay + pass + + except EtcdRendezvousRetryableFailure: + # In case of retryable failure, wait a small delay + # to avoid spamming etcd + time.sleep(1) + + except RendezvousTimeoutError: + log.info("Rendezvous timeout occurred in EtcdRendezvousHandler") + raise + + except RendezvousClosedError: + log.info( + "Rendezvous for run_id=%s was observed to be closed", self._run_id + ) + raise + + except RendezvousError: + raise + + except Exception as e: + # In case of a general exception, wait a small delay + # to avoid spamming etcd + # FIXME: there are a few things that fall under this like + # etcd.EtcdKeyNotFound, etc, which could be handled more explicitly. + log.info("Rendezvous attempt failed, will retry. Reason: %s", e) + time.sleep(1) + + def init_phase(self): + """ + Initially, the rendezvous state is expected to be one of: + + 1. empty (non-existent) - in this case we try to create a new one. + 2. joinable - we try to join it. + 3. 
final - we announce ourselves as waiting, and go into monitoring mode + + Any other state is considered transitional, and will be retried after + a short delay. + + Returns: + ``(rdzv_version, rank, world_size)`` + + Raises: + RendezvousClosedError - current rendezvous was/is closed + EtcdRendezvousRetryableFailure - observed some intermediate + state, which is best handled by retrying later + """ + try: + active_version = self.try_create_rendezvous() + state = json.loads(active_version.value) + log.info("New rendezvous state created: %s", state) + except etcd.EtcdAlreadyExist: + active_version, state = self.get_rdzv_state() + # Note: it is possible for above query to fail (etcd.EtcdKeyNotFound), + # but this is ok for us - just means we'll restart from beginning. + log.info("Observed existing rendezvous state: %s", state) + + if state["status"] == "closed": + raise RendezvousClosedError() + + if state["status"] == "joinable": + return self.join_phase(state["version"]) + + if state["status"] == "final": + self.handle_existing_rendezvous(state["version"]) + raise EtcdRendezvousRetryImmediately() + + self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1) + raise EtcdRendezvousRetryableFailure() + + def join_phase(self, expected_version): + """ + We observed a rendezvous state in 'joinable' state, and attempt to join this + particular version, and then wait for all other peers to join. + """ + # Failure to join will propagate an exception, causing a re-entry. + active_version, this_rank = self.join_rendezvous(expected_version) + state = json.loads(active_version.value) + log.info( + "Joined rendezvous version %s as rank %s. Full state: %s", + state["version"], this_rank, state + ) + + # If this worker was first to reach num_min_workers requirement, + # and rendezvous is still joinable (therefore it is elastic), + # then this worker will be responsible for waiting out the "last call" + # timeout and closing (i.e. transitioning to 'frozen') the rendezvous + # afterwards. + # As a safety against a potential failure of this worker (during the + # last call timeout), the rendezvous state is made ephemeral + # when min_num_workers is reached. + + if this_rank == self._num_min_workers - 1 and state["status"] == "joinable": + log.info("Rank %s is responsible for join last call.", this_rank) + last_call_deadline = time.time() + self._last_call_timeout + self.handle_join_last_call(expected_version, last_call_deadline) + log.info("Rank %s finished join last call.", this_rank) + + # Wait for rendezvous state to be frozen, which means a fixed set of peers + log.info("Waiting for remaining peers.") + active_version = self.wait_for_peers(expected_version) + state = json.loads(active_version.value) + + assert ( + state["version"] == expected_version + ), "Logic error: failed to observe version mismatch" + + return self.confirm_phase(expected_version, this_rank) + + def confirm_phase(self, expected_version, this_rank): + """ + Once the rendezvous state transitions from 'joinable' to 'frozen', + we have every participant confirm their membership and setup per-member + keep-alive TTL keys, and then wait for all other participants to confirm, + which would then successfully conclude this rendezvous. + """ + log.info("All peers arrived. 
Confirming membership.") + self.confirm_membership(expected_version, this_rank) + + log.info("Waiting for confirmations from all peers.") + active_version = self.wait_for_final(expected_version) + state = json.loads(active_version.value) + + log.info( + "Rendezvous version %s is complete. Final state: %s", + state["version"], state + ) + + # Rendezvous version number; our rank in it; world size + return state["version"], this_rank, len(state["participants"]) + + def handle_existing_rendezvous(self, expected_version): + """ + Handle the case when there's an existing (state 'final) rendezvous already + in place, and we have to announce ourselves waiting, and wait until + the next rendezvous opportunity. + """ + # If state is 'final' -> increment num_workers_waiting + # Then, observe state changes: + # 1. if it's no longer final -> bail out and re-try + # 2. if keep alives are missing, destroy it and bail out. + active_state = self.announce_self_waiting(expected_version) + log.info( + "Added self to waiting list. Rendezvous full state: %s", + active_state.value + ) + + self.wait_for_rendezvous_to_free(expected_version) + log.info("Previously existing rendezvous state changed. Will re-try joining.") + + def try_create_rendezvous(self): + """ + Create new rendezvous state or raise an exception that indicates an unexpected state (e.g. already exists). + + Raises: + RendezvousError - on unexpected state + """ + # Initially active_version is ephemeral - this is to handle the + # possibility that might fail to complete the setup transaction, + # i.e. the transition "setup" -> "joinable". + active_version = self.client.write( + key=self.get_path("/rdzv/active_version"), + value=json.dumps({"status": "setup"}), + prevExist=False, + ttl=CONST_ETCD_SETUP_TTL, + ) + + try: + version_counter = self.client.get(self.get_path("/rdzv/version_counter")) + version_counter.value = str(int(version_counter.value) + 1) + self.client.update(version_counter) + except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed) as e: + raise RendezvousError( + "Unexpected state of EtcdRendezvousHandler, worker needs to die." + ) from e + + # Any failure below results in declaring a retryable rendezvous failure. + # The ephemeral /rdzv/active_version will expire and someone can then + # re-try the setup process. + + # Create directory node for participant data + self.client.write( + key=self.get_path(f"/rdzv/v_{version_counter.value}"), + value=None, + dir=True, + prevExist=False, + ) + + # Publish rendezvous version and signal it is ready-to-be-joined. + # If rendezvous was set closed just before this, a retry will happen, + # where the closed condition will be handled. + return self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps( + { + "status": "joinable", + "version": version_counter.value, + "participants": [], + } + ), + prev_value=active_version.value, + ) + + def join_rendezvous(self, expected_version): + """Helper method for the join phase.""" + # Use compare-and-swap to add self to rendezvous state: + while True: + cas_delay() + active_version, state = self.get_rdzv_state() + + if state["status"] != "joinable": + raise EtcdRendezvousRetryableFailure( + "Rendezvous state became non-joinable before we could join. " + "Must join next one." + ) + + if state["version"] != expected_version: + raise EtcdRendezvousRetryImmediately( + "Rendezvous version changed. Must try join the new one." 
+ ) + + assert ( + len(state["participants"]) < self._num_max_workers + ), "Logic error: joinable rendezvous should always have space left" + + this_rank = len(state["participants"]) + state["participants"].append(this_rank) + + # When reaching min workers, or changing state to frozen, we'll set + # the active_version node to be ephemeral. + set_ttl: Optional[int] = None + if len(state["participants"]) == self._num_max_workers: + state["status"] = "frozen" + state["keep_alives"] = [] + set_ttl = CONST_ETCD_FROZEN_TTL + elif len(state["participants"]) >= self._num_min_workers: + set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL + + try: + # Compare-and-swap. + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ttl=set_ttl, + ) + # We succeeded joining. + return active_version, this_rank + + except etcd.EtcdCompareFailed: + log.info("Join rendezvous CAS unsuccessful, retrying") + + def wait_for_peers(self, expected_version): + """Helper method for the join phase.""" + active_version, state = self.get_rdzv_state() + while True: + if state["status"] == "frozen" and state["version"] == expected_version: + # Success, all peers arrived. + return active_version + + elif state["status"] == "joinable" and state["version"] == expected_version: + # Continue waiting for any interesting events. + active_version, state = self.try_wait_for_state_change( + etcd_index=active_version.etcd_index + 1 + ) + + else: + # No valid transition possible at this point + raise EtcdRendezvousRetryableFailure( + "Rendezvous state transition no longer possible. Must re-enter." + ) + + def confirm_membership(self, expected_version, this_rank): + """Helper method for the confirm phase.""" + # Compare-and-swap loop + while True: + cas_delay() + active_version, state = self.get_rdzv_state() + + if state["status"] != "frozen": + raise EtcdRendezvousRetryImmediately( + "Rendezvous no longer frozen, before we confirmed. " + "Must join next one" + ) + if state["version"] != expected_version: + raise EtcdRendezvousRetryImmediately( + "Rendezvous version changed. Must try join the new one." + ) + + this_lease_key = self.get_path( + f"/rdzv/v_{expected_version}/rank_{this_rank}" + ) + self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL) + + state["keep_alives"].append(this_lease_key) + if len(state["keep_alives"]) == len(state["participants"]): + # Everyone confirmed (this rank is last to do so) + state["status"] = "final" + state["num_workers_waiting"] = 0 + finalize = True + else: + finalize = False + + try: + # Compare-and-swap. If new state is still frozen, keep it ephemeral. + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ttl=None if finalize else CONST_ETCD_FROZEN_TTL, + ) + + self._lease_this_rank_stop = self.setup_lease_renewal( + this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL + ) + return active_version + + except etcd.EtcdCompareFailed: + log.info("Confirm membership CAS unsuccessful, retrying") + + def wait_for_final(self, expected_version): + """Helper method for the confirm phase.""" + active_version, state = self.get_rdzv_state() + while True: + if state["status"] == "final" and state["version"] == expected_version: + # Success. This rendezvous is final, and we accept it. 
+ return active_version + + elif state["status"] == "frozen" and state["version"] == expected_version: + # Continue waiting for any interesting events. + active_version, state = self.try_wait_for_state_change( + etcd_index=active_version.etcd_index + 1 + ) + + else: + # No valid transition possible at this point + raise EtcdRendezvousRetryableFailure( + "Rendezvous state transition no longer possible. Must re-enter." + ) + + def announce_self_waiting(self, expected_version): + """ + Announce this worker is waiting (via num_workers_waiting counter) to join next + rendezvous, but only if state and version match. + """ + while True: + cas_delay() + active_version, state = self.get_rdzv_state() + + if state["status"] != "final" or state["version"] != expected_version: + raise EtcdRendezvousRetryImmediately() + + # Increment counter to signal an additional waiting worker. + state["num_workers_waiting"] += 1 + + try: + active_version = self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ) + return active_version + + except etcd.EtcdCompareFailed: + log.info("Announce self as waiting CAS unsuccessful, retrying") + + def wait_for_rendezvous_to_free(self, expected_version): + """ + When there's an existing valid rendezvous in state 'final', we have to wait until the next opportunity to join. + + Such opportunity may come from: + + 1. rendezvous state changed by someone else, in which case we unblock and retry. + 2. rendezvous becomes invalid because at least one member failed to renew their + leased keep_alive node. We detect this, and destroy the rendezvous. + """ + active_version, state = self.get_rdzv_state() + while True: + if state["status"] != "final" or state["version"] != expected_version: + return + + # Check if current rendezvous state is valid, in the sense that all + # its members are alive (renewing their lease). + # If not, try destroy this rendezvous, so a new one can be created. + alive_members = self.client.get( + self.get_path(f"/rdzv/v_{expected_version}") + ) + keep_alive_keys = [ch.key for ch in alive_members.children] + + for key in state["keep_alives"]: + if key not in keep_alive_keys: + # This participant didn't renew their lease. We'll declare this + # rendezvous version as dead (but only if it hadn't changed) + log.info("Keep-alive key %s is not renewed.", key) + log.info( + "Rendezvous version %s is incomplete. ", + expected_version + ) + log.info("Attempting to destroy it.") + + # Compare-and-delete operation. Throws if compare failed, + # which means rendezvous was already destroyed/re-created/closed, + # and we can try to re-enter the barrier. + self.client.delete( + key=self.get_path("/rdzv/active_version"), + prevValue=active_version.value, + ) + + log.info( + "Destroyed rendezvous version %s successfully.", + expected_version + ) + + # We can return (and retry) immediately + return + + # Existing rendezvous seems valid, no reason to destroy it. + # We just have to wait until something changes and re-check. 
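+            # The watch below blocks until something under the /rdzv prefix
+            # changes (starting from the next etcd index we have not seen yet)
+            # or the timeout expires; afterwards we re-check the overall
+            # deadline, re-read the state and re-evaluate the loop conditions.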
+            try:
+                overall_timeout = (
+                    max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
+                )
+                self.client.watch(
+                    key=self.get_path("/rdzv"),
+                    index=active_version.etcd_index + 1,
+                    recursive=True,
+                    timeout=overall_timeout,
+                )
+            except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
+                pass
+
+            if time.time() > self._rendezvous_deadline:
+                raise RendezvousTimeoutError()
+            active_version, state = self.get_rdzv_state()
+
+    def handle_join_last_call(self, expected_version, deadline):
+        """
+        After we reach the min number of workers, one particular worker takes on
+        the responsibility of waiting an additional timeout before closing the
+        join window. If the worker responsible for this fails, the rendezvous
+        will be destroyed due to expiring TTL, and the other participants will
+        re-rendezvous.
+
+        Here we expect to see state <joinable, expected_version>.
+        Exit gracefully if either:
+
+        1. state becomes <frozen, expected_version>
+        2. timeout happens (reaching deadline), in which case
+           we try the transition to <frozen, expected_version>
+
+        Exit with exception otherwise.
+        """
+        active_version, state = self.get_rdzv_state()
+        while True:
+            if state["status"] == "frozen" and state["version"] == expected_version:
+                # Worker set became frozen before last-call timeout. This is possible
+                # when num_max_workers is reached before the timeout.
+                return
+
+            if state["status"] != "joinable" or state["version"] != expected_version:
+                raise EtcdRendezvousRetryableFailure(
+                    "Rendezvous state transition no longer possible. Must re-enter."
+                )
+
+            # If timeout occurred, attempt a state transition (joinable -> frozen)
+            if time.time() >= deadline:
+                state["status"] = "frozen"
+                state["keep_alives"] = []
+                try:
+                    active_version = self.client.test_and_set(
+                        key=self.get_path("/rdzv/active_version"),
+                        value=json.dumps(state),
+                        prev_value=active_version.value,
+                        ttl=CONST_ETCD_FROZEN_TTL,
+                    )
+                    # We successfully made this rendezvous frozen.
+                    return
+                except etcd.EtcdCompareFailed:
+                    log.info("Join last-call transition CAS unsuccessful. Will retry")
+                    cas_delay()
+                    active_version, state = self.get_rdzv_state()
+                    continue
+
+            # Timeout did not occur, so we must refresh TTL, and wait for
+            # further changes. Note: we only want TTL to be refreshed if
+            # state is still joinable, hence we use CAS for that here,
+            # even though we don't change any of the data.
+            try:
+                active_version = self.client.test_and_set(
+                    key=self.get_path("/rdzv/active_version"),
+                    value=active_version.value,
+                    prev_value=active_version.value,
+                    ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
+                )
+
+                # Minimize "oversleeping":
+                timeout = min(
+                    CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
+                    deadline - time.time() + 1.0,  # Oversleeping by 1s is ok.
+                )
+                active_version, state = self.try_wait_for_state_change(
+                    etcd_index=active_version.etcd_index + 1, timeout=timeout
+                )
+            except etcd.EtcdCompareFailed:
+                log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
+                cas_delay()
+                active_version, state = self.get_rdzv_state()
+
+    def set_closed(self):
+        """
+        Mark the rendezvous 'closed' for the current run_id, which is used to
+        signal other participants not to attempt a (re-)rendezvous. This is
+        useful when one of the workers decides the job is complete.
+        """
+        while True:
+            active_version, state = self.get_rdzv_state()
+
+            if state["status"] == "closed":
+                # Already closed by someone else.
+ return + + state["status"] = "closed" + try: + self.client.test_and_set( + key=self.get_path("/rdzv/active_version"), + value=json.dumps(state), + prev_value=active_version.value, + ) + return + + except etcd.EtcdCompareFailed: + log.info("Set closed CAS unsuccessful, retrying") + cas_delay() + + def get_rdzv_state(self): + active_version = self.client.get(key=self.get_path("/rdzv/active_version")) + return active_version, json.loads(active_version.value) + + def try_wait_for_state_change(self, etcd_index, timeout=None): + # Don't sleep past the overall deadline (at least more than by 1s) + overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0 + timeout = overall_timeout if timeout is None else min(timeout, overall_timeout) + + try: + self.client.watch( + self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout + ) + except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut): + pass + + if time.time() > self._rendezvous_deadline: + raise RendezvousTimeoutError() + + # Unfortunately, we have to do another fetch in order to get last etcd_index. + return self.get_rdzv_state() + + def get_path(self, path): + if not path.startswith("/"): + path = "/" + path + + return f"{self._prefix}run_{self._run_id}{path}" + + def create_path_if_not_exists(self, full_path, ttl=None): + try: + self.client.write( + key=full_path, value=None, dir=True, prevExist=False, ttl=ttl + ) + except etcd.EtcdAlreadyExist: + pass + + def setup_lease_renewal(self, full_path, ttl): + # NOTE: For ephemeral key TTL renewal (~lease) to work correctly, + # make sure you don't call any long-blocking methods that do not + # release the Python's GIL! An example of this is calling a pybind11 + # extension function that is blocking / long-running, but is not + # doing a scoped release of the GIL. + def lease_worker(client, path, ttl, stop_event): + while True: + try: + client.refresh(path, ttl=ttl) + except etcd.EtcdKeyNotFound: + break + except ConnectionRefusedError: + # This error usually occurs during test when the server already got terminated but the + # python garbage collector have not yet invoked the __del__ method. + break + + if stop_event.wait(timeout=ttl / 2): + break + + lease_stop_event = threading.Event() + lease_thread = threading.Thread( + target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event) + ) + + lease_thread.daemon = True + lease_thread.start() + + return lease_stop_event + + def store_extra_data(self, rdzv_version, key, value): + node = self.get_path(f"/rdzv/v_{rdzv_version}/extra_data") + try: + # If first time we are storing anything: + extra_data = self.client.write( + key=node, value=json.dumps({key: value}), prevExist=False + ) + return + except etcd.EtcdAlreadyExist: + pass + + # CAS loop, to make sure we don't lose concurrent stores. + while True: + # We never delete extra_data. Failure here should be fatal, no special handling. 
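+            # Read-modify-write: fetch the current value, merge the new key in,
+            # then test_and_set with prev_value so the write only succeeds if
+            # nobody else modified the node in between; otherwise retry.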
+ extra_data = self.client.get(node) + + new_extra_data_value = json.loads(extra_data.value) + new_extra_data_value[key] = value + + try: + extra_data = self.client.test_and_set( + key=node, + value=json.dumps(new_extra_data_value), + prev_value=extra_data.value, + ) + return + except etcd.EtcdCompareFailed: + log.info("Store extra_data CAS unsuccessful, retrying") + time.sleep(0.1) + + def load_extra_data(self, rdzv_version, key, timeout=None): + # 'extra_data' node itself, and the directory it is located in: + node = self.get_path(f"/rdzv/v_{rdzv_version}/extra_data") + node_dir = self.get_path(f"/rdzv/v_{rdzv_version}") + + # TODO: implement timeout + # https://github.com/pytorch/elastic/issues/12 + while True: + # Combined wait for the node itself, and the key inside it. + root = self.client.get(node_dir) + + # Find the extra_data node, if it exists + extra_data = [n for n in root.children if n.key == node] + assert len(extra_data) <= 1 + + # Node for extra_data exists, check the desired key inside it. + if len(extra_data) == 1: + extra_data_dict = json.loads(extra_data[0].value) + if key in extra_data_dict: + return extra_data_dict[key] + + # The 'extra_data' node doesn't exist, or they key isn't published yet. + # Wait for interesting events on the extra_data node and retry. + try: + self.client.watch(node, index=root.etcd_index + 1) + except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut): + pass + + def setup_kv_store(self, rdzv_version): + store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv") + self.create_path_if_not_exists(store_path) + return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path) + + +def _create_etcd_client(params: RendezvousParameters) -> etcd.Client: + """Create a new ``etcd.Client`` from the specified ``RendezvousParameters``.""" + hostname, port = parse_rendezvous_endpoint(params.endpoint, 2379) + + # The communication protocol + protocol = params.config.get("protocol") + if protocol is None: + protocol = "http" + else: + if protocol != "http" and protocol != "https": + raise ValueError("The etcd protocol must be HTTP or HTTPS.") + + # The SSL client certificate + ssl_cert = params.config.get("cert") + if ssl_cert is not None: + cert_key = params.config.get("key") + if cert_key is not None: + # The etcd client expects the certificate key as the second element + # of the `cert` tuple. 
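+            # For example (hypothetical paths):
+            #   cert="/etc/ssl/client.crt", key="/etc/ssl/client.key"
+            #   -> ssl_cert = ("/etc/ssl/client.crt", "/etc/ssl/client.key")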
+ ssl_cert = (ssl_cert, cert_key) + + # The root certificate + ca_cert = params.config.get("cacert") + + return etcd.Client( + hostname, + port, + protocol=protocol, + cert=ssl_cert, + ca_cert=ca_cert, + allow_reconnect=True, + ) + + +# Handler for torch.distributed "static" registration +def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler: + """ + Usage: + + :: + + rdzv_params = RendezvousParameters( + backend="etcd", + endpoint="192.168.0.42:2379", + run_id="123", + min_nodes=4, + max_nodes=8, + timeout=300, + last_call_timeout=30, + etcd_prefix="custom_prefix", + protocol="https", + cacert="/etc/kubernetes/certs/ca.crt", + cert="/etc/kubernetes/certs/client.crt", + key="/etc/kubernetes/certs/client.key") + # -- or -- + rdzv_params = RendezvousParameters( + backend="etcd", + endpoint="192.168.0.42:2379", + run_id="123", + min_nodes=4, + max_nodes=8) + + etcd_rdzv_handler = create_etcd_rendezvous_handler(rdzv_params) + + + Where: + run_id - unique id for this training job instance, + min_nodes - min number of workers expected to join the rendezvous, + max_nodes - max number of workers allowed to join the rendezvous, + defaults to min_workers is not specified. + timeout - total timeout within which next_rendezvous is expected to + succeed; a RendezvousTimeoutError is raised otherwise; + Defaults is 600 (10 minutes). + last_call_timeout - additional wait amount ("last call") after + min number of workers has been reached. + Defaults to 30 seconds. + etcd_prefix - path prefix (from etcd root), inside which all + etcd nodes will be created. + Default is "/torchelastic/p2p". + protocol - http (default) or https to access etcd. + cacert - CA cert to access etcd, only makes sense with https. + cert - client cert to access etcd, only makes sense with https. + key - client key to access etcd, only makes sense with https. + """ + client = _create_etcd_client(params) + + etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p") + + rdzv = EtcdRendezvous( + client=client, + prefix=etcd_prefix, + run_id=params.run_id, + num_min_workers=params.min_nodes, + num_max_workers=params.max_nodes, + timeout=params.get_as_int("timeout", _DEFAULT_TIMEOUT), + last_call_timeout=params.get_as_int("last_call_timeout", _DEFAULT_LAST_CALL_TIMEOUT), + ) + return EtcdRendezvousHandler(rdzv_impl=rdzv) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_server.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_server.py new file mode 100644 index 0000000000000000000000000000000000000000..a220f2ca5917b01af9818e5b38efe00d8fcfdf7f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_server.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import atexit +import logging +import os +import shlex +import shutil +import socket +import subprocess +import tempfile +import time +from typing import Optional, TextIO, Union + +try: + import etcd # type: ignore[import] +except ModuleNotFoundError: + pass + + +log = logging.getLogger(__name__) + + +def find_free_port(): + """ + Find a free port and binds a temporary socket to it so that the port can be "reserved" until used. + + .. 
note:: the returned socket must be closed before using the port, + otherwise a ``address already in use`` error will happen. + The socket should be held and closed as close to the + consumer of the port as possible since otherwise, there + is a greater chance of race-condition where a different + process may see the port as being free and take it. + + Returns: a socket binded to the reserved free port + + Usage:: + + sock = find_free_port() + port = sock.getsockname()[1] + sock.close() + use_port(port) + """ + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + + for addr in addrs: + family, type, proto, _, _ = addr + try: + s = socket.socket(family, type, proto) + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() # type: ignore[possibly-undefined] + print(f"Socket creation attempt failed: {e}") + raise RuntimeError("Failed to create a socket") + + +def stop_etcd(subprocess, data_dir: Optional[str] = None): + if subprocess and subprocess.poll() is None: + log.info("stopping etcd server") + subprocess.terminate() + subprocess.wait() + + if data_dir: + log.info("deleting etcd data dir: %s", data_dir) + shutil.rmtree(data_dir, ignore_errors=True) + + +class EtcdServer: + """ + .. note:: tested on etcd server v3.4.3. + + Starts and stops a local standalone etcd server on a random free + port. Useful for single node, multi-worker launches or testing, + where a sidecar etcd server is more convenient than having to + separately setup an etcd server. + + This class registers a termination handler to shutdown the etcd + subprocess on exit. This termination handler is NOT a substitute for + calling the ``stop()`` method. + + The following fallback mechanism is used to find the etcd binary: + + 1. Uses env var TORCHELASTIC_ETCD_BINARY_PATH + 2. Uses ``/bin/etcd`` if one exists + 3. Uses ``etcd`` from ``PATH`` + + Usage + :: + + server = EtcdServer("/usr/bin/etcd", 2379, "/tmp/default.etcd") + server.start() + client = server.get_client() + # use client + server.stop() + + Args: + etcd_binary_path: path of etcd server binary (see above for fallback path) + """ + + def __init__(self, data_dir: Optional[str] = None): + self._port = -1 + self._host = "localhost" + + root = os.path.dirname(__file__) + default_etcd_bin = os.path.join(root, "bin/etcd") + self._etcd_binary_path = os.environ.get( + "TORCHELASTIC_ETCD_BINARY_PATH", default_etcd_bin + ) + if not os.path.isfile(self._etcd_binary_path): + self._etcd_binary_path = "etcd" + + self._base_data_dir = ( + data_dir if data_dir else tempfile.mkdtemp(prefix="torchelastic_etcd_data") + ) + self._etcd_cmd = None + self._etcd_proc: Optional[subprocess.Popen] = None + + def _get_etcd_server_process(self) -> subprocess.Popen: + if not self._etcd_proc: + raise RuntimeError( + "No etcd server process started. Call etcd_server.start() first" + ) + else: + return self._etcd_proc + + def get_port(self) -> int: + """Return the port the server is running on.""" + return self._port + + def get_host(self) -> str: + """Return the host the server is running on.""" + return self._host + + def get_endpoint(self) -> str: + """Return the etcd server endpoint (host:port).""" + return f"{self._host}:{self._port}" + + def start( + self, + timeout: int = 60, + num_retries: int = 3, + stderr: Union[int, TextIO, None] = None, + ) -> None: + """ + Start the server, and waits for it to be ready. When this function returns the sever is ready to take requests. 
+ + Args: + timeout: time (in seconds) to wait for the server to be ready + before giving up. + num_retries: number of retries to start the server. Each retry + will wait for max ``timeout`` before considering it as failed. + stderr: the standard error file handle. Valid values are + `subprocess.PIPE`, `subprocess.DEVNULL`, an existing file + descriptor (a positive integer), an existing file object, and + `None`. + + Raises: + TimeoutError: if the server is not ready within the specified timeout + """ + curr_retries = 0 + while True: + try: + data_dir = os.path.join(self._base_data_dir, str(curr_retries)) + os.makedirs(data_dir, exist_ok=True) + return self._start(data_dir, timeout, stderr) + except Exception as e: + curr_retries += 1 + stop_etcd(self._etcd_proc) + log.warning( + "Failed to start etcd server, got error: %s, retrying", str(e) + ) + if curr_retries >= num_retries: + shutil.rmtree(self._base_data_dir, ignore_errors=True) + raise + atexit.register(stop_etcd, self._etcd_proc, self._base_data_dir) + + def _start( + self, data_dir: str, timeout: int = 60, stderr: Union[int, TextIO, None] = None + ) -> None: + sock = find_free_port() + sock_peer = find_free_port() + self._port = sock.getsockname()[1] + peer_port = sock_peer.getsockname()[1] + + etcd_cmd = shlex.split( + " ".join( + [ + self._etcd_binary_path, + "--enable-v2", + "--data-dir", + data_dir, + "--listen-client-urls", + f"http://{self._host}:{self._port}", + "--advertise-client-urls", + f"http://{self._host}:{self._port}", + "--listen-peer-urls", + f"http://{self._host}:{peer_port}", + ] + ) + ) + + log.info("Starting etcd server: [%s]", etcd_cmd) + + sock.close() + sock_peer.close() + self._etcd_proc = subprocess.Popen(etcd_cmd, close_fds=True, stderr=stderr) + self._wait_for_ready(timeout) + + def get_client(self): + """Return an etcd client object that can be used to make requests to this server.""" + return etcd.Client( + host=self._host, port=self._port, version_prefix="/v2", read_timeout=10 + ) + + def _wait_for_ready(self, timeout: int = 60) -> None: + client = etcd.Client( + host=f"{self._host}", port=self._port, version_prefix="/v2", read_timeout=5 + ) + max_time = time.time() + timeout + + while time.time() < max_time: + if self._get_etcd_server_process().poll() is not None: + # etcd server process finished + exitcode = self._get_etcd_server_process().returncode + raise RuntimeError( + f"Etcd server process exited with the code: {exitcode}" + ) + try: + log.info("etcd server ready. version: %s", client.version) + return + except Exception: + time.sleep(1) + raise TimeoutError("Timed out waiting for etcd server to be ready!") + + def stop(self) -> None: + """Stop the server and cleans up auto generated resources (e.g. data dir).""" + log.info("EtcdServer stop method called") + stop_etcd(self._etcd_proc, self._base_data_dir) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/api.py new file mode 100644 index 0000000000000000000000000000000000000000..e0607e9c0d5dc725a38083a9a4f1cc24feed3b14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/api.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
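+
+# Illustrative usage of the helpers defined below (the environment variable
+# name and argument values are hypothetical examples):
+#
+#   rank = get_env_variable_or_raise("LOCAL_RANK")
+#   args = macros.substitute(["--local_rank", macros.local_rank], local_rank=rank)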
+ +import os +import socket +from string import Template +from typing import List, Any + + +def get_env_variable_or_raise(env_name: str) -> str: + r""" + Tries to retrieve environment variable. Raises ``ValueError`` + if no environment variable found. + + Args: + env_name (str): Name of the env variable + """ + value = os.environ.get(env_name, None) + if value is None: + msg = f"Environment variable {env_name} expected, but not set" + raise ValueError(msg) + return value + + +def get_socket_with_port() -> socket.socket: + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + for addr in addrs: + family, type, proto, _, _ = addr + s = socket.socket(family, type, proto) + try: + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() + raise RuntimeError("Failed to create a socket") + + +class macros: + """ + Defines simple macros for caffe2.distributed.launch cmd args substitution + """ + + local_rank = "${local_rank}" + + @staticmethod + def substitute(args: List[Any], local_rank: str) -> List[str]: + args_sub = [] + for arg in args: + if isinstance(arg, str): + sub = Template(arg).safe_substitute(local_rank=local_rank) + args_sub.append(sub) + else: + args_sub.append(arg) + return args_sub diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c39bca6f3c8a31f5f2d7115ad12c1fc4925fe1d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +from .cycling_iterator import CyclingIterator # noqa: F401 +from .elastic_distributed_sampler import ElasticDistributedSampler # noqa: F401 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cac46222e83ab372128f7f9473c11e3f45a6233a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/cycling_iterator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/cycling_iterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..043783a63a7cb2336075c54b02ec57044cd935d0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/cycling_iterator.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/elastic_distributed_sampler.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/elastic_distributed_sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35dd677b845b08dd3e30e38ebae67b992ad0750c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/__pycache__/elastic_distributed_sampler.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/cycling_iterator.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/cycling_iterator.py new file mode 100644 index 0000000000000000000000000000000000000000..60a5861f7befdebd5ed80e97151e267ce3523945 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/cycling_iterator.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +class CyclingIterator: + """ + An iterator decorator that cycles through the + underlying iterator "n" times. Useful to "unroll" + the dataset across multiple training epochs. 
+ + The generator function is called as ``generator_fn(epoch)`` + to obtain the underlying iterator, where ``epoch`` is a + number less than or equal to ``n`` representing the ``k``th cycle + + For example if ``generator_fn`` always returns ``[1,2,3]`` + then ``CyclingIterator(n=2, generator_fn)`` will iterate through + ``[1,2,3,1,2,3]`` + """ + + def __init__(self, n: int, generator_fn, start_epoch=0): + self._n = n + self._epoch = start_epoch + self._generator_fn = generator_fn + self._iter = generator_fn(self._epoch) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self._iter) + except StopIteration as eod: # eod == end of data + if self._epoch < self._n - 1: + self._epoch += 1 + self._iter = self._generator_fn(self._epoch) + return self.__next__() + else: + raise eod diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/elastic_distributed_sampler.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/elastic_distributed_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..a66803fa8c099a8768d7858f038736d87cb76781 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/data/elastic_distributed_sampler.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import torch +from torch.utils.data.distributed import DistributedSampler + + +class ElasticDistributedSampler(DistributedSampler): + """ + Sampler that restricts data loading to a subset of + the dataset for elastic training. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size. + + Args: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. 
+ start_index (optional): Which index of the dataset to start sampling from + """ + + def __init__(self, dataset, num_replicas=None, rank=None, start_index=0): + super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank) + if start_index >= len(dataset): + raise ValueError( + f"Start index {start_index} should be less than dataset size {len(dataset)}" + ) + + self.start_index = start_index + self.num_samples = int( + math.ceil(float(len(self.dataset) - self.start_index) / self.num_replicas) # type: ignore[arg-type] + ) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = ( + torch.randperm(len(self.dataset) - self.start_index, generator=g) # type: ignore[arg-type] + .add(self.start_index) + .tolist() + ) + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank : self.total_size : self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/distributed.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b9ac2fac986049809d62ffa8535981cb7ca0a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/distributed.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
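+
+# Illustrative usage of create_c10d_store defined below (the endpoint, port,
+# and the surrounding rank/world_size variables are hypothetical examples):
+#
+#   store = create_c10d_store(
+#       is_server=(rank == 0),
+#       server_addr="127.0.0.1",
+#       server_port=29500,
+#       world_size=world_size,
+#   )
+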
+import datetime +import socket +from contextlib import closing + +import torch.distributed as dist +from torch.distributed.elastic.utils.logging import get_logger + + +log = get_logger(__name__) + +_ADDRESS_IN_USE = "Address already in use" +_SOCKET_TIMEOUT = "Socket Timeout" + +_MEMBER_CHECKIN = "_tcp_store/num_members" +_LAST_MEMBER_CHECKIN = "_tcp_store/last_member" + + +def create_c10d_store( + is_server: bool, + server_addr: str, + server_port: int = -1, + world_size: int = 1, + timeout: float = (60 * 10), # 10 min + wait_for_workers: bool = True, + retries=3, +): + if server_port == -1 and world_size > 1: + raise ValueError( + f"server_port must be specified when world_size > 1, got server_port={server_port}, world_size={world_size}" + ) + + if server_port != -1: + log.info("sever_port: %s, specified, ignoring retries", server_port) + + # only retry when server_port is NOT static + attempt = retries if server_port == -1 else 1 + while True: + if server_port != -1: + port = server_port + else: + port = get_free_port() + + log.info( + "Creating c10d store on %s:%s\n" + " world_size : %s\n" + " is_server : %s\n" + " timeout(sec): %s\n", + server_addr, port, world_size, is_server, timeout + ) + + try: + store = dist.TCPStore( + host_name=server_addr, + port=port, + world_size=world_size, + is_master=is_server, + timeout=datetime.timedelta(seconds=timeout), + wait_for_workers=wait_for_workers, + ) + # skips full rank check when we don't have to wait for all workers + if wait_for_workers: + _check_full_rank(store, world_size) + log.info("Successfully created c10d store") + return store + except RuntimeError as e: + # this is brittle, but the underlying exception type is not properly pybinded + # so we parse the error msg for now, interestingly this is how torch itself + # detects timeouts and port conflicts in their own unittests + # see - caffe2/torch/testing/_internal/common_utils.py + # TODO properly map the exceptions in pybind (c10d/init.cpp) + if str(e) == _ADDRESS_IN_USE: # this will only happen on the server + if attempt < retries: + log.warning( + "port: %s already in use, attempt: [%s/%s]", port, attempt, retries + ) + attempt += 1 + else: + raise RuntimeError( + f"on {server_addr}, port: {port} already in use" + ) from e + else: + raise + + +def _check_full_rank(store, world_size): + idx = store.add(_MEMBER_CHECKIN, 1) + if idx == world_size: + store.set(_LAST_MEMBER_CHECKIN, "") + + try: + store.get(_LAST_MEMBER_CHECKIN) + except RuntimeError as e: + if str(e) == _SOCKET_TIMEOUT: + raise TimeoutError( + f"timed out waiting for all {world_size} members to join" + ) from e + else: + raise + + +def get_free_port(): + sock = get_socket_with_port() + with closing(sock): + return sock.getsockname()[1] + + +def get_socket_with_port() -> socket.socket: + """ + Returns a free port on localhost that is "reserved" by binding a temporary + socket on it. Close the socket before passing the port to the entity + that requires it. 
Usage example + + :: + + sock = _get_socket_with_port() + with closing(sock): + port = sock.getsockname()[1] + sock.close() + # there is still a race-condition that some other process + # may grab this port before func() runs + func(port) + """ + + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + for addr in addrs: + family, type, proto, _, _ = addr + s = socket.socket(family, type, proto) + try: + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() + log.info("Socket creation attempt failed.", exc_info=e) + raise RuntimeError("Failed to create a socket") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_data_parallel_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_data_parallel_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1309be50188c9348f3aadb884955fd8112a4c80c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_data_parallel_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89213a7b8b78d29de7812665d53323051fd55807 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae79218a1d12fbf825377d2a3e988d131d957ce6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/tensor/parallel/__pycache__/input_reshard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 b/llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 new file mode 100644 index 0000000000000000000000000000000000000000..183f672e16db7fb231a95df213334e45ca6429c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be6f24534f1aa85998fb58a61b9e7124281ab539cbde11f0f40d9d25795c8728 +size 43229425