diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..351022e3dbee961af488a46ae5946d8774a03f6e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab80f474b7199f406f2afb2d82152bd5540ba35f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c558fe401c41deadd4dcf4ea29e30ea5e200e06 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..166c6f9254cf6b3e9c9d996ff10bad5333113701 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py @@ -0,0 +1,12 @@ +# Keep old package for BC purposes, this file should be removed once +# everything moves to the `torch.distributed.checkpoint` package. 
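# Illustrative sketch (assumes a torch build that ships
# torch.distributed.checkpoint): the sys.modules alias installed below makes
# the deprecated import path resolve to the new package, so both spellings
# name the same module object and only a DeprecationWarning distinguishes them.
import sys
import torch.distributed.checkpoint as dist_cp
import torch.distributed._shard.checkpoint  # noqa: F401  (DeprecationWarning on first import)
assert sys.modules["torch.distributed._shard.checkpoint"] is dist_cp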
+import sys +import torch +import warnings + +from torch.distributed.checkpoint import * # noqa: F403 +warnings.warn( + "torch.distributed._shard.checkpoint will be deprecated, use torch.distributed.checkpoint instead", + DeprecationWarning +) +sys.modules['torch.distributed._shard.checkpoint'] = torch.distributed.checkpoint diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63ca8ddea6f7c077cdbc711923aac861fb5b77e8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c233840f1eccee36974b96d6a2c1a226866dd3d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py @@ -0,0 +1,9 @@ +import torch.distributed._shard.sharded_tensor._ops.misc_ops +import torch.distributed._shard.sharded_tensor._ops.tensor_ops + +from .binary_cmp import equal, allclose +from .init import kaiming_uniform_, normal_, uniform_, constant_ + +# Import all ChunkShardingSpec ops +from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding import sharded_embedding +from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag import sharded_embedding_bag diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0491c0f2f5ea2c851358285e51992532ea1459c7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ea77017901deff14693edb531bcc62ce76079d2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64e7a642328c6b39ca1cc6003fb9261d8704d28e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a8bcab195a43c4fbb3b6c919cf1ff20d8b82ab8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c99381bda29af6a54b7d75221ddc4b0ae1780aca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e672c54927dbd5344ce9ec6b763c4ed7e3b518e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py @@ -0,0 +1,107 @@ +import functools +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, + Shard, + ShardedTensor, +) +from torch.distributed._shard.common_op_utils import _basic_validation + +def _sharded_op_common(op, early_stop_func, extra_check): + """ + Inject sharded tensor op registration with common logics executed before + different behaviors are done on either local shards or a local tensor. + + Example:: + >>> # xdoctest: +SKIP("Undefined variables") + >>> op = torch.transpose + >>> @_sharded_op_impl(op) + >>> @_sharded_op_common(op, early_stop_func, extra_check) + >>> def sharded_tensor_op(types, args, kwargs, process_group): + >>> ... + >>> + >>> st = sharded_tensor.rand(32, 16) + >>> st.transpose(1, 2) + >>> # This will call '_sharded_op_common' + + Args: + op: The op to be registered and applied to all shards of the st. + early_stop_func (Callable, optional): the func for early stop. + Default: if ``None``, no early stop. + extra_check (Callable, optional): the func for extra condition check. + Default: if ``None``, no extra check. + + Return: + func (Callable): Torch function for which we want to provide a sharded + implementation (ex: torch.transpose) + """ + def decorator_sharded_func(wrapped_func): + @functools.wraps(wrapped_func) + def wrapper(types, args=(), kwargs=None, pg=None): + _basic_validation(op, args, kwargs) + + st = args[0] + if kwargs is None: + kwargs = {} + if extra_check: + extra_check(*args, **kwargs) + if early_stop_func: + early_stop = early_stop_func(*args, **kwargs) + if early_stop: + return st + return wrapped_func(types, args, kwargs, pg) + + return wrapper + + return decorator_sharded_func + +def _register_sharded_op_on_local_shards( + op, early_stop_func=None, extra_check=None, customized_func=None +): + """ + Handles ``__torch_function__`` dispatch for ops which are performed on + each shard of the sharded tensor such as elementwise op like + ``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``. + + For more complicated ops, a customized func can be used to generate + the new shards and sharded tensor size. 
+ + This function expects that the original ShardingSpec for the ShardedTensor + is preserved irrespective of whether or not a customized function is used. + + Args: + op: The op to be registered and applied to all shards of the st. + early_stop_func (Callable, optional): the func for early stop. + Default: if ``None``, no early stop. + extra_check (Callable, optional): the func for extra condition check. + Default: if ``None``, no extra check. + customized_func (Callable, optional): the func for customized logic + to generate new shards and sharded tensor size. + Default: if ``None``, we simply lower to the real op call with + all local shards of the st. + + Return: + func (Callable): registered implementation for sharded op for + ``__torch_function__`` dispatch. + """ + @_sharded_op_impl(op) + @_sharded_op_common(op, early_stop_func, extra_check) + def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None): + st = args[0] + st_metadata = st.metadata() + local_shards = st.local_shards() + local_shards_new = [] + if customized_func: + local_shards_new, st_metadata = customized_func(args, kwargs, pg) + else: + for local_shard in local_shards: + args = (local_shard.tensor, *args[1:]) + local_shards_new.append( + Shard(op(*args, **kwargs), local_shard.metadata) + ) + return ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards_new, + st_metadata, + process_group=pg, + init_rrefs=st._init_rrefs, + sharding_spec=st.sharding_spec() + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..0a7999a4c263a16cc0f743af54fc0e39a378b755 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py @@ -0,0 +1,68 @@ +import torch +import torch.distributed as dist +import torch.distributed.distributed_c10d as distributed_c10d +from torch.distributed._shard.sharded_tensor import ( + ShardedTensor, + _sharded_op_impl +) + +def _communicate_result(result, pg): + # Gather results from all ranks. 
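# Illustrative sketch of how _register_sharded_op_on_local_shards (defined
# above in _common.py) is meant to be used. torch.nn.functional.relu is a
# hypothetical example op here; if an op is already registered elsewhere in
# the package, re-registering it would be rejected.
import torch
from torch.distributed._shard.sharded_tensor._ops._common import (
    _register_sharded_op_on_local_shards,
)

# No early-stop, extra check, or customized func: relu is simply applied to
# every local shard and the result is rewrapped with the unchanged metadata
# and sharding spec, so torch.nn.functional.relu(st) on a ShardedTensor `st`
# dispatches through __torch_function__ to the generated handler.
_register_sharded_op_on_local_shards(torch.nn.functional.relu)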
+ if result: + result_tensor = torch.ones(1, device=torch.device(torch.cuda.current_device())) + else: + result_tensor = torch.zeros(1, device=torch.device(torch.cuda.current_device())) + + dist.all_reduce(result_tensor, group=pg) + + expected_result = torch.ones(1, device=torch.device(torch.cuda.current_device())) * dist.get_world_size(pg) + + return torch.equal(result_tensor, expected_result) + +def binary_cmp(cmp_fun, types, args, kwargs=None, process_group=None): + if len(args) != 2: + raise ValueError(f'Expected two arguments for torch.{cmp_fun.__name__}') + + result = True + st1 = args[0] + st2 = args[1] + if not (isinstance(st1, ShardedTensor) and isinstance(st2, ShardedTensor)): + raise TypeError(f'Both arguments to torch.{cmp_fun.__name__} need to be of type ShardedTensor') + + # Verify same PG + if st1._process_group != st2._process_group: + return False + + if distributed_c10d._rank_not_in_group(st1._process_group) or distributed_c10d._rank_not_in_group(st2._process_group): + return distributed_c10d._rank_not_in_group(st1._process_group) == distributed_c10d._rank_not_in_group(st2._process_group) + + # Verify metadata + if st1.metadata() != st2.metadata(): + return _communicate_result(False, st1._process_group) + + # Verify number of local shards + st1_local_shards = st1.local_shards() + st2_local_shards = st2.local_shards() + if len(st1_local_shards) != len(st2_local_shards): + return _communicate_result(False, st1._process_group) + + # kwargs must be dict-like + if kwargs is None: + kwargs = {} + # Verify each local shard + for idx in range(len(st1_local_shards)): + if st1_local_shards[idx].metadata != st2_local_shards[idx].metadata: + return _communicate_result(False, st1._process_group) + if not cmp_fun(st1_local_shards[idx].tensor, st2_local_shards[idx].tensor, **kwargs): + return _communicate_result(False, st1._process_group) + + + return _communicate_result(True, st1._process_group) + +@_sharded_op_impl(torch.equal) +def equal(types, args, kwargs, process_group): + return binary_cmp(torch.equal, types, args, kwargs, process_group) + +@_sharded_op_impl(torch.allclose) +def allclose(types, args, kwargs, process_group): + return binary_cmp(torch.allclose, types, args, kwargs, process_group) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0e0911bb1d18c9f90726436a9efe6dc5ef44019b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py @@ -0,0 +1,12 @@ +import torch +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, +) + +# This is used by `_apply()` within module.py to set new +# parameters after apply a certain method, we should follow +# the future behavior of overwriting the existing tensor +# instead of doing in-place change using `.data = `. 
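# Illustrative sketch of the agreement protocol used by _communicate_result
# above (binary_cmp.py), modelled with plain ints instead of CUDA tensors and
# collectives: every rank contributes 1 if its local comparison passed and 0
# otherwise, all_reduce(SUM) makes the total visible everywhere, and the total
# equals world_size only when every rank passed, i.e. the AND of all local results.
local_results = [True, True, False, True]             # hypothetical per-rank outcomes
world_size = len(local_results)
summed = sum(1 if ok else 0 for ok in local_results)  # what all_reduce(SUM) computes
assert (summed == world_size) == all(local_results)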
+@_sharded_op_impl(torch._has_compatible_shallow_copy_type) +def tensor_has_compatible_shallow_copy_type(types, args=(), kwargs=None, pg=None): + return False diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f96eded95f310d59803ccbb328fe5b1311d2ebe2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py @@ -0,0 +1,215 @@ +import copy +import torch +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, + Shard, + ShardedTensor, +) +from ._common import ( + _register_sharded_op_on_local_shards, +) +from torch.distributed._shard.common_op_utils import _register_default_op + + +# Tensor properties access +_register_default_op(torch.Tensor.shape.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.dtype.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.layout.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.size, _sharded_op_impl) +_register_default_op(torch.Tensor.dim, _sharded_op_impl) +_register_default_op(torch.Tensor.ndim.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.is_contiguous, _sharded_op_impl) +_register_default_op(torch.Tensor.contiguous, _sharded_op_impl) +_register_default_op(torch.Tensor.is_floating_point, _sharded_op_impl) + +# __reduce_ex__ to dispatch to get_state/set_state +_register_default_op(torch.Tensor.__reduce_ex__, _sharded_op_impl) + +# autograd related properties +_register_default_op(torch.Tensor.requires_grad.__get__, _sharded_op_impl) # type: ignore[attr-defined] +# TODO: set grad with a ShardedTensor that consists of all local grads +_register_default_op(torch.Tensor.grad.__get__, _sharded_op_impl) # type: ignore[union-attr] +_register_default_op(torch.Tensor.grad_fn.__get__, _sharded_op_impl) # type: ignore[union-attr] +_register_default_op(torch.Tensor.is_leaf.__get__, _sharded_op_impl) # type: ignore[attr-defined] + +# device property is ambiguous as from a global prospective, +# ShardedTensor.device consists of multiple devices (might even across hosts) +# We choose to return the current device of the local tensor to represent +# the device property on each rank +@_sharded_op_impl(torch.Tensor.device.__get__) +def tensor_device(types, args=(), kwargs=None, pg=None): + self_st = args[0] + # Validate types + if not isinstance(self_st, ShardedTensor): + raise TypeError("input needs to be a ShardedTensor") + dev: torch.device + if self_st._local_shards: + dev = self_st._local_shards[0].tensor.device + elif pg and pg._get_backend_name() == "gloo": + dev = torch.device("cpu") + else: + dev = torch.device(torch.cuda.current_device()) + return dev + +@_sharded_op_impl(torch.Tensor.is_meta.__get__) # type: ignore[attr-defined] +def st_is_meta(types, args=(), kwargs=None, pg=None): + return args[0].local_tensor().is_meta + + +def sharded_type_as_check(*args, **kwargs): + """ + Perform extra checks for the sharded_type_as op such as the input needs to + be either a Tensor or ShardedTensor. + + Args: same as ``torch.Tensor.type_as``. 
+ + Return: None + """ + if len(args) < 2: + raise ValueError("Needs to give a tensor to cast type as!") + if not isinstance(args[1], torch.Tensor) and not isinstance(args[1], ShardedTensor): + raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!") + + +def same_dtype(*args, **kwargs): + """ + When the dtype is the same, return the original ShardedTensor. + + Args: same as ``torch.Tensor.type_as``. + + Return (bool): Whether to return early or not. + """ + return args[0].dtype == args[1].dtype + + +def sharded_type_as(args, kwargs, pg): + """ + Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op. + + Args: same as ``torch.Tensor.type_as``. + + Return: + new_local_shards (List[Shard]): Local shards for the new sharded tensor. + st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor. + """ + st = args[0] + tensor = args[1] + if isinstance(tensor, ShardedTensor): + tensor = tensor.local_tensor() + new_local_shards = [] + for shard in st.local_shards(): + new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata)) + st_meta = copy.deepcopy(st._metadata) + st_meta.tensor_properties.dtype = tensor.dtype + return new_local_shards, st_meta + + +_register_sharded_op_on_local_shards( + torch.Tensor.type_as, + early_stop_func=same_dtype, + extra_check=sharded_type_as_check, + customized_func=sharded_type_as, +) + + +def sharded_deepcopy(args, kwargs, pg): + # NOTE: we directly implement deepcopy magic method + # instead of using the default tensor.__deepcopy__ + # and implement clone(). This is because the default + # tensor deepcopy copies every attribute, but the + # process_group in ShardedTensor cannot be deep copied. + self_st = args[0] + new_local_shards = copy.deepcopy(self_st.local_shards()) + new_metadata = copy.deepcopy(self_st.metadata()) + return new_local_shards, new_metadata + + +_register_sharded_op_on_local_shards( + torch.Tensor.__deepcopy__, + customized_func=sharded_deepcopy, +) + + +@_sharded_op_impl(torch.Tensor.copy_) +def sharded_inplace_copy(types, args, kwargs, pg): + # NOTE: inplace op don't need to rewrap + kwargs = {} if kwargs is None else kwargs + self_st = args[0] + new_st = args[1] + nonblocking = kwargs.get("non_blocking", False) + for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()): + if local_shard.metadata != new_shard.metadata: + raise RuntimeError( + "inplace copy can only happen between two ShardedTensor with same metadata!" 
+ ) + for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()): + local_shard.tensor.copy_(new_shard.tensor, nonblocking) + + return self_st + + +def sharded_clone(args, kwargs, pg): + self_st = args[0] + desire_memory_format = kwargs.get("memory_format", None) + if desire_memory_format and desire_memory_format != torch.preserve_format: + raise RuntimeError("Only support torch.preserve_format for ShardedTensor!") + cloned_local_shards = [ + Shard( + local_shard.tensor.clone(memory_format=desire_memory_format), + metadata=copy.deepcopy(local_shard.metadata), + ) + for local_shard in self_st.local_shards() + ] + new_metadata = copy.deepcopy(self_st.metadata()) + return cloned_local_shards, new_metadata + + +_register_sharded_op_on_local_shards( + torch.Tensor.clone, + customized_func=sharded_clone, +) + + +def sharded_detach(args, kwargs, pg): + self_st = args[0] + detached_local_shards = [ + Shard( + local_shard.tensor.detach(), + metadata=copy.deepcopy(local_shard.metadata), + ) + for local_shard in self_st.local_shards() + ] + new_metadata = copy.deepcopy(self_st.metadata()) + new_metadata.tensor_properties.requires_grad = False + return detached_local_shards, new_metadata + + +_register_sharded_op_on_local_shards( + torch.Tensor.detach, + customized_func=sharded_detach, +) + + +@_sharded_op_impl(torch.Tensor.requires_grad_) +def tensor_requires_grad_set(types, args=(), kwargs=None, pg=None): + self_st = args[0] + # Validate types + if not isinstance(self_st, ShardedTensor): + raise TypeError("input needs to be a ShardedTensor") + + if kwargs is None: + kwargs = {} + + requires_grad = args[1] if len(args) > 1 else kwargs.get("requires_grad", True) + if requires_grad == self_st.requires_grad: + return self_st + + for local_shard in self_st.local_shards(): + local_shard.tensor.requires_grad_(requires_grad) + + # update the wrapper class property + with torch._C.DisableTorchFunctionSubclass(): + self_st.requires_grad_(requires_grad) + # update the metadata in the meanwhile + self_st._metadata.tensor_properties.requires_grad = requires_grad + return self_st diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd38105c53ba4783b4d1517f88f54025719eab4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py @@ -0,0 +1,12 @@ +from .api import ( + DevicePlacementSpec, + EnumerableShardingSpec, + PlacementSpec, + ShardingSpec, + _infer_sharding_spec_from_shards_metadata, +) +from .chunk_sharding_spec import ( + ChunkShardingSpec as ChunkShardingSpec, +) + +from torch.distributed._shard.metadata import ShardMetadata diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..955b2826f8c6cda56230c46f2ccb401776e233f4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffb7a0d658a33d73435cc69640d4a0b0e8b31a29 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5348513035741471a377493bdbeac91b7ea54b7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a76eedbed2c8ca8e847f92776fa25e50d9e3c485 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..e8275063e038266cf3893c83ba2a552de2723c8c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py @@ -0,0 +1,209 @@ +from typing import List, Optional, Tuple + +from torch.distributed._shard.metadata import ShardMetadata + + +def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata): + """ + Checks if two shards overlap. + """ + + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. + ndims = len(shard1.shard_offsets) + for i in range(ndims): + if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_sizes[i]: + return False + if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_sizes[i]: + return False + + return True + + +def _find_nd_overlapping_shards( + shards: List[ShardMetadata], sharded_dims: List[int] +) -> Optional[Tuple[int, int]]: + # Each rank has len(sharded_dims) tuples. Each tuple represent the + # [begin, end] (inclusive) pair of that dimension. + shard_intervals = [ + [ + (s.shard_offsets[dim], s.shard_offsets[dim] + s.shard_sizes[dim] - 1) + for dim in sharded_dims + ] + for s in shards + ] + + for i in range(len(shards)): + shard_i = shard_intervals[i] + for j in range(i + 1, len(shards)): + shard_j = shard_intervals[j] + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. 
+ overlap = True + for interval_i, interval_j in zip(shard_i, shard_j): + if interval_i[0] > interval_j[1] or interval_j[0] > interval_i[1]: + overlap = False + break + if overlap: + return (i, j) + return None + + +def _find_1d_overlapping_shards( + shards: List[ShardMetadata], dim: int +) -> Optional[Tuple[int, int]]: + # (begin, end, index_in_shards). Begin and end are inclusive. + intervals = [ + (s.shard_offsets[dim], s.shard_offsets[dim] + s.shard_sizes[dim] - 1, i) + for i, s in enumerate(shards) + ] + intervals.sort() + for i in range(len(shards) - 1): + if intervals[i][1] >= intervals[i + 1][0]: + return (intervals[i][2], intervals[i + 1][2]) + return None + + +def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]): + """ + Ensures none of the shards overlap with each other. + + Args: + shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing + each shard. + Raises: + ``ValueError`` if there's overlap in any two shards. + """ + if not shards or len(shards) == 1: + return + + sharded_dims: List[int] = [] + for dim in range(len(shards[0].shard_offsets)): + for i in range(1, len(shards)): + if ( + shards[i].shard_offsets[dim] != shards[0].shard_offsets[dim] or + shards[i].shard_sizes[dim] != shards[0].shard_sizes[dim] + ): + sharded_dims.append(dim) + break + + pair: Optional[Tuple[int, int]] = None + if len(sharded_dims) == 0: + # All shards are the same, all dims are not partitioned. Choose any 2. + pair = (0, 1) + elif len(sharded_dims) == 1: + # Shards are partitioned over only one dimension. Overlap can be found + # using a O(nlogn) overlapping interval algorithm. + pair = _find_1d_overlapping_shards(shards, sharded_dims[0]) + else: + # Shards are partitioned over more than one dimension. Fall back to + # pair-wise check. Even though O(nlogn) algorithms (line sweep) exist + # for 2D overlap, the implementation is not trivial and may not justify + # the time saving in most cases. + pair = _find_nd_overlapping_shards(shards, sharded_dims) + + if pair: + raise ValueError(f'Shards {shards[pair[0]]} and {shards[pair[1]]} overlap') + + +def check_tensor(shards_metadata, tensor_dims) -> None: + """ + Checks if the shards_metadata is compatible with the provided tensor dims. + + Args: + shards_metadata(List[ShardMetadata]): List of :class:`ShardMetadata` + objects representing each shard of the tensor. + tensor_dims(Sequence of int): Dimensions of tensor to verify + Raises: + ``ValueError`` if not compatible. + """ + + # If the tensor's volume matches the total volume of all shards and + # all shard boundaries are within tensor dims, we have a compatible + # sharding spec for this tensor. Note that we have already verified + # we don't have overlapping shards. 
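# Illustrative sketch of the single-dimension interval test behind the overlap
# checks above, using plain (offset, size) pairs instead of ShardMetadata: two
# shards overlap on a dimension iff neither starts at or past the other's end.
def _intervals_overlap(off_a, size_a, off_b, size_b):
    return not (off_a >= off_b + size_b or off_b >= off_a + size_a)

assert not _intervals_overlap(0, 4, 4, 4)  # [0, 4) and [4, 8) touch but do not overlap
assert _intervals_overlap(0, 5, 4, 4)      # [0, 5) and [4, 8) share index 4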
+ tensor_rank = len(tensor_dims) + shards_rank = len(shards_metadata[0].shard_offsets) + if tensor_rank != shards_rank: + raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}') + + total_shard_volume = 0 + for shard in shards_metadata: + shard_volume = 1 + for i, shard_length in enumerate(shard.shard_sizes): + shard_volume *= shard_length + if shard.shard_offsets[i] + shard.shard_sizes[i] > tensor_dims[i]: + raise ValueError( + f'Shard offset {shard.shard_offsets[i]} and length ' + f'{shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}') + total_shard_volume += shard_volume + + tensor_volume = 1 + for size in tensor_dims: + tensor_volume *= size + + if total_shard_volume != tensor_volume: + # TODO: Can we improve this error message to point out the gaps? + raise ValueError( + f'Total volume of shards: {total_shard_volume} ' + f'does not match tensor volume: {tensor_volume}, in other words ' + f'all the individual shards do not cover the entire tensor') + +def get_split_size(dim_size, chunks): + """ + Computes the split size inline with ``torch.chunk`` + + Args: + dim_size(int): Size of the dimension being chunked. + chunks(int): Number of chunks to create for ``dim_size``. + + Returns: + An int indicating the split size to use. + """ + return (dim_size + chunks - 1) // chunks + +def get_chunked_dim_size(dim_size, split_size, idx): + """ + Computes the dim size of the chunk for provided ``idx`` given ``dim_size`` + and ``split_size``. + + Args: + dim_size(int): Size of the dimension being chunked. + split_size(int): The chunk size for each chunk of ``dim_size``. + idx(int): The index of chunk whose dim size is being requested. + + Returns: + An int indicating the dim size of the chunk. + """ + return max(min(dim_size, split_size * (idx + 1)) - split_size * idx, 0) + +def get_chunk_sharding_params(sharding_dim_size, world_size, spec, rank): + """ + Generate the start pos and offset length for the current rank for + chunk sharding. + + Args: + sharding_dim_size(int): The dimension length which we shard on. + world_size(int): number of ranks. + spec (:class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec`): + sharding spec. + rank(int): # of cuda process. + + Returns: + start_pos(int): start position of sharded tensor on the given rank. + chunk_size(int): chunk size of sharded tensor on the given rank. 
+ """ + split_size = get_split_size(sharding_dim_size, world_size) + current_offsets = 0 + start_pos = current_offsets + for idx, placement in enumerate(spec.placements): + chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) + if rank == placement.rank(): + start_pos = current_offsets + break + current_offsets += chunk_size + return start_pos, chunk_size # type: ignore[possibly-undefined] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py new file mode 100644 index 0000000000000000000000000000000000000000..bcfacbf0354dfa553f973e009c8ff84657fc9a9d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py @@ -0,0 +1,242 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +import functools +from typing import Callable, Dict, List, TYPE_CHECKING + +import torch + +from ._internals import ( + check_tensor, + get_chunked_dim_size, + get_split_size, + validate_non_overlapping_shards_metadata +) +from torch.distributed._shard.metadata import ShardMetadata + +import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta +from torch.distributed._shard.op_registry_utils import _decorator_func + +if TYPE_CHECKING: + # Only include ShardedTensor when do type checking, exclude it + # from run-time to resolve circular dependency. + from torch.distributed._shard.sharded_tensor import ShardedTensor + +class PlacementSpec(ABC): # noqa: B024 + """ + Base class representing the placement of an entity. Subclasses of this + class can be used to specify customized placements which might not be + covered by existing APIs. + """ + pass + + +@dataclass +class DevicePlacementSpec(PlacementSpec): + """ + Associates placement of an entity with a single device. + + Args: + device(:class:`torch.distributed._remote_device`): The device to place the entity on. + """ + + device: torch.distributed._remote_device + + def __post_init__(self): + if not isinstance(self.device, torch.distributed._remote_device): + self.device = torch.distributed._remote_device(self.device) + +class ShardingSpec(ABC): + """ + Base class representing sharding specifications. + """ + @abstractmethod + def build_metadata(self, + tensor_sizes: torch.Size, + tensor_properties: sharded_tensor_meta.TensorProperties, + ) -> sharded_tensor_meta.ShardedTensorMetadata: + """ + Given a global tensor size, define how to shard a tensor like this shape + across ranks, return ShardedTensorMetadata + Args: + tensor_sizes (:class:`torch.Size`): + The tensor shape to shard on, a `torch.Size` object that represents the + tensor shape to be sharded according to the ShardingSpec. + tensor_properties(:class:`torch.distributed._shard.sharded_tensor.TensorProperties): + Tensor properties used to create a ShardedTensor. + Returns: + A :class:`ShardedTensorMetadata` object that encodes the information about + the layout of the ShardedTensor and its properties. + """ + + @abstractmethod + def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor": + """ + Given a global tensor on src_rank, shard this tensor + across ranks within the process group, return a ShardedTensor. + Args: + tensor (:class:`torch.Tensor`): Tensor needs to be sharded. 
+ Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the parameter that would be sharded and scattered + across the rest of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + Returns: + A :class:`ShardedTensor` sharded from the given tensor. + """ + +# Ops customized for a particular ShardingSpec. +_CUSTOM_SHARDING_SPEC_OPS: Dict[str, Dict[Callable, Callable]] = {} + +def _has_custom_op(sharding_spec, op): + """ + Returns whether or not the ShardingSpec has a custom op implementation. + """ + class_name = type(sharding_spec).__qualname__ + return class_name in _CUSTOM_SHARDING_SPEC_OPS and op in _CUSTOM_SHARDING_SPEC_OPS[class_name] + +def _dispatch_custom_op(sharding_spec, op: Callable, types, args, kwargs, process_group): + """ + Calls the custom op for this ShardingSpec if it exists. + """ + class_name = type(sharding_spec).__qualname__ + if not _has_custom_op(sharding_spec, op): + raise RuntimeError(f'Custom op: {op} not registered for {class_name}') + func = _CUSTOM_SHARDING_SPEC_OPS[class_name][op] + return func(types, args, kwargs, process_group) + +def custom_sharding_spec_op(sharding_spec_class, func): + """ + Decorator to allow custom registration of ops. + Args: + sharding_spec_class(type): The ShardingSpec for which we need to add this custom op. + func(Callable): The op to override (ex: torch.bmm) + """ + class_name = sharding_spec_class.__qualname__ + if class_name not in _CUSTOM_SHARDING_SPEC_OPS: + _CUSTOM_SHARDING_SPEC_OPS[class_name] = {} + return functools.partial( + _decorator_func, + op=func, + op_table=_CUSTOM_SHARDING_SPEC_OPS[class_name] + ) + + +@dataclass +class EnumerableShardingSpec(ShardingSpec): + """ + This is a type of PlacementSpec that allows users to specify a generic + sharding scheme by enumerating exactly how each shard is laid out. + + Args: + shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing + each shard. Note that none of the shards should overlap. + """ + + shards: List[ShardMetadata] + + def __post_init__(self): + if len(self.shards) == 0: + raise ValueError(f'Empty shard list provided: {self.shards}') + + # Validate each shard has same rank. + rank = -1 + for shard in self.shards: + if rank != -1 and rank != len(shard.shard_offsets): + raise ValueError(f'Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}') + rank = len(shard.shard_offsets) + + validate_non_overlapping_shards_metadata(self.shards) + + def build_metadata(self, + tensor_sizes: torch.Size, + tensor_properties: sharded_tensor_meta.TensorProperties, + ) -> sharded_tensor_meta.ShardedTensorMetadata: + # check if shards form a valid tensor + check_tensor(self.shards, tensor_sizes) + return sharded_tensor_meta.ShardedTensorMetadata( + self.shards, + tensor_sizes, + tensor_properties + ) + + def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor": + # TODO: figure out a generic and efficient way to scatter the shards for EnumerableShardingSpec + raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!") + + +def _infer_sharding_spec_from_shards_metadata(shards_metadata): + """ + Infer the sharding spec from the metadata of each shard of a ShardedTensor. + If the tensor is sharded only on one dimension, we can then verify whether it's + a ChunkShardingSpec or not. 
The way to verify it is to first get the total length + and perform a chunk sharding with the given placements to see if we can have the + same chunk size as the given shards_metadata. If not, we assume it's enum sharded. + + Args: + shards_metadata (List[ShardMetadata]): List of Metadata of local shards. + + Returns: + A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding + spec for one sharded tensor. + """ + placements = [] + chunk_sharding_dim = None + chunk_offset_list = [] + shard_size_list = [] + shard_offset_list = [] + # collect local shard metadatas from the global sharded_tensor_metadata + for shard_metadata in shards_metadata: # type: ignore[attr-defined] + placements.append(shard_metadata.placement) + local_offsets = shard_metadata.shard_offsets + chunk_offset_list.append(sum(local_offsets)) + shard_size_list.append(shard_metadata.shard_sizes) + shard_offset_list.append(shard_metadata.shard_offsets) + shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0] + # If the offset is [0, 0, ..., 0] (all zeros), + # we cannot decide whether how the tensor is sharded. + if len(shard_dims) == 0: + continue + # If the offset is [0, N, .,0, M, 0, .., 0], + # we are sure it's sharded by more than one dimension. + if len(shard_dims) != 1: + chunk_sharding_dim = None + break + # If the offset is [0, 0, .,0, M, 0, .., 0], aka, it's sharded by just + # one dimension, we need to make sure all ranks share the same dimension. + if not chunk_sharding_dim: + chunk_sharding_dim = shard_dims[0] + elif chunk_sharding_dim != shard_dims[0]: + chunk_sharding_dim = None + break + + if chunk_sharding_dim is not None: + # Ensure we infer the correct placement order from offsets + placements = [ + x for _, x in sorted(zip(chunk_offset_list, placements), key=lambda e: e[0]) + ] + + from .chunk_sharding_spec import ChunkShardingSpec + chunk_spec = ChunkShardingSpec( + dim=chunk_sharding_dim, + placements=placements, + ) + + shard_sizes = sorted([x[chunk_sharding_dim] for x in shard_size_list]) + shard_total_length = sum(shard_sizes) + shard_offsets = sorted([x[chunk_sharding_dim] for x in shard_offset_list]) + + chunks = len(placements) + split_size = get_split_size(shard_total_length, chunks) + chunk_shard_sizes = sorted( + [ + get_chunked_dim_size(shard_total_length, split_size, idx) + for idx in range(chunks) + ] + ) + # Should match ChunkShardingSpec offsets calculation + chunk_shard_offsets = [split_size * idx for idx in range(chunks)] + if shard_sizes == chunk_shard_sizes and shard_offsets == chunk_shard_offsets: + return chunk_spec + return EnumerableShardingSpec(shards_metadata) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..a96bc1c25fdac50818468811912f4e1f9e1ce242 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py @@ -0,0 +1,202 @@ +from dataclasses import dataclass +import torch +import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta +from torch.distributed._shard.metadata import ShardMetadata +from torch.distributed._shard.sharded_tensor.shard import Shard +from torch.distributed._shard.sharded_tensor.utils import ( + _parse_and_validate_remote_device +) +from torch.distributed._shard._utils import narrow_tensor +import 
torch.distributed as dist +import torch.distributed.distributed_c10d as distributed_c10d +from typing import List, Union, TYPE_CHECKING +from ._internals import ( + get_chunked_dim_size, + get_split_size, +) + +from .api import ShardingSpec + +if TYPE_CHECKING: + # Only include ShardedTensor when do type checking, exclude it + # from run-time to resolve circular dependency. + from torch.distributed._shard.sharded_tensor import ShardedTensor + +@dataclass +class ChunkShardingSpec(ShardingSpec): + """ + This is a type of PlacementSpec that defines the placement as being sharded + across multiple devices. In particular, it represents sharding a Tensor + along a single dimension into equal chunks (similar to :meth:`torch.chunk`). + + The semantics of how a tensor is partitioned is inline with + :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the + specified ``dim`` and ``chunks`` in torch.chunk is the number of elements + in the placement specified. + + Args: + dim (int or str): + The dimension to shard on, could be an integer representing the + dimension or a string in case of named tensors where dimensions are + named. Note that named tensor support is not added yet. + placement(List[Union[_remote_device, str]]): + Specifies the placement of each shard of the Tensor. The size of + the list represents the number of shards to be created. This could + be a list of + :class:`torch.distributed._remote_device`'s. This list + could also contain a string which represents remote + device as accepted by + :class:`torch.distributed._remote_device` + """ + + ShardingDim = Union[int, str] + + dim: ShardingDim + placements: List[Union[torch.distributed._remote_device, str]] + + def __post_init__(self): + self._verify_dim(self.dim) + for i, remote_device in enumerate(self.placements): + if not isinstance(remote_device, torch.distributed._remote_device): + self.placements[i] = torch.distributed._remote_device(remote_device) + + @staticmethod + def _verify_dim(dim): + # Validate the sharding spec. + # TODO: support named dimension + if isinstance(dim, str): + raise NotImplementedError( + "ChunkShardingSpec does not support named dimension yet!" 
+ ) + + if not isinstance(dim, int): + raise ValueError( + f"Sharding dim needs to be an integer, found: {dim}" + ) + + def build_metadata(self, + tensor_sizes: torch.Size, + tensor_properties: sharded_tensor_meta.TensorProperties, + ) -> sharded_tensor_meta.ShardedTensorMetadata: + tensor_num_dim = len(tensor_sizes) + + self._verify_dim(self.dim) + if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator] + raise ValueError(f"Invalid sharding dim: {self.dim}") + + shards_metadata = [] + sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index] + chunks = len(self.placements) + split_size = get_split_size(sharding_dim_size, chunks) + for idx, placement in enumerate(self.placements): + # generate ShardMetadata for each placement device + chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) + shard_size = list(tensor_sizes) + current_offsets = [0] * tensor_num_dim + current_offsets[self.dim] = split_size * idx # type: ignore[index] + shard_size[self.dim] = chunked_dim_size # type: ignore[index] + + shard_metadata = ShardMetadata( + shard_offsets=current_offsets, + shard_sizes=shard_size, + placement=placement, + ) + shards_metadata.append(shard_metadata) + + return sharded_tensor_meta.ShardedTensorMetadata( + shards_metadata, + tensor_sizes, + tensor_properties + ) + + + def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor": + """ + Args: + src_rank: group rank relative to ``process_group`` + + N.B. If ``process_group`` is None, ``src_rank`` is a global rank. + """ + # relative imports to avoid circular dependency + from torch.distributed._shard.sharded_tensor import ( + ShardedTensor + ) + tensor_properties = sharded_tensor_meta.TensorProperties( + dtype=tensor.dtype, + layout=tensor.layout, + requires_grad=tensor.requires_grad, + memory_format=torch.contiguous_format, + pin_memory=tensor.is_pinned() + ) + current_rank = dist.get_rank(process_group) + tensor_meta = self.build_metadata(tensor.size(), tensor_properties) + local_shards = [] + local_tensor = None + local_metadata = None + tensors_to_scatter = [None] * dist.get_world_size(process_group) + + sharding_dim_size = tensor.size()[self.dim] # type: ignore[index] + chunks = len(self.placements) + split_size = get_split_size(sharding_dim_size, chunks) + scatter_shape = list(tensor.size()) + scatter_shape[self.dim] = split_size # type: ignore[index] + + for shard_meta in tensor_meta.shards_metadata: + rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement) + if current_rank == src_rank: + # Reshape to get shard for this rank and we don't want autograd + # recording here for the narrow op and 'local_shard' should be a + # leaf variable in the autograd graph. 
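# Illustrative sketch of the chunking arithmetic used by build_metadata()
# above and by the scatter below, for a hypothetical dimension of length 10
# split across 4 placements: get_split_size rounds up like torch.chunk, shard
# offsets advance by split_size, and the last shard absorbs the (possibly
# smaller, possibly empty) remainder.
from torch.distributed._shard.sharding_spec._internals import (
    get_chunked_dim_size,
    get_split_size,
)

dim_size, chunks = 10, 4
split_size = get_split_size(dim_size, chunks)  # ceil(10 / 4) == 3
assert [get_chunked_dim_size(dim_size, split_size, i) for i in range(chunks)] == [3, 3, 3, 1]
assert [split_size * i for i in range(chunks)] == [0, 3, 6, 9]  # shard offsets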
+ narrowed_tensor = narrow_tensor(tensor, shard_meta) + if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index] + # for the last shard that might be smaller to other shards + # resize the narrowed tensor to the same size and use it for + # the scatter collective as dist.scatter requires same size + # inputs on every rank + tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape) + else: + tensor_to_scatter = narrowed_tensor.detach().clone().contiguous() + + tensors_to_scatter[rank] = tensor_to_scatter + + if current_rank == rank: + local_tensor = torch.empty( + scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device) + local_metadata = shard_meta + + # each rank should have local_tensor and local_metadata initialized if we build + # the metadata list in a correct way. + assert local_tensor is not None + assert local_metadata is not None + + # Scatter the shards to all ranks in the pg + # scatter takes the global rank as ``src`` + src_for_scatter = src_rank + if process_group is not None and process_group is not distributed_c10d._get_default_group(): + src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter) + + dist.scatter( + local_tensor, + scatter_list=tensors_to_scatter if current_rank == src_rank else None, + src=src_for_scatter, + group=process_group + ) + + if list(local_tensor.size()) != local_metadata.shard_sizes: + # detach again after receiving to ensure local shards remain a leaf node + local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach() + + # Sync requires_grad to local_shard. + local_tensor.requires_grad = tensor.requires_grad + + local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata)) + + st = ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards, + tensor_meta, + process_group=process_group) + + # Manually set sharding_spec + st._sharding_spec = self + + return st diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6047a7fdf4aa38a16c14d3c8dacac3aaa4947bb7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64ac6ea13dd2fa5b98debc5c19a5a3246b6426d9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a869f2059dd954486cb3450c775e440a161a5fa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b188bed7cf05ea9bb3a8a6ad5ef7baa859b21bd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..24727005870de8373074acea3562aab0814455e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py @@ -0,0 +1,349 @@ + +import torch +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharded_tensor._ops._common import _sharded_op_common +from torch.distributed._shard.sharding_spec import ChunkShardingSpec +from torch.distributed._shard.sharding_spec._internals import ( + get_chunk_sharding_params, + get_chunked_dim_size, + get_split_size, +) +from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op +from torch.distributed.nn.functional import ( + _all_gather_base, + all_reduce, + all_to_all_single, +) + + +def _chunk_sharding_spec_check(spec, op): + """ + For the given op implementation check if the sharding spec is ChunkShardingSpec. + """ + if not isinstance(spec, ChunkShardingSpec): + raise NotImplementedError( + f"Only ChunkShardingSpec supported for '{op.__name__}'." + ) + + +def _register_sharded_op_on_local_tensor( + op, early_stop_func=None, extra_check=None, customized_func=None +): + """ + Handles ``__torch_function__`` dispatch for ops which are performed on + the single local tensor of the sharded tensor such as op like + ``torch.nn.functional.softmax`` or ``torch.Tensor.view``. + + For more complicated ops, a customized func can be used to generate + the new local tensor, sharding spec and sharded tensor size. + + Args: + op: The op to be registered and applied to all shards of the st. + early_stop_func (Callable, optional): the func for early stop. + Default: if ``None``, no early stop. + extra_check (Callable, optional): the func for extra condition check. + Default: if ``None``, no extra check. + customized_func (Callable, optional): the func for customized logic + to generate the new local tensor, sharding spec and sharded tensor size. + Default: if ``None``, we simply lower to the real op call with + the single local tensor of the st. 
+ + Return: + func (Callable): registered implementation for sharded op for + ``__torch_function__`` dispatch. + """ + + @custom_sharding_spec_op(ChunkShardingSpec, op) + @_sharded_op_common(op, early_stop_func, extra_check) + def sharded_tensor_op_on_local_tensor(types, args=(), kwargs=None, pg=None): + st = args[0] + sharding_spec = st.sharding_spec() + if len(st.local_shards()) != 1: + raise TypeError( + f"torch function '{op.__name__}', with args: {args} and " + f"kwargs: {kwargs} only supported for single local tensor!" + ) + st_size = st.size() + if customized_func: + local_tensor, sharding_spec, st_size = customized_func(args, kwargs, pg) + else: + args = (st.local_tensor(), *args[1:]) + local_tensor = op(*args, **kwargs) + return ShardedTensor._init_from_local_tensor( + local_tensor.contiguous(), + sharding_spec, + st_size, # type: ignore[arg-type] + process_group=pg, + init_rrefs=st._init_rrefs, + ) + + +def _handle_col_wise_sharding_base( + op_func, + col_dim, + input, + world_size, + weight, + local_shard, + pg, + gathered_inputs, + mode=None, + gathered_per_sample_weights=None, + gathered_offsets=None, + padding_idx=None, +): + """ + For col-wise sharding of weight, lots of logic are common. + So we extract the common logic and put in this function: + Step 1. To get input from each rank and + Step 2. To perform the op on the concatenated tensor. + Step 3. To distribute results to each rank with col rearrangement. + Step 4. To concatenate all results from all ranks. + + Args: + op_func: operator which is applied to the input tensor. + col_dim: dim of result tensor after the operation. + input: tensor to be applied op on. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: col-wise sharded weight tensor. + pg: process group. + gathered_inputs: list of inputs from all ranks. If specified, we + don't need to communicate with each rank any more. + mode: aggregation mode of EmbeddingBag. + gathered_per_sample_weights: per_sample_weights across all ranks. + gathered_offsets: offsets across all ranks. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + + Return: final result of input being applied with the op. + """ + # run the operator's function for all the inputs. + results = [] + for i, inp in enumerate(gathered_inputs): + if op_func == torch.nn.functional.embedding_bag: + result = op_func( + inp, + local_shard, + offsets=gathered_offsets[i] if gathered_offsets is not None else None, + mode=mode, + per_sample_weights=gathered_per_sample_weights[i] + if gathered_per_sample_weights is not None + else None, + padding_idx=padding_idx, + ) + elif op_func == torch.nn.functional.embedding: + result = op_func( + inp, + local_shard, + padding_idx=padding_idx, + ) + else: + result = op_func(inp, local_shard) + results.append(torch.transpose(result, 0, col_dim)) + + # Distribute results to each rank with col rearrangement. + output = _result_distribute_with_col_rearrange( + results, input, world_size, weight, pg + ) + + # transpose the output and return result. + return torch.transpose(output, 0, col_dim) + + +def _result_distribute_with_col_rearrange(results, input, world_size, weight, pg): + """ + For col-wise sharding of weight, we need to distribute + results to each rank. We do them in this function. 
+ Note that, if the index in the Sharding Spec is not equal to + the rank number, we need to do the rearrangement based on the + order given by the Sharding Spec (placement). + + Args: + results: results from ops applied to inputs from all ranks. + We need to distribute them back to their original ranks. + input: tensor to be applied op to. + world_size: number of ranks. + weight: sharded weight tensor. + pg: process group. + + Return: column rearranged result. + """ + # Process results and outputs for all2all. + sharding_dim = weight._sharding_spec.dim + sharding_dim_size = weight.size(sharding_dim) + dims = list(results[0].size()) + dims[0] = sharding_dim_size + combined_results = torch.cat(results) + output = torch.empty( + *dims, device=combined_results.device, dtype=combined_results.dtype + ) + + # Compute output splits + split_size = get_split_size(sharding_dim_size, world_size) + output_split_sizes = [0] * world_size + for idx, placement in enumerate(weight._sharding_spec.placements): + output_split_sizes[placement.rank()] = get_chunked_dim_size( + sharding_dim_size, split_size, idx + ) + + # distribute the outputs using all2all. + output = all_to_all_single( + output, combined_results, output_split_sizes=output_split_sizes, group=pg + ) + + # Check if we need to rearrange columns appropriately for output. + rearrange_columns = any( + idx != placement.rank() + for idx, placement in enumerate(weight._sharding_spec.placements) + ) + if not rearrange_columns: + return output + + indices = [] + for placement in weight._sharding_spec.placements: + dim_size = output_split_sizes[placement.rank()] + start = sum( + [ + split_size if i < placement.rank() else 0 + for i, split_size in enumerate(output_split_sizes) + ] + ) + indices += list(range(start, start + dim_size)) + + return output.index_select(0, torch.tensor(indices, device=output.device)) + + +def _handle_max_norm_col_wise( + max_norm, + norm_type, + local_shard, + input, + world_size, + gathered_inputs, + pg, +): + """ + For col-wise sharding of weight, we need to aggregate the + norm across all ranks before we can perform the proper re-norm. + Note that, the max_norm logic is only applied to the embedding + indices that are looked up and not the whole shard. + + Args: + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + local_shard: col-wise shared local weight used for lookup. + input: tensor to be applied op to. + world_size: number of ranks. + gathered_inputs: list of inputs from all ranks. + pg: process group. + + Return: + local_shard_norm_renormed: local_shard re-normed to max_norm if the norm is larger + than it. + + """ + norm_type = norm_type if norm_type is not None else 2.0 + unique_inp = torch.unique(torch.cat(gathered_inputs)) + local_shard_sum = torch.sum( + torch.pow(torch.abs(local_shard), norm_type), dim=1, dtype=local_shard.dtype + ) + # For col-wise sharding, we need to first aggregate the powered sum + # from each rank first and then calculate the norm. 
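Stepping back to ``_result_distribute_with_col_rearrange`` above: the split sizes and the column-rearrangement indices are pure index arithmetic, so they can be illustrated without any collectives. The following self-contained sketch re-implements that arithmetic in plain Python for illustration only (it does not call the private ``get_split_size``/``get_chunked_dim_size`` helpers, and the out-of-order placement ``[2, 0, 3, 1]`` is an assumed example, not taken from the code above):

def split_sizes(dim_size, world_size):
    # torch.chunk-style splitting: ceil(dim_size / world_size) rows per shard,
    # with the last shard holding whatever remains.
    split = -(-dim_size // world_size)
    return [max(min(dim_size, split * (i + 1)) - split * i, 0) for i in range(world_size)]

placement_ranks = [2, 0, 3, 1]        # assumed ChunkShardingSpec placement order
sharding_dim_size = 17
sizes = split_sizes(sharding_dim_size, len(placement_ranks))    # [5, 5, 5, 2]

# output_split_sizes is indexed by rank, exactly as in the function above.
output_split_sizes = [0] * len(placement_ranks)
for idx, rank in enumerate(placement_ranks):
    output_split_sizes[rank] = sizes[idx]                       # -> [5, 2, 5, 5]

# Column-rearrangement indices: walk the placements in spec order and pull out
# the block of rows that the all_to_all delivered for each rank.
indices = []
for rank in placement_ranks:
    start = sum(size for r, size in enumerate(output_split_sizes) if r < rank)
    indices += list(range(start, start + output_split_sizes[rank]))

print(output_split_sizes)   # [5, 2, 5, 5]
print(indices)              # [7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 12, 13, 14, 15, 16, 5, 6]

With placements already in rank order, ``indices`` would simply be ``range(sharding_dim_size)`` and the ``index_select`` in the real code is skipped.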
+ local_shard_sum = all_reduce(local_shard_sum, group=pg) + local_shard_norm = torch.pow(local_shard_sum, 1.0 / norm_type) + max_norm_tensor = torch.full( + (local_shard.size(0),), + float("inf"), + dtype=local_shard.dtype, + device=input.device, + ) + max_norm_tensor[unique_inp] = max_norm + local_shard_t = local_shard.t().contiguous() + normalized_tensor = torch.where( + local_shard_norm > max_norm_tensor, max_norm_tensor, local_shard_norm + ) + # Make sure divisor is not zero. + local_shard_norm[local_shard_norm == 0.0] = 1.0 + local_shard_norm_renormed = ( + torch.div(torch.mul(local_shard_t, normalized_tensor), local_shard_norm) + .t() + .contiguous() + ) + return local_shard_norm_renormed + + +def _all_gather_base_input(input, pg): + """ + Use _all_gather_base to get a concatenated input from each rank. + + Args: + input: tensor to be applied op on. + pg: process group. + + Returns: + gathered_inputs: input gathered from each rank and concat by dim 0. + """ + # allgather the inputs first. + gather_inp_size = list(input.size()) + gather_inp_size[0] = input.size(0) * dist.get_world_size(pg) + gather_inp = torch.empty(gather_inp_size, device=input.device, dtype=input.dtype) + return _all_gather_base(gather_inp, input, group=pg) + + +def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank): + """ + Mask the input for embedding look-up for IDs which are not stored + on the current rank. This function also adjust the ``padding_idx`` + so that it is only used on the rank where the corresponding row is + stored. + + Note that, with ``max_norm`` flag on, only weights of rows being + looked up will be re-normed. So we need an extra row for masked ID + so that it does not affect the final result and ``max_norm``. + + Args: + gather_inp: tensor to be applied op on gathered from all ranks. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + weight: weight tensor of Embedding look-up table. + world_size: number of ranks. + rank: # of cuda process. + + Returns: + lookup_input: Tensor of masked input. + padding_idx: adjusted padding_idx. + padding_row: The extra row we used during lookup so that + looking up does not affect ``max_norm``. + """ + (start_pos, chunk_size) = get_chunk_sharding_params( + weight.size(0), world_size, weight._sharding_spec, rank + ) + mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size) + lookup_input = gather_inp.clone() - start_pos + lookup_input[mask] = chunk_size + if ( + padding_idx is not None + and padding_idx >= start_pos + and padding_idx < (start_pos + chunk_size) + ): + padding_idx = padding_idx - start_pos + else: + padding_idx = None + + # When max_norm is set, it will only re-norm the row being looked up. 
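To make the masking arithmetic in ``_handle_row_wise_mask`` concrete, here is a minimal non-distributed illustration with assumed values: rank 0 of 4 holds rows ``[start_pos, start_pos + chunk_size)`` of the weight (here ``start_pos = 0``, ``chunk_size = 3``), and every ID outside that range is redirected to the extra padding row at index ``chunk_size``:

import torch

start_pos, chunk_size = 0, 3        # assumed; normally returned by get_chunk_sharding_params
gather_inp = torch.tensor([[6, 5, 2, 9, 6, 3],
                           [3, 1, 2, 4, 7, 6]])

mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size)
lookup_input = gather_inp.clone() - start_pos
lookup_input[mask] = chunk_size      # IDs not stored on this rank point at the extra row

print(lookup_input)
# tensor([[3, 3, 2, 3, 3, 3],
#         [3, 1, 2, 3, 3, 3]])

The weight actually used for the lookup is ``torch.cat([local_shard, padding_row])``, so index ``chunk_size`` resolves to the zero padding row (or the ``-inf`` row for the ``max`` mode of EmbeddingBag), which keeps masked IDs from affecting either the result or the ``max_norm`` re-norm.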
+ padding_row = torch.zeros( + 1, weight.size(1), device=gather_inp.device, dtype=weight.dtype + ) + return lookup_input, padding_idx, padding_row diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..e1c1cb6380439ca14f6e352dc03ea30877e6286c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py @@ -0,0 +1,293 @@ + +import torch +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharding_spec import ChunkShardingSpec +from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op +from torch.distributed.nn.functional import all_gather, reduce_scatter + +from ._common import ( + _all_gather_base_input, + _handle_col_wise_sharding_base, + _handle_max_norm_col_wise, + _handle_row_wise_mask, +) + + +@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding) +def sharded_embedding(types, args, kwargs, pg): + """ + Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding``. + This method computes a sharded embedding lookup and has the following limitations: + + 1. Supports only sharding of ``weight``. + 2. Supports only ``ChunkShardingSpec``. + 3. Supports only a single local shard per rank. + 4. Supports all specs except for scale_grad_by_freq, sparse, etc. + + Based on the dimension that the weight is sharded on, there are two + algorithms: + + ROWWISE SHARDING + ================ + For row-wise sharding the weight is sharded on dimension 0. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (10 x 17) and W is sharded across + 4 GPUs creating 3 shard of (3 x 17) and 1 shard of (1 x 17). + The algorithm is as follows: + + 1. First the input is all gathered to all ranks, since this is SPMD and + input is actually sharded across all ranks. The inputs then become a + 4 (4 x 6) tensor on each rank. For example if the given input is + tensor([[6, 5, 2, 9, 6, 3], + [3, 1, 2, 4, 7, 6], + [4, 0, 4, 9, 8, 9], + [8, 6, 6, 4, 6, 1]]) + on rank 0. + Then on every rank, we will have this tensor. + If input itself is already replicated, no all-gather will be done. + 2. Next, we mask the ID which are not stored on that rank. + For example on rank 0, we store ID [0, 1, 2]. We only keep the ID + inside the set of numbers. The rest of them will be masked to an extra row. + The masked matrix will be used for embedding look up and is like: + tensor([[4, 4, 2, 4, 4, 4], + [4, 1, 2, 4, 4, 4], + [4, 0, 4, 4, 4, 4], + [4, 4, 4, 4, 4, 1]]) + The reason of having an extra row (aka, number 4 in the example) is + because when max_norm is specified only weight which has looked will + be re-normed so mask IDs whose embeddings are not stored in current + rank will to an extra row will ensure max_norm still works as expected. + 3. If max_norm is specified, the extra row guarantees that the mask ID will + not affect the behavior of weigh re-norm. + + COLWISE SHARDING + ================ + For col-wise sharding the weight is sharded on dimension 1. + + The overall algorithm can be best explained with an example. 
Let's assume + the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across + 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2). + The algorithm is as follows: + + 1. First the input is broadcasted to all ranks, since this is SPMD we + actually do an all_gather for all the inputs resulting in 4 (4 x 6) + inputs on each rank. + 2. Next we perform local embedding lookup operation by apply each + input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last). + This results in 4 (5 x 6 x 4) ((2 x 6 x 4) for the last) matrices + on each rank. We transpose dim 0 and dim 2. + 3. Next, we concat these 4 matrices and perform an all2all to share the + appropriate (5 x 6 x 4) or (2 x 6 x 4) matrices to each rank. + 4. Now, each rank receives a (17 x 6 x 4) matrix which is basically the + size of the result we need. + 5. If placements are not in order any appropriate rearrangement of columns + are done for the (17 x 6 x 4) matrix and finally we transpose the + dim 0 and dim 2 again. + 6. If max_norm is specified, we manually sum up the norm and renorm. Because + the renorm must be in place, we need to override the local_shard to mimic + this behavior. + """ + # Validate input params + _validate_embedding_param(args, kwargs) + + input = args[0] + weight = args[1] + max_norm = kwargs.get("max_norm") + norm_type = kwargs.get("norm_type") + padding_idx = kwargs.get("padding_idx") + + local_shard = weight.local_tensor().contiguous() + sharding_dim = weight._sharding_spec.dim + world_size = dist.get_world_size(pg) + rank = dist.get_rank(pg) + + if sharding_dim == 1: + output, local_shard = _handle_col_wise_sharding( + input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg + ) + weight.local_shards()[0].tensor = local_shard + return output + elif sharding_dim == 0: + return _handle_row_wise_sharding( + input, + world_size, + weight, + local_shard, + max_norm, + norm_type, + padding_idx, + rank, + pg, + ) + else: + raise RuntimeError( + f"nn.Embedding weight sharded on dim {sharding_dim} not supported!" + ) + + +def _validate_embedding_param(args, kwargs): + """ + Validate input params of sharded embedding op. + + Args: + input: list of ID used for lookup. + weight: sharded weight tensor. + kwargs: same as normal Embedding. + + Return: None. + """ + + input = args[0] + weight = args[1] + max_norm = kwargs.get("max_norm") + scale_grad_by_freq = kwargs.get("scale_grad_by_freq") + sparse = kwargs.get("sparse") + + # Validate types + if not isinstance(input, torch.Tensor): + raise TypeError("input need to be torch.Tensor") + if not isinstance(weight, ShardedTensor): + raise TypeError("weight needs to be ShardedTensor") + weight_size = weight.size() + if len(weight_size) != 2: + raise ValueError("Weight needs to have exactly 2 dims") + if int(torch.min(input).item()) < 0: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.min(input).item()), + weight_size[1], + ) + if int(torch.max(input).item()) >= weight_size[0]: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.max(input).item()), + weight_size[1], + ) + if scale_grad_by_freq: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!' + ) + if sparse: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "sparse" not supported!' 
+ ) + if max_norm and max_norm <= 0.0: + raise ValueError('"max_norm" must be larger than zero!') + + if not isinstance(weight._sharding_spec, ChunkShardingSpec): + raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!") + if len(weight.local_shards()) != 1: + raise ValueError("Only one local shard supported!") + + +def _handle_col_wise_sharding( + input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg +): + """ + Entry-point function to handle the logic of col-wise sharding of weight + for embedding. (Detailed explanations of the logic can be found in + the comment for sharded_embedding.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: col-wise shared local weight used for lookup. + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + pg: process group. + + Returns: final result of lookup. + """ + # allgather the inputs first for non Replicated Tensor. + gathered_inputs = all_gather(input, group=pg) + + if max_norm is not None: + # max_norm changes the weight in-place + local_shard = _handle_max_norm_col_wise( + max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg + ) + + output = _handle_col_wise_sharding_base( + torch.nn.functional.embedding, + len(input.size()), + input, + world_size, + weight, + local_shard, + pg, + gathered_inputs, + padding_idx=padding_idx, + ) + return (output, local_shard) + + +def _handle_row_wise_sharding( + input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg +): + """ + Entry-point function to handle the logic of row-wise sharding of weight + for embedding. (Detailed explanations of the logic can be found in + the comment for sharded_embedding.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: row-wise shared local weight used for lookup. + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + rank: # of cuda process. + pg: process group. + + Returns: final result of lookup. + """ + # allgather the inputs first for non Replicated Tensor. + gather_inp = _all_gather_base_input(input, pg) + + # Mask the input according to sharding spec. + lookup_input, padding_idx, padding_row = _handle_row_wise_mask( + gather_inp, padding_idx, weight, world_size, rank + ) + + # When input is a large tensor, the value of weight is changed. + # This is a walk-around for now. 
GH issue: #81717 + if max_norm is not None: + torch.nn.functional.embedding( + torch.unique(lookup_input)[:-1], + local_shard, + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + ) + max_norm = None + + local_input_embeddings = torch.nn.functional.embedding( + lookup_input, + torch.cat([local_shard, padding_row]), + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + ) + + # TODO: Make the result a PartialTensor. + local_shards = local_input_embeddings.chunk(pg.size()) + return reduce_scatter( + torch.empty_like(local_shards[0]), + list(local_shards), + group=pg, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6ea1d705d59dba53eee215913000db4b4b333c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py @@ -0,0 +1,476 @@ + +from typing import cast, List + +import torch +import torch.distributed as dist +from torch._C._distributed_c10d import ReduceOp +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharding_spec import ChunkShardingSpec +from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op +from torch.distributed.nn.functional import all_gather, reduce_scatter + +from ._common import ( + _all_gather_base_input, + _handle_col_wise_sharding_base, + _handle_max_norm_col_wise, + _handle_row_wise_mask, +) + + +@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding_bag) +def sharded_embedding_bag(types, args, kwargs, pg): + """ + Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding_bag``. + This method computes a sharded embedding bag aggregation and has the following limitations: + + 1. Supports only sharding of ``weight``. + 2. Supports only ``ChunkShardingSpec``. + 3. Supports only a single local shard per rank. + 4. Supports all specs except for scale_grad_by_freq, sparse, etc. + + Based on the dimension that the weight is sharded on, there are two + algorithms: + + ROWWISE SHARDING + ================ + For row-wise sharding the weight is sharded on dimension 0. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across + 4 GPUs creating 4 shard of (4 x 17). + The algorithm is as follows: + + 1. First the input is all gathered to all ranks, since this is SPMD and + input is actually sharded across all ranks. The inputs then become a + 4 (4 x 6) tensor on each rank. For example if the given input is + tensor([[6, 5, 2, 9, 6, 3], + [3, 1, 2, 4, 7, 6], + [4, 0, 4, 9, 8, 9], + [8, 6, 6, 4, 6, 1]]) + on rank 0. + Then on every rank, we will have this tensor. + If input itself is already replicated, no all-gather will be done. + 2. Next, we mask the ID which are not stored on that rank. + For example on rank 0, we store ID [0, 1, 2]. We only keep the ID + inside the set of numbers. The rest of them will be masked to an extra row. + The masked matrix will be used for embedding look up and is like: + tensor([[4, 4, 2, 4, 4, 4], + [4, 1, 2, 4, 4, 4], + [4, 0, 4, 4, 4, 4], + [4, 4, 4, 4, 4, 1]]) + 3. 
If ``max_norm`` is specified, the extra row guarantees that the mask ID will + not affect the behavior of weigh re-norm. + 4. The example above only happens in one rank and each rank does a very similar thing. + For "Mean" mode we need to divide by either column size (2D) or the interval length + defined by the offset (excluding the row specified in ``padding_idx``). + We also need to mask the unexisting row to neg Inf so that negative value does not + gets wiped out in the "Max" mode. + + COLWISE SHARDING + ================ + For col-wise sharding the weight is sharded on dimension 1. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across + 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2). + The algorithm is as follows: + + 1. First the input is broadcasted to all ranks, since this is SPMD we + actually do an all_gather for all the inputs resulting in 4 (4 x 6) + inputs on each rank. + 2. Next we perform local embedding bag operation under the given mode by + apply each input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last). + This results in 4 (5 x 4) ((2 x 4) for the last) matrices on each rank. + We transpose the aggregation result. + 3. Next, we concatenate these 4 matrices and perform an all2all to share the + appropriate (5 x 4) or (2 x 4) matrices to each rank. + 4. Now, each rank receives a (17 x 4) matrix which is basically the + size of the result we need. + 5. If placements are not in order any appropriate rearrangement of columns + are done for the (17 x 4) matrix and finally we transpose the output again. + 6. If max_norm is specified, we manually sum up the norm and renorm. Because + the renorm must be in place, we need to override the local_shard to mimic + this behavior. + """ + # Validate input params + _validate_embedding_bag_param(args, kwargs) + + input = args[0] + weight = args[1] + offsets = kwargs.get("offsets") + per_sample_weights = kwargs.get("per_sample_weights") + mode = kwargs.get("mode") + max_norm = kwargs.get("max_norm") + norm_type = kwargs.get("norm_type") + include_last_offset = kwargs.get("include_last_offset") + padding_idx = kwargs.get("padding_idx") + + local_shard = weight.local_tensor().contiguous() + sharding_dim = weight._sharding_spec.dim + world_size = dist.get_world_size(pg) + rank = dist.get_rank(pg) + if include_last_offset: + offsets = offsets[:-1] + + if sharding_dim == 1: + output, local_shard = _handle_col_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + pg, + ) + weight.local_shards()[0].tensor = local_shard + return output + elif sharding_dim == 0: + return _handle_row_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + rank, + pg, + ) + else: + raise RuntimeError( + f"nn.EmbeddingBag weight sharded on dim {sharding_dim} not supported!" + ) + + +def _validate_embedding_bag_param(args, kwargs): + """ + Validate input params of sharded embeddingBag op. + + Args: + input: list of ID used for lookup and aggregation. + weight: sharded weight tensor. + kwargs: same as normal EmbeddingBag. + + Return: None. 
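Among the checks that follow, two are easy to trip over with 1-D input: ``offsets`` is required, and with ``include_last_offset=True`` the final offset must equal ``input.size(0)`` (``sharded_embedding_bag`` then drops that trailing entry before dispatching). A tiny sketch with assumed values:

import torch

input_ids = torch.tensor([2, 5, 1, 7, 0, 3, 4])
offsets = torch.tensor([0, 3, 5, 7])    # trailing entry == input_ids.size(0)

assert input_ids.dim() == 1 and offsets is not None
assert offsets[-1].item() == input_ids.size(0)

offsets = offsets[:-1]                  # what the op does when include_last_offset is set
print(offsets)                          # tensor([0, 3, 5])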
+ """ + + input = args[0] + weight = args[1] + offsets = kwargs.get("offsets") + per_sample_weights = kwargs.get("per_sample_weights") + mode = kwargs.get("mode") + max_norm = kwargs.get("max_norm") + scale_grad_by_freq = kwargs.get("scale_grad_by_freq") + sparse = kwargs.get("sparse") + include_last_offset = kwargs.get("include_last_offset") + + # Validate types + if not isinstance(input, torch.Tensor): + raise TypeError("input need to be torch.Tensor") + if offsets is not None and not isinstance(offsets, torch.Tensor): + raise TypeError("offsets need to be torch.Tensor") + if per_sample_weights is not None and not isinstance( + per_sample_weights, torch.Tensor + ): + raise TypeError("per_sample_weights need to be torch.Tensor") + if not isinstance(weight, ShardedTensor): + raise TypeError("weight needs to be ShardedTensor") + if len(input.size()) > 2: + raise ValueError("Input more than 2 dims not supported") + weight_size = weight.size() + if len(weight_size) != 2: + raise ValueError("Weight needs to have exactly 2 dims") + if int(torch.min(input).item()) < 0: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.min(input).item()), + weight_size[1], + ) + if int(torch.max(input).item()) >= weight_size[0]: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.max(input).item()), + weight_size[1], + ) + if offsets is not None and len(input.size()) != 1: + raise ValueError("Input dimension needs to be exactly 1 dim") + if len(input.size()) == 1 and offsets is None: + raise ValueError("offsets is required for 1D input") + if per_sample_weights is not None and per_sample_weights.size() != input.size(): + raise ValueError( + f"per_sample_weights size {per_sample_weights.size()} not equal to input size {input.size()}" + ) + if mode is None: + mode = "mean" + if mode not in ["sum", "mean", "max"]: + raise ValueError(f"mode '{mode}' is not supported") + if scale_grad_by_freq: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!' + ) + if sparse: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "sparse" not supported!' + ) + if include_last_offset and offsets is None: + raise ValueError('offsets is required for flag "include_last_offset"!') + if include_last_offset and cast(List[int], offsets)[-1] != input.size(0): + raise ValueError( + 'offsets need to have the input size in the end when the flag "include_last_offset" is on!' + ) + + if max_norm and max_norm <= 0.0: + raise ValueError('"max_norm" must be larger than zero!') + + if not isinstance(weight._sharding_spec, ChunkShardingSpec): + raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!") + if len(weight.local_shards()) != 1: + raise ValueError("Only one local shard supported!") + + +def _handle_col_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + pg, +): + """ + Entry-point function to handle the logic of col-wise sharding of weight + for embeddingBag. (Detailed explanations of the logic can be found in + the comment for sharded_embedding_bag.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: col-wise shared local weight used for lookup. + offsets: list of start positions of each bag for 1D input. + per_sample_weights: weights for weighted sum mode. + mode: aggregation method of each bag. 
+ max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + pg: process group. + + Return: + output: final result of lookup and aggregation. + local_shard: col-wise shared local weight used for lookup. + If max_norm, this will be the renormed weight. + """ + # allgather the special input of embedding bag first. + ( + gathered_inputs, + gathered_per_sample_weights, + gathered_offsets, + ) = _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg) + + if max_norm is not None: + # max_norm changes the weight in-place + local_shard = _handle_max_norm_col_wise( + max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg + ) + + output = _handle_col_wise_sharding_base( + torch.nn.functional.embedding_bag, + 1, + input, + world_size, + weight, + local_shard, + pg, + gathered_inputs, + mode=mode, + gathered_per_sample_weights=gathered_per_sample_weights, + gathered_offsets=gathered_offsets, + padding_idx=padding_idx, + ) + return (output, local_shard) + + +def _handle_row_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + rank, + pg, +): + """ + Entry-point function to handle the logic of row-wise sharding of weight + for embeddingBag. (Detailed explanations of the logic can be found in + the comment for sharded_embedding_bag.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: row-wise shared local weight used for lookup. + offsets: list of start positions of each bag for 1D input. + per_sample_weights: weights for weighted sum mode. + mode: aggregation method of each bag. + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + rank: # of cuda process. + pg: process group. + + Returns: + gathered_output: final result of lookup and aggregation. + """ + if input.dim() > 1 and per_sample_weights is None: + # allgather the inputs first for non Replicated Tensor. 
+ gather_inp = _all_gather_base_input(input, pg) + else: + ( + gathered_inputs, + gathered_per_sample_weights, + gathered_offsets, + ) = _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg) + cat_dim = 0 if input.dim() != 1 else -1 + gather_inp = torch.cat(gathered_inputs, dim=cat_dim) + if per_sample_weights is not None: + per_sample_weights = torch.cat(gathered_per_sample_weights, dim=cat_dim) + offset_add = 0 if input.dim() > 1 else input.size(0) + if offsets is not None: + offsets_list = torch.cat( + [gathered_offsets[i] + (offset_add * i) for i in range(pg.size())], + dim=cat_dim, + ) + + # Mask the input according to sharding spec. + lookup_input, padding_local, padding_row = _handle_row_wise_mask( + gather_inp, padding_idx, weight, world_size, rank + ) + if mode == "max": + padding_row[:] = -float("Inf") + + # When input is a large tensor, the value of weight is changed. + # This is a walk-around for now. GH issue: #81717. + if max_norm is not None: + torch.nn.functional.embedding_bag( + torch.unique(lookup_input)[:-1], + local_shard, + offsets=torch.tensor([0], device=local_shard.device, dtype=torch.long), + mode=mode, + per_sample_weights=None, + max_norm=max_norm, + norm_type=norm_type, + padding_idx=padding_local, + ) + max_norm = None + result = torch.nn.functional.embedding_bag( + lookup_input, + torch.cat([local_shard, padding_row]), + offsets=offsets_list if offsets is not None else offsets, # type: ignore[possibly-undefined] + mode=mode if mode != "mean" else "sum", + per_sample_weights=per_sample_weights, + max_norm=max_norm, + norm_type=norm_type, + padding_idx=padding_local, + ) + + op = ReduceOp.SUM if mode != "max" else ReduceOp.MAX + # TODO: Make the result a PartialTensor and move the logic below there. + local_shards = result.chunk(pg.size()) + result = reduce_scatter( + torch.empty_like(local_shards[0]), + list(local_shards), + op=op, + group=pg, + ) + + # For Mean, we cannot do the division until very end because the sum of means + # not equal to the mean of sum. (Divisor is different) + if mode == "mean": + if input.dim() > 1: + padding_idx = padding_idx if padding_idx is not None else -1 + split_sizes = torch.sum( + torch.ne(input, padding_idx), dim=-1, dtype=local_shard.dtype + ) + else: + split_sizes = torch.cat( + ( + offsets[1 : offsets.size(0)] - offsets[0:-1], + (input.size(0) - offsets[-1]).unsqueeze(0), + ), + dim=-1, + ) + return torch.div(result, split_sizes.unsqueeze(1)) + + # Return the appropriate local result. + return result + + +def _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg): + """ + In case we need to gather input and all other parameters of embeddingBag + ops, we need to stack all input together to perform ``all_gather`` + collective communication just once. + + Note that since offsets does not share the same size as input and + is always smaller than input, we resize it during the communication. + + Args: + input: tensor to be applied op on. + per_sample_weights: weights for weighted sum mode. + offsets: when input is 1D. offsets determines the starting + index position of each bag (sequence) in input. + pg: process group. + + Returns: + gathered_inputs: list of input tensor gathered from each rank. + gathered_per_sample_weights: list of per_sample_weights from each rank. + gathered_offsets: list of offsets from each rank. 
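Because the mean of per-rank sums is not the mean of the whole bag, ``_handle_row_wise_sharding`` above runs the lookup in ``sum`` mode and divides only after the cross-rank reduction, using per-bag lengths recovered from ``offsets``. A small sketch of that length computation with assumed values:

import torch

input_ids = torch.tensor([2, 5, 1, 7, 0, 3, 4])   # 1-D input
offsets = torch.tensor([0, 3, 5])                 # three bags: [0:3), [3:5), [5:7)

split_sizes = torch.cat(
    (offsets[1:] - offsets[:-1],
     (input_ids.size(0) - offsets[-1]).unsqueeze(0)),
    dim=-1,
)
print(split_sizes)    # tensor([3, 2, 2]) -> summed bags are divided by these lengths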
+ """ + input_to_gather = [input] + if per_sample_weights is not None: + input_to_gather.append(per_sample_weights) + if offsets is not None: + input_to_gather.append(offsets.clone().resize_(input.size())) + gathered_inputs = all_gather(torch.stack(input_to_gather), group=pg) + + gathered_per_sample_weights = None + if per_sample_weights is not None: + gathered_per_sample_weights = [t[1] for t in gathered_inputs] + gathered_offsets = None + if offsets is not None: + idx = 2 if per_sample_weights is not None else 1 + gathered_offsets = [ + t[idx].resize_(offsets.size()).to(offsets.dtype) for t in gathered_inputs + ] + gathered_inputs = [t[0].to(input.dtype) for t in gathered_inputs] + return gathered_inputs, gathered_per_sample_weights, gathered_offsets diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9e6b1662589c47b81534d5d04493d6e68f89b12f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py @@ -0,0 +1,12 @@ +# Keep old package for BC purposes, this file should be removed once +# everything moves to the `torch.distributed._shard` package. +import sys +import torch +import warnings + +from torch.distributed._shard.sharded_tensor import * # noqa: F403 +warnings.warn( + "torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead", + DeprecationWarning +) +sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb32f7ad5ab41b7860a25f04f7789850420bbf9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a07470a0cfd4039504c2358a402242e429434a49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/__init__.py @@ -0,0 +1,3 @@ +from .join import Join +from .join import Joinable +from .join import JoinHook diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/join.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/join.py new file mode 100644 index 0000000000000000000000000000000000000000..7c1aa3cac5ac5525faa1b6d48529505360f385a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/join.py @@ -0,0 +1,346 @@ +import warnings +from abc import ABC, abstractmethod +from types import TracebackType +from typing import Any, List, NamedTuple, Optional, Type + +import torch +import torch.distributed as dist + +__all__ = ['JoinHook', 'Joinable', 'Join'] + +class JoinHook: + r""" + This defines a join hook, which provides two entry points in the join context manager. 
+ + Entry points : a main hook, which is called repeatedly while there exists a non-joined + process, and a post-hook, which is called once all processes have joined. + + To implement a join hook for the generic join context manager, define a + class that inherits from :class:`JoinHook` and override ``main_hook()`` and + ``post_hook()`` as appropriate. + """ + + def main_hook(self) -> None: + r"""Call this hook while there exists a non-joined process to shadow collective communications in a training iteration. + + Training iteration i.e., in one forward pass, backward pass, and optimizer step. + """ + ... + + def post_hook(self, is_last_joiner: bool) -> None: + r""" + Call hook after all processes have joined. + + It is passed an additional ``bool`` argument ``is_last_joiner``, which indicates if the rank is one of the last to join. + + Arguments: + is_last_joiner (bool): ``True`` if the rank is one of the last to + join; ``False`` otherwise. + """ + ... + + +class Joinable(ABC): + r""" + This defines an abstract base class for joinable classes. + + A joinable class + (inheriting from :class:`Joinable`) should implement :meth:`join_hook`, + which returns a :class:`JoinHook` instance, in addition to + :meth:`join_device` and :meth:`join_process_group` that return device and + process group information, respectively. + """ + + @abstractmethod + def __init__(self): + super().__init__() + self._join_config = _JoinConfig.construct_disabled_join_config() + + @abstractmethod + def join_hook(self, **kwargs) -> JoinHook: + r""" + Return a :class:`JoinHook` instance for the given :class:`Joinable`. + + Arguments: + kwargs (dict): a :class:`dict` containing any keyword arguments + to modify the behavior of the join hook at run time; all + :class:`Joinable` instances sharing the same join context + manager are forwarded the same value for ``kwargs``. + """ + ... + + @property + @abstractmethod + def join_device(self) -> torch.device: + r"""Return the device from which to perform collective communications needed by the join context manager.""" + ... + + @property + @abstractmethod + def join_process_group(self) -> Any: + r"""Returns the process group for the collective communications needed by the join context manager itself.""" + ... + + +class _JoinConfig(NamedTuple): + r"""This includes all fields needed from a :class:`Joinable` instance for the join context manager side.""" + + enable: bool + throw_on_early_termination: bool + is_first_joinable: bool + + @staticmethod + def construct_disabled_join_config(): + r"""Return a :class:`_JoinConfig` instance indicating that join-related logic should be disabled. + + e.g. if the caller is not in a join context manager. + """ + return _JoinConfig( + enable=False, + throw_on_early_termination=False, + is_first_joinable=False + ) + + + +class Join: + r""" + This class defines the generic join context manager, which allows custom hooks to be called after a process joins. + + These hooks should shadow the + collective communications of non-joined processes to prevent hanging and + erroring and to ensure algorithmic correctness. Refer to :class:`JoinHook` + for details about the hook definition. + + .. warning:: + The context manager requires each participating :class:`Joinable` to + call the method :meth:`notify_join_context()` before its own per- + iteration collective communications to ensure correctness. + + .. warning:: + The context manager requires that all ``process_group`` attributes in + the :class:`JoinHook` objects are the same. 
If there are multiple + :class:`JoinHook` objects, then the ``device`` of the first is used. + The process group and device information is used for checking for non- + joined processes and for notifying processes to throw an exception if + ``throw_on_early_termination`` is enabled, both of which using an all- + reduce. + + Arguments: + joinables (List[Joinable]): a list of the participating + :class:`Joinable` s; their hooks are iterated over in the given + order. + + enable (bool): a flag enabling uneven input detection; setting to + ``False`` disables the context manager's functionality and should + only be set when the user knows the inputs will not be uneven + (default: ``True``). + + throw_on_early_termination (bool): a flag controlling whether to throw an + exception upon detecting uneven inputs (default: ``False``). + + Example:: + + >>> import os + >>> import torch + >>> import torch.distributed as dist + >>> import torch.multiprocessing as mp + >>> # xdoctest: +SKIP + >>> import torch.nn.parallel.DistributedDataParallel as DDP + >>> import torch.distributed.optim.ZeroRedundancyOptimizer as ZeRO + >>> from torch.distributed.algorithms.join import Join + >>> + >>> # On each spawned worker + >>> def worker(rank): + >>> dist.init_process_group("nccl", rank=rank, world_size=2) + >>> model = DDP(torch.nn.Linear(1, 1).to(rank), device_ids=[rank]) + >>> optim = ZeRO(model.parameters(), torch.optim.Adam, lr=0.01) + >>> # Rank 1 gets one more input than rank 0 + >>> inputs = [torch.tensor([1.]).to(rank) for _ in range(10 + rank)] + >>> with Join([model, optim]): + >>> for input in inputs: + >>> loss = model(input).sum() + >>> loss.backward() + >>> optim.step() + >>> # All ranks reach here without hanging/erroring + """ + + def __init__( + self, + joinables: List[Joinable], + enable: bool = True, + throw_on_early_termination: bool = False, + **kwargs, + ): + if len(joinables) == 0: + raise ValueError("The join context manager requires at least one joinable") + self._joinables = joinables + self._join_hooks = [joinable.join_hook(**kwargs) for joinable in self._joinables] + self._enable = enable + self._throw_on_early_termination = throw_on_early_termination + self._set_joinable_configs() + self._extract_dist_info() + + def _set_joinable_configs(self) -> None: + r"""Set the :class:`_JoinConfig` of each participating :class:`Joinable`.""" + assert len(self._joinables) > 0 + is_first_joinable = True + for joinable in self._joinables: + joinable._join_config = _JoinConfig( + enable=self._enable, + throw_on_early_termination=self._throw_on_early_termination, + is_first_joinable=is_first_joinable + ) + is_first_joinable = False + + def _extract_dist_info(self) -> None: + r""" + Extract the process group and device information from the joinables. + + If there are multiple joinables, then the context manager uses the + first specified device. + + Preconditions: + ``self._joinables`` is not ``None`` and is non-empty. + + Raises: + ValueError + If there are multiple conflicting ``process_group`` attributes + among the ``Joinable`` objects. 
+ """ + process_group = None + device = None + for joinable in self._joinables: + if process_group is None: + process_group = joinable.join_process_group + elif process_group != joinable.join_process_group: + raise ValueError("Using join context manager with multiple process groups") + if device is None: + device = joinable.join_device + self._process_group = process_group + self._rank = dist.get_rank(self._process_group) + self._device = device + + def __enter__(self): + ... + + def __exit__( + self, + type: Optional[Type[BaseException]], + value: Optional[BaseException], + traceback: Optional[TracebackType] + ): + r""" + Repeatedly runs the main hooks until all processes join; then, runs the post-hooks. + + Raises: + RuntimeError + If ``throw_on_early_termination=True``. + """ + if not self._enable or type: + return # propagate the exception directly if one was raised + + all_procs_joined = False + is_last_joiner = True + + i = 0 + WARN_THRESHOLD = 1000 + warnings.simplefilter("once") + + while not all_procs_joined: + if i > WARN_THRESHOLD: + warnings.warn( + "Detected uneven input skew of greater than " + f"{WARN_THRESHOLD}. This means that rank " + f"{self._rank} has at least {WARN_THRESHOLD} " + f"fewer inputs than other currently-active ranks. " + "This level of skew could lead to performance " + "degradation during training." + ) + # Shadow the all-reduce in non-joined processes + num_nonjoined_procs = self._get_num_nonjoined_procs() + if num_nonjoined_procs == 0: + all_procs_joined = True + else: + if self._throw_on_early_termination: + self._notify_procs_to_terminate() + + # Run main hooks + for join_hook in self._join_hooks: + join_hook.main_hook() + + is_last_joiner = False + i += 1 + + # Run post-hooks + for join_hook in self._join_hooks: + join_hook.post_hook(is_last_joiner) + + def _get_num_nonjoined_procs(self): + r"""Return the number of non-joined processes by shadowing an all-reduce in the non-joined processes.""" + num_nonjoined_procs = torch.zeros(1, device=self._device) + dist.all_reduce(num_nonjoined_procs, group=self._process_group) + return num_nonjoined_procs.item() + + def _notify_procs_to_terminate(self): + r"""Schedule an all-reduce to notify non-joined processes to terminate. + + Also raise a ``RuntimeError`` indicating that the current process has exhausted its inputs. + """ + ones = torch.ones(1, device=self._device) + dist.all_reduce(ones, group=self._process_group) + raise RuntimeError(f"Rank {self._rank} exhausted all inputs.") + + @staticmethod + def notify_join_context(joinable: Joinable): + r""" + Notifies the join context manager that the calling process has not yet joined. + + Then, if ``throw_on_early_termination=True``, checks if uneven inputs have been detected + (i.e. if one process has already joined) and throws an exception if so. + + This method should be called from a :class:`Joinable` object before + its per-iteration collective communications. For example, this should + be called at the beginning of the forward pass in + :class:`DistributedDataParallel`. + + Only the first :class:`Joinable` object passed into the context + manager performs the collective communications in this method, and + for the others, this method is vacuous. + + Arguments: + joinable (Joinable): the :class:`Joinable` object calling this + method. + + Returns: + An async work handle for the all-reduce meant to notify the context + manager that the process has not yet joined if ``joinable`` is the + first one passed into the context manager; ``None`` otherwise. 
+ """ + assert hasattr(joinable, "_join_config"), \ + f"Check that the {type(joinable)} constructor calls the " \ + "``Joinable`` constructor" + + join_config = joinable._join_config + # First joinable is responsible for the collective communications + if not join_config.is_first_joinable or not join_config.enable: + return None + + device = joinable.join_device + process_group = joinable.join_process_group + + # Schedule an all-reduce to indicate that the caller has not yet joined + ones = torch.ones(1, device=device) + work = dist.all_reduce(ones, group=process_group, async_op=True) + + if join_config.throw_on_early_termination: + # Check if uneven inputs have been detected + zeros = torch.zeros(1, device=device) + dist.all_reduce(zeros, group=process_group) + should_throw = zeros.item() + if should_throw: + raise RuntimeError( + "Detected at least one rank that exhausted inputs. " + "Throwing across all ranks." + ) + return work diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/averagers.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/averagers.py new file mode 100644 index 0000000000000000000000000000000000000000..e1f8c0800c508837d25017202d87f788ec2b58e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/averagers.py @@ -0,0 +1,120 @@ +import warnings +from abc import ABC, abstractmethod +from typing import Union, Iterable, Dict +import torch +import torch.distributed as dist +import torch.distributed.algorithms.model_averaging.utils as utils + +__all__ = ['ModelAverager', 'PeriodicModelAverager'] + +class ModelAverager(ABC): + r"""Base class for all model averagers. + + Args: + process_group: The process group to be used for all-reduce. + If ``None``, the default process group, which + is created by :func:`torch.distributed.init_process_group`, + will be used. (default: ``None``) + """ + + def __init__(self, process_group=None): + self.process_group = ( + process_group if process_group is not None else dist.group.WORLD + ) + self.step = 0 + + @abstractmethod + def average_parameters(self, params): + raise NotImplementedError + + +class PeriodicModelAverager(ModelAverager): + r""" + Averages parameters periodically after the warm-up stage. + + This can be used for running `post-local SGD `_, + by running :class:`~torch.nn.DistributedDataParallel` (DDP) + using the subgroups created by :meth:`~torch.distributed.new_subgroups`. + + Args: + period (int): The number of steps per model averaging. + Usually the period should be greater than ``1`` to reduce the communication cost. + Otherwise, only DDP needs to be used. + warmup_steps (int): The number of warm-up steps. During this stage, + model averaging is skipped. + process_group: The process group to be used for all-reduce. + If ``None``, the default process group, which + is created by :func:`torch.distributed.init_process_group`, + will be used. 
(default: ``None``) + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch + >>> import torch.distributed as dist + >>> import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD + >>> import torch.distributed.algorithms.model_averaging.averagers as averagers + >>> import torch.nn as nn + >>> + >>> dist.init_process_group("nccl", rank=rank, world_size=16) + >>> torch.cuda.set_device(rank) + >>> module = nn.Linear(1, 1, bias=False).cuda() + >>> model = nn.parallel.DistributedDataParallel( + >>> module, device_ids=[rank], output_device=rank + >>> ) + >>> # Register a post-localSGD communication hook. + >>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100) + >>> model.register_comm_hook(state, post_localSGD_hook) + >>> + >>> # In the first 100 steps, run global gradient averaging like normal DDP at every step. + >>> # After 100 steps, run model averaging every 4 steps. + >>> # Note that ``warmup_steps`` must be the same as ``start_localSGD_iter`` used in ``PostLocalSGDState``. + >>> averager = averagers.PeriodicModelAverager(period=4, warmup_steps=100) + >>> for step in range(0, 200): + >>> optimizer.zero_grad() + >>> loss = loss_fn(output, labels) + >>> loss.backward() + >>> optimizer.step() + >>> # Will average model parameters globally every 4 steps. Thus, + >>> # inter-node communication only occurs every 4 iterations after + >>> # the initial ``warmup_steps`` period. + >>> averager.average_parameters(model.parameters()) + """ + + def __init__( + self, + period, + warmup_steps=0, + process_group=None + ): + super().__init__(process_group) + if warmup_steps < 0: + raise ValueError("Arg ``warmup_steps`` must be a non-negative number.") + self.warmup_steps = warmup_steps + if period < 1: + raise ValueError("Arg ``period`` must be a positive value.") + elif period == 1: + warnings.warn( + "When period is 1, no need to use model averaging because the communication cost " + "of all-reducing parameters will be no less than the cost of all-reducing gradients " + "by DistributedDataParallel in the backward pass. Therefore, only " + "DistributedDataParallel should be used for this case." + ) + self.period = period + + def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]): + """ + Averages parameters or parameter groups of an optimizer if ``step`` is no less than ``warmup_steps``. + + Can be divided by ``period``, where ``step`` is increased by 1 + at each iteration in the training loop. + Args: + params: The parameters of a model or parameter groups of an optimizer. + + """ + if ( + self.step >= self.warmup_steps + and (self.step - self.warmup_steps) % self.period == 0 + ): + utils.average_parameters_or_parameter_groups(params, self.process_group) + self.step += 1 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..eaa1cd2e968db5b551a8bfec64ee9f5a13d0b16b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/algorithms/model_averaging/utils.py @@ -0,0 +1,72 @@ +# flake8: noqa C101 +import itertools +from typing import Union, Iterable, Dict, Iterator + +import torch +import torch.distributed as dist +# The two imports below are not always available depending on the +# USE_DISTRIBUTED compile flag. 
Make sure they raise import error +# if we're trying to use them. +from torch.distributed import ProcessGroup, group + +__all__ = ["average_parameters", "get_params_to_average", "average_parameters_or_parameter_groups"] + +def average_parameters( + params: Iterator[torch.nn.Parameter], process_group: ProcessGroup +): + """ + Averages all the given parameters. + + For allreduce efficiency, all the parameters are flattened into a contiguous buffer. + Thus, it requires extra memory of the same size as the given parameters. + """ + group_to_use = process_group if process_group is not None else group.WORLD + # Do not update any parameter if not in the process group. + if dist._rank_not_in_group(group_to_use): + return + + params_it1, params_it2 = itertools.tee(params) + # If the input parameters have different data types, + # packing these parameters will trigger an implicit type up-casting. + # The original parameter data types will be restored during the subsequent unpacking. + flat_params = torch.cat([p.data.reshape(-1) for p in params_it1]) + flat_params /= dist.get_world_size(group_to_use) + # Make sure the allreduce will not conflict with any other ongoing process group. + if torch.cuda.is_available(): + torch.cuda.synchronize() + dist.all_reduce(flat_params, group=group_to_use) + + offset = 0 + for p in params_it2: + p.data = flat_params[offset : offset + p.numel()].view_as(p).type_as(p) + offset += p.numel() + + +def get_params_to_average(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]]): + """ + Return a list of parameters that need to average. + + This filters out the parameters that do not contain any gradients. + Args: + params: The parameters of a model or parameter groups of an optimizer. + """ + filtered_params = [] + for param in params: + if isinstance(param, torch.nn.Parameter): + # model.parameters() input + param_data = param + if param_data.grad is not None: + filtered_params.append(param_data) + elif isinstance(param, dict): + # optimizer.param_groups input + for param_data in param["params"]: + if param_data.grad is not None: + filtered_params.append(param_data) + else: + raise NotImplementedError(f"Parameter input of type {type(param)} is not supported") + return filtered_params + + +def average_parameters_or_parameter_groups(params: Union[Iterable[torch.nn.Parameter], Iterable[Dict[str, torch.nn.Parameter]]], process_group: ProcessGroup): + """Averages parameters of a model or parameter groups of an optimizer.""" + average_parameters(iter(get_params_to_average(params)), process_group) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c9b29a750593a812907ce2cf4c800d7d1435bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py @@ -0,0 +1,77 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" + +Torchelastic agent and user worker failover contract: + +**TL;DR;**: + +* TE(torchelastic) expects user workers to finish with the 5 minutes drift +* It is better to design DDP app to fail for all workers, rather than a single one. 
+* TE does not synchronize the number of restarts between agents +* TE re-rendezvous does not trigger a restart decrease +* When a single agent finishes its job (successfully or not), it will close rendezvous. + If other agents still have workers in progress, they will be terminated. +* Based on the above, scale down does not work if at least a single agent finishes the job. +* When scale up is detected by the agents, they will not decrease ``max_restarts`` + + +In general TE (torchelastic) can launch arbitrary user code, but some +clarification is needed around what failover mechanism torchelastic +provides and what failover mechanism it expects from user workers. + +Torchelastic currently supports DDP-style applications. That means that +TE expects *ALL* workers to finish approximately at the same time. In practice, +it is nearly impossible to guarantee that all workers in an arbitrary +DDP application finish at the same time, so TE provides a finalization barrier +that waits TIMEOUT (5 minutes) for worker finalization. + +**Worker Failure** + +When a worker fails, TE will check the number of restarts +available; if more than 0 restarts remain, TE will start a new rendezvous +round and restart the worker process. The new rendezvous round will cause other +TE agents to terminate their workers. + +.. note:: The TE agents do not synchronize restarts between themselves. + When a single agent performs a restart, it will trigger a local ``max_restarts`` + decrease; other agents will not decrease their ``max_restarts``. + +A single worker failure can cause the whole cluster to fail: +if a single worker is constantly failing, it will cause the TE agent's +``max_restarts`` to go to zero. This will cause an agent to finish its +work and close rendezvous. If there are any other workers on different +agents, they will be terminated. + + +**Re-Rendezvous** + +Re-rendezvous occurs when TE agents detect a new node +trying to join the cluster. TE will not decrease ``max_restarts``. TE agents +will terminate their workers and start a new rendezvous round. + +Note about DynamicRendezvous (etcd-v2, c10d-experimental): if the rendezvous +already has max_nodes, the new node won't be added to the wait list right +away since there is no need to tear down a rendezvous that is already fully +utilized. The new node will wait until its timeout (600 secs by default) +and periodically check the number of participants. If the number becomes +less than max_nodes, it will be added to the wait list; otherwise, it will time out after 600 secs. + +*Scale up event*. When a scale up event happens, the torchelastic rendezvous +will detect that there are new nodes trying to join. The torchelastic agent +will stop all workers and perform re-rendezvous. Note: when a scale up event +happens, *``max_restarts``* will *not* decrease. + +*Scale down event*. When a scale down event happens, the rendezvous will not +notify the torchelastic agent about it. If the TE agent is launched with ``max_restarts=0``, +it relies on the underlying scheduler to handle the job restart. If ``max_restarts>0``, the +TE agent will terminate workers and start a new rdzv round, which is a *Scale up event*.
+ +""" diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7c0d76131fe40d70945ffa8ff97431954151d50e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +The elastic agent is the control plane of torchelastic. + +It is a process that launches and manages underlying worker processes. +The agent is responsible for: + +1. Working with distributed torch: the workers are started with all the + necessary information to successfully and trivially call + ``torch.distributed.init_process_group()``. + +2. Fault tolerance: monitors workers and upon detecting worker failures + or unhealthiness, tears down all workers and restarts everyone. + +3. Elasticity: Reacts to membership changes and restarts workers with the new + members. + +The simplest agents are deployed per node and works with local processes. +A more advanced agent can launch and manage workers remotely. Agents can +be completely decentralized, making decisions based on the workers it manages. +Or can be coordinated, communicating to other agents (that manage workers +in the same job) to make a collective decision. +""" + +from .api import ( # noqa: F401 + ElasticAgent, + RunResult, + SimpleElasticAgent, + Worker, + WorkerGroup, + WorkerSpec, + WorkerState, +) +from .local_elastic_agent import TORCHELASTIC_ENABLE_FILE_TIMER, TORCHELASTIC_TIMER_FILE diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6a5907f3fa9bc1963d3ebd81d5cdaa266c37deb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cdd4620fbe3dad046da36f4a0c3e433090cf87c8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py @@ -0,0 +1,954 @@ +# mypy: ignore-errors + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import abc +import functools +import json +import os +import signal +import socket +import time +import traceback +import warnings +from contextlib import closing +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch.distributed.elastic.rendezvous as rdzv +import torch.distributed.elastic.utils.store as store_util +from torch.distributed.elastic.rendezvous import RendezvousGracefulExitError +from torch.distributed import Store +from torch.distributed.elastic.events import Event, EventSource, record +from torch.distributed.elastic.metrics import prof, put_metric +from torch.distributed.elastic.multiprocessing import ( + ProcessFailure, + SignalException, +) +from torch.distributed.elastic.utils.logging import get_logger + +__all__ = [ + "WorkerSpec", + "Worker", + "WorkerState", + "WorkerGroup", + "RunResult", + "ElasticAgent", + "SimpleElasticAgent", +] +_TERMINAL_STATE_SYNC_ID = "torchelastic/agent/terminal_state" + +DEFAULT_ROLE = "default" +log = get_logger(__name__) + + +@dataclass +class WorkerSpec: + """Blueprint information about a particular type of worker. + + For a given role, there must only exist a single worker spec. + Worker spec is expected to be homogeneous across all nodes (machine), + that is each node runs the same number of workers for a particular spec. + + Args: + role: user-defined role for the workers with this spec + local_world_size: number local workers to run + fn: (deprecated use entrypoint instead) + entrypoint: worker function or command + args: arguments to pass to ``entrypoint`` + rdzv_handler: handles rdzv for this set of workers + max_restarts: number of max retries for the workers + monitor_interval: monitor status of workers every ``n`` seconds + master_port: fixed port to run the c10d store on rank 0 + if not specified then will chose a random free port + master_addr: fixed master_addr to run the c10d store on rank 0 + if not specified then will chose hostname on agent rank 0 + redirects: redirect std streams to a file, + selectively redirect for a particular + local rank by passing a map + tee: tees the specified std stream(s) to console + file, + selectively tee for a particular local rank by passing a map, + takes precedence over ``redirects`` settings. + + """ + + role: str + local_world_size: int + rdzv_handler: rdzv.RendezvousHandler + fn: Optional[Callable] = None + # TODO @kiuk - make entrypoint a required field + entrypoint: Union[Callable, str, None] = None + args: Tuple = () + max_restarts: int = 3 + monitor_interval: float = 30.0 + master_port: Optional[int] = None + master_addr: Optional[str] = None + local_addr: Optional[str] = None + + def __post_init__(self): + assert self.local_world_size > 0 + assert self.monitor_interval > 0 + + if self.fn: + warnings.warn( + "WorkerSpec.fn will be deprecated," + " please use WorkerSpec.entrypoint instead", + category=DeprecationWarning, + ) + self.entrypoint = self.fn + assert self.entrypoint + + def get_entrypoint_name(self): + """Get the entry point name. + + If the entrypoint is a function (e.g. ``Callable``) returns its ``__qualname__`` + else if the entrypoint is a binary (e.g. ``str``), returns the binary name. + """ + if isinstance(self.entrypoint, str): + return os.path.basename(self.entrypoint) + else: + assert self.entrypoint is not None + return self.entrypoint.__qualname__ + + +class Worker: + """A worker instance. 
+ + Contrast this with ``WorkerSpec`` that represents the specifications of a + worker. A ``Worker`` is created from a ``WorkerSpec``. A ``Worker`` is to + a ``WorkerSpec`` as an object is to a class. + + The ``id`` of the worker is interpreted + by the specific implementation of ``ElasticAgent``. For a local + agent, it could be the ``pid (int)`` of the worker, for a remote + agent it could be encoded as ``host:port (string)``. + + Args: + id (Any): uniquely identifies a worker (interpreted by the agent) + local_rank (int): local rank of the worker + global_rank (int): global rank of the worker + role_rank (int): rank of the worker across all workers that have the same role + world_size (int): number of workers (globally) + role_world_size (int): number of workers that have the same role + """ + + __slots__ = [ + "id", + "local_rank", + "global_rank", + "role_rank", + "world_size", + "role_world_size", + ] + + def __init__( + self, + local_rank: int, + global_rank: int = -1, + role_rank: int = -1, + world_size: int = -1, + role_world_size: int = -1, + ): + # unique identifier for this worker + self.id: Any = None + + # rank of the worker among workers with the same role being monitored + # by the same ``agent`` instance. + self.local_rank: int = local_rank + + # rank of the worker among all the workers across all roles + # across all ``agent`` instances. + # Global rank is not stable between re-rendezvous. + self.global_rank: int = global_rank + + # rank of the worker among all the workers with the same role + # across all ``agent`` instances. + # Role rank is not stable between re-rendezvous. + self.role_rank: int = role_rank + + # total number of workers (globally). Due to elasticity + # the world size may change between re-rendezvous. + self.world_size: int = world_size + + # total number of workers that share the same role. Due to elasticity + # the role world size may change between re-rendezvous. + self.role_world_size: int = role_world_size + + def __str__(self): + return ( + f"local_rank={self.local_rank},global_rank={self.global_rank}" + f",role_rank={self.role_rank},world_size={self.world_size}" + f",role_world_size={self.role_world_size}" + ) + + def __repr__(self): + return str(self) + + +class WorkerState(str, Enum): + """A state of the ``WorkerGroup``. + + Workers in a worker group change state as a unit. If a single worker + in a worker group fails the entire set is considered failed:: + + UNKNOWN - agent lost track of worker group state, unrecoverable + INIT - worker group object created not yet started + HEALTHY - workers running and healthy + UNHEALTHY - workers running and unhealthy + STOPPED - workers stopped (interrupted) by the agent + SUCCEEDED - workers finished running (exit 0) + FAILED - workers failed to successfully finish (exit !0) + + + A worker group starts from an initial ``INIT`` state, + then progresses to ``HEALTHY`` or ``UNHEALTHY`` states, + and finally reaches a terminal ``SUCCEEDED`` or ``FAILED`` state. + + Worker groups can be interrupted and temporarily put into ``STOPPED`` state + by the agent. Workers in ``STOPPED`` state are scheduled to be restarted + in the near future by the agent. Some examples of workers being put into + ``STOPPED`` state are: + + 1. Worker group failure|unhealthy observed + 2. Membership change detected + + When actions (start, stop, rdzv, retry, etc) on worker group fails + and results in the action being partially applied to the worker group + the state will be ``UNKNOWN``. 
Typically this happens on uncaught/unhandled + exceptions during state change events on the agent. The agent is not + expected to recover worker groups in ``UNKNOWN`` state and is better off + self terminating and allowing the job manager to retry the node. + """ + + UNKNOWN = "UNKNOWN" + INIT = "INIT" + HEALTHY = "HEALTHY" + UNHEALTHY = "UNHEALTHY" + STOPPED = "STOPPED" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + + @staticmethod + def is_running(state: "WorkerState") -> bool: + """Return the state of the Worker. + + Returns: + True if the worker state represents workers still running + (e.g. that the process exists but not necessarily healthy). + """ + return state in {WorkerState.HEALTHY, WorkerState.UNHEALTHY} + + +class WorkerGroup: + """A set of ``Worker`` instances. + + The class defines a set of ``Worker`` instances for the given ``WorkerSpec`` managed by ``ElasticAgent``. Whether the worker + group contains cross instance workers or not depends on the implementation of the agent. + """ + + __slots__ = ["spec", "workers", "store", "group_rank", "group_world_size", "state"] + + def __init__(self, spec: WorkerSpec): + self.spec = spec + self.workers = [Worker(local_rank=i) for i in range(self.spec.local_world_size)] + + # assigned after rdzv + self.store = None + self.group_rank = None + self.group_world_size = None + + self.state = WorkerState.INIT + + +class _RoleInstanceInfo: + """The class is used by the agent to exchange the information with other agents. + + The information is used to determine the rank of the workers that agent + manages in heterogeneous environments, where different agents can have + different number of workers. + """ + + __slots__ = ["role", "rank", "local_world_size"] + + def __init__(self, role: str, rank: int, local_world_size: int): + r"""Initialize the agent class instance. + + Args: + role (str): user-defined role for the workers with this spec + rank (int): the rank of the agent + local_world_size (int): number of local workers to run + """ + self.role = role + self.rank = rank + self.local_world_size = local_world_size + + def serialize(self) -> bytes: + dict_data = { + "role": self.role, + "rank": self.rank, + "local_world_size": self.local_world_size, + } + return json.dumps(dict_data).encode(encoding="UTF-8") + + @staticmethod + def deserialize(data: bytes): + dict_data = json.loads(data.decode(encoding="UTF-8")) + return _RoleInstanceInfo( + dict_data["role"], dict_data["rank"], dict_data["local_world_size"] + ) + + @staticmethod + def compare(obj1, obj2) -> int: + if obj1.role == obj2.role: + return obj1.rank - obj2.rank + elif obj1.role > obj2.role: + return 1 + else: + return -1 + + @staticmethod + def find_role_boundaries(roles_infos: List, role: str) -> Tuple[int, int]: + start_idx, end_idx = -1, -1 + for idx, role_info in enumerate(roles_infos): + if role_info.role == role: + if start_idx == -1: + start_idx = idx + end_idx = idx + return (start_idx, end_idx) + + +@dataclass +class RunResult: + """Return results of the worker executions. + + Run results follow an "all-or-nothing" policy where the run is successful if and + only if ALL local workers managed by this agent complete successfully. + + If the result is successful (e.g. ``is_failed() = False``) then the ``return_values`` + field contains the outputs (return values) of the workers managed by THIS agent mapped + by their GLOBAL ranks. That is ``result.return_values[0]`` is the return value of + global rank 0. + + .. 
note:: ``return_values`` are only meaningful for when the worker entrypoint + is a function. Workers specified as a binary entrypoint do not canonically + have a return value and the ``return_values`` field is meaningless and + may be empty. + + If ``is_failed()`` returns ``True`` then the ``failures`` field contains the + failure information, again, mapped by the GLOBAL rank of the worker that failed. + + The keys in ``return_values`` and ``failures`` are mutually exclusive, that is, + a worker's final state can only be one of: succeeded, failed. Workers intentionally + terminated by the agent according to the agent's restart policy, are not represented + in either ``return_values`` nor ``failures``. + """ + + state: WorkerState + return_values: Dict[int, Any] = field(default_factory=dict) + failures: Dict[int, ProcessFailure] = field(default_factory=dict) + + def is_failed(self) -> bool: + return self.state == WorkerState.FAILED + + +def _get_socket_with_port() -> socket.socket: + """Return a free port on localhost. + + The free port is "reserved" by binding a temporary socket on it. + Close the socket before passing the port to the entity that + requires it. Usage example:: + + sock = _get_socket_with_port() + with closing(sock): + port = sock.getsockname()[1] + sock.close() + # there is still a race-condition that some other process + # may grab this port before func() runs + func(port) + """ + addrs = socket.getaddrinfo( + host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM + ) + for addr in addrs: + family, type, proto, _, _ = addr + s = socket.socket(family, type, proto) + try: + s.bind(("localhost", 0)) + s.listen(0) + return s + except OSError as e: + s.close() + log.info("Socket creation attempt failed.", exc_info=e) + raise RuntimeError("Failed to create a socket") + + +def _get_fq_hostname() -> str: + return socket.getfqdn(socket.gethostname()) + + +class ElasticAgent(abc.ABC): + """An agent process responsible for managing one or more worker processes. + + The worker processes are assumed to be regular distributed PyTorch scripts. + When the worker process is created by the agent, the agent provides the + necessary information for the worker processes to properly initialize + a torch process group. + + The exact deployment topology and ratio of agent-to-worker is dependent + on the specific implementation of the agent and the user's job placement + preferences. For instance, to run a distributed training job on GPU with + 8 trainers (one per GPU) one can: + + 1. Use 8 x single GPU instances, place an agent per instance, managing + 1 worker per agent. + 2. Use 4 x double GPU instances, place an agent per instance, managing + 2 workers per agent. + 3. Use 2 x quad GPU instances, place an agent per instance, managing + 4 workers per agent. + 4. Use 1 x 8 GPU instance, place an agent per instance, managing + 8 workers per agent. + + Usage + :: + + group_result = agent.run() + if group_result.is_failed(): + # workers failed + failure = group_result.failures[0] + log.exception("worker 0 failed with exit code : %s", failure.exit_code) + else: + return group_result.return_values[0] # return rank 0's results + + """ + + @abc.abstractmethod + def run(self, role: str = DEFAULT_ROLE) -> RunResult: + """Run the agent. + + Supports retrying the worker group on failures up to ``max_restarts``. + + Returns: + The result of the execution, containing the return values or + failure details for each worker mapped by the worker's global rank. 
+ + Raises: + Exception - any other failures NOT related to worker process + """ + raise NotImplementedError() + + @abc.abstractmethod + def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup: + """Return the ``WorkerGroup`` for the given ``role``. + + Note that the worker group is a mutable object and hence in a + multi-threaded/process environment it may change state. + Implementors are encouraged (but not required) to return + a defensive read-only copy. + """ + raise NotImplementedError() + + +class SimpleElasticAgent(ElasticAgent): + """An ``ElasticAgent`` that manages one particular type of worker role. + + An ``ElasticAgent`` that manages workers (``WorkerGroup``) for a single ``WorkerSpec`` + such as one particular type of worker role. + """ + + def __init__(self, spec: WorkerSpec, exit_barrier_timeout: float = 300): + self._worker_group = WorkerGroup(spec) + self._remaining_restarts = self._worker_group.spec.max_restarts + self._store = None + self._exit_barrier_timeout = exit_barrier_timeout + self._total_execution_time = 0 + + def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup: + return self._worker_group + + @abc.abstractmethod + def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]: + r"""Start ``worker_group.spec.local_world_size`` number of workers. + + This is according to worker spec for the worker group . + Returns a map of ``local_rank`` to worker ``id``. + """ + raise NotImplementedError() + + @abc.abstractmethod + def _stop_workers(self, worker_group: WorkerGroup) -> None: + r"""Stop all workers in the given worker group. + + Implementors must deal with workers in all states defined by + ``WorkerState``. That is, it must gracefully handle stopping + non-existent workers, unhealthy (stuck) workers, etc. + """ + raise NotImplementedError() + + @abc.abstractmethod + def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult: + r"""Check on the workers for the ``worker_group``. + + This function also returns the new state of the worker group. + """ + raise NotImplementedError() + + @abc.abstractmethod + def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None: + """Clean up any resources that were allocated during the agent's work. + + Args: + death_sig: Signal to send to the child process, SIGTERM is default + """ + raise NotImplementedError() + + @staticmethod + def _set_master_addr_port( + store: Store, + master_addr: Optional[str], + master_port: Optional[int], + local_addr: Optional[str], + ): + if master_port is None: + sock = _get_socket_with_port() + with closing(sock): + master_port = sock.getsockname()[1] + + if master_addr is None: + # If user specified the address for the local node, use it as the master addr if not exist + if local_addr: + master_addr = local_addr + else: + master_addr = _get_fq_hostname() + + store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8")) + store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8")) + + @staticmethod + def _get_master_addr_port(store: Store) -> Tuple[str, int]: + master_addr = store.get("MASTER_ADDR").decode(encoding="UTF-8") + master_port = int(store.get("MASTER_PORT").decode(encoding="UTF-8")) + return (master_addr, master_port) + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. + @prof + def _rendezvous(self, worker_group: WorkerGroup) -> None: + r"""Run rendezvous for the workers specified by the worker spec. + + Assigns workers a new global rank and world size. 
+ Updates the rendezvous store for the worker group. + """ + spec = worker_group.spec + + store, group_rank, group_world_size = spec.rdzv_handler.next_rendezvous() + self._store = store + + workers = self._assign_worker_ranks(store, group_rank, group_world_size, spec) + worker_group.workers = workers + worker_group.store = store + worker_group.group_rank = group_rank + worker_group.group_world_size = group_world_size + + if group_rank == 0: + self._set_master_addr_port( + store, + spec.master_addr, + spec.master_port, + spec.local_addr, + ) + + master_addr, master_port = self._get_master_addr_port(store) + restart_count = spec.max_restarts - self._remaining_restarts + + log.info( + "[%(role)s] Rendezvous complete for workers. Result:\n" + " restart_count=%(restart_count)s\n" + " master_addr=%(master_addr)s\n" + " master_port=%(master_port)s\n" + " group_rank=%(group_rank)s\n" + " group_world_size=%(group_world_size)s\n" + " local_ranks=%(local_ranks)s\n" + " role_ranks=%(role_ranks)s\n" + " global_ranks=%(global_ranks)s\n" + " role_world_sizes=%(role_world_sizes)s\n" + " global_world_sizes=%(global_world_sizes)s\n", + { + "role": spec.role, + "restart_count": restart_count, + "master_addr": master_addr, + "master_port": master_port, + "group_rank": group_rank, + "group_world_size": group_world_size, + "local_ranks": [worker.local_rank for worker in workers], + "role_ranks": [worker.role_rank for worker in workers], + "global_ranks": [worker.global_rank for worker in workers], + "role_world_sizes": [worker.role_world_size for worker in workers], + "global_world_sizes": [worker.world_size for worker in workers] + } + ) + + def _get_ranks( + self, + role_infos: List[_RoleInstanceInfo], + role_idx: int, + start_idx: int = 0, + end_idx: int = -1, + ) -> Tuple[int, List[int]]: + if end_idx == -1: + end_idx = len(role_infos) + prefix_sum = 0 + total_sum = 0 + for idx in range(start_idx, end_idx): + if role_idx > idx: + prefix_sum += role_infos[idx].local_world_size + total_sum += role_infos[idx].local_world_size + return ( + total_sum, + list(range(prefix_sum, prefix_sum + role_infos[role_idx].local_world_size)), + ) + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. + @prof + def _assign_worker_ranks( + self, store, group_rank: int, group_world_size: int, spec: WorkerSpec + ) -> List[Worker]: + """Determine proper ranks for worker processes. + + The rank assignment is done according to the following algorithm: + + 1. Each agent writes its configuration(group_rank, group_world_size + , num_workers) to the common store. + 2. Each agent retrieves configuration for all agents + and performs two level sort using role and rank. + 3. Determine the global rank: the global rank of the workers for the current + agent is the offset of the infos array up to group_rank of the agent. + The offset is computed as a sum of local_world_size of all agents that + have rank less than the group_rank. The workers would have the ranks: + [offset, offset+local_world_size) + 4. Determine the role rank: The role rank is determined using the algorithms + in the point 3 with the exception that the offset is done from the first + agent that has the same role as current one and has the minimum group rank. 
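The offset arithmetic of step 3 can be summarized with a small, self-contained sketch (names are illustrative; the actual computation is the ``_get_ranks`` helper above):

::

    from typing import List

    def global_ranks(local_world_sizes: List[int], group_rank: int) -> List[int]:
        # Offset = sum of local_world_size of all agents with a smaller group rank.
        offset = sum(local_world_sizes[:group_rank])
        return list(range(offset, offset + local_world_sizes[group_rank]))

    # Three agents running 4, 2 and 4 workers respectively:
    # agent 0 -> [0, 1, 2, 3], agent 1 -> [4, 5], agent 2 -> [6, 7, 8, 9]
    assert global_ranks([4, 2, 4], 1) == [4, 5]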
+ """ + role_infos = self._share_and_gather(store, group_rank, group_world_size, spec) + my_role_info = role_infos[group_rank] + worker_world_size, worker_global_ranks = self._get_ranks(role_infos, group_rank) + role_infos = sorted( + role_infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare) + ) + role_start_idx, role_end_idx = _RoleInstanceInfo.find_role_boundaries( + role_infos, my_role_info.role + ) + role_pos = next( + idx + for idx, role_info in enumerate(role_infos) + if _RoleInstanceInfo.compare(role_info, my_role_info) == 0 + ) + role_world_size, role_ranks = self._get_ranks( + role_infos, role_pos, role_start_idx, role_end_idx + 1 + ) + workers = [] + for ind in range(spec.local_world_size): + worker = Worker( + local_rank=ind, + global_rank=worker_global_ranks[ind], + role_rank=role_ranks[ind], + world_size=worker_world_size, + role_world_size=role_world_size, + ) + workers.append(worker) + return workers + + def _share_and_gather( + self, store, group_rank: int, group_world_size: int, spec: WorkerSpec + ) -> List: + agent_role_info = _RoleInstanceInfo( + spec.role, group_rank, spec.local_world_size + ) + key_prefix = "torchelastic/role_info" + agent_config_enc = agent_role_info.serialize() + role_infos_bytes = store_util.synchronize( + store, agent_config_enc, group_rank, group_world_size, key_prefix + ) + role_infos = [ + _RoleInstanceInfo.deserialize(role_info_bytes) + for role_info_bytes in role_infos_bytes + ] + return role_infos + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. + @prof + def _initialize_workers(self, worker_group: WorkerGroup) -> None: + r"""Start a fresh set of workers for the worker_group. + + Essentially, a rendezvous followed by a ``start_workers``. + The caller should first call ``_stop_workers()`` to stop running workers + prior to calling this method. + + Optimistically sets the state of the worker group that + just started as ``HEALTHY`` and delegates the actual monitoring + of state to ``_monitor_workers()`` method + """ + role = worker_group.spec.role + log.info("[%s] Rendezvous'ing worker group", role) + + # TODO after stopping workers, wait at least monitor_interval*2 for + # workers on different nodes to fail on a collective op before waiting + # on the rdzv barrier, this way we ensure that nodes enter rdzv + # at around the same time and reduce false positive rdzv timeout errors + self._rendezvous(worker_group) + + log.info("[%s] Starting worker group", role) + worker_ids = self._start_workers(worker_group) + for local_rank, w_id in worker_ids.items(): + worker = worker_group.workers[local_rank] + worker.id = w_id + + worker_group.state = WorkerState.HEALTHY + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. + @prof + def _restart_workers(self, worker_group: WorkerGroup) -> None: + """Restart (stops, rendezvous, starts) all local workers in the group.""" + role = worker_group.spec.role + log.info("[%s] Stopping worker group", role) + self._stop_workers(worker_group) + worker_group.state = WorkerState.STOPPED + self._initialize_workers(worker_group) + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. 
+ @prof + def run(self, role: str = DEFAULT_ROLE) -> RunResult: + start_time = time.monotonic() + shutdown_called: bool = False + try: + result = self._invoke_run(role) + self._total_execution_time = int(time.monotonic() - start_time) + self._record_metrics(result) + self._record_worker_events(result) + return result + except RendezvousGracefulExitError as e: + log.info("Rendezvous gracefully exited: %s", e) + except SignalException as e: + log.warning("Received %s death signal, shutting down workers", e.sigval) + self._shutdown(e.sigval) + shutdown_called = True + raise + finally: + if not shutdown_called: + self._shutdown() + # record the execution time in case there were any exceptions during run. + self._total_execution_time = int(time.monotonic() - start_time) + + def get_event_failed(self) -> Event: + return self._construct_event( + state="FAILED", + source=EventSource.AGENT, + raw_error=traceback.format_exc(), + ) + + def get_event_succeeded(self) -> Event: + return self._construct_event( + state="SUCCEEDED", + source=EventSource.AGENT, + ) + + def _record_worker_events(self, result: RunResult) -> None: + for worker in self._worker_group.workers: + failure = result.failures.get(worker.global_rank) + state: str = self._get_worker_state(worker, result) + raw_error = json.dumps(failure.error_file_data) if failure else None + record(self._construct_event(state, EventSource.WORKER, worker, raw_error)) + + def _get_worker_state(self, worker: Worker, result: RunResult) -> str: + failure = result.failures.get(worker.global_rank) + if result.state in {WorkerState.UNHEALTHY, WorkerState.FAILED} and not failure: + # The worker got terminated by the torchelastic agent via SIGTERM signal + return "TERMINATED" + elif failure or worker.global_rank in result.return_values: + return result.state.value + else: + raise ValueError(f"Unknown worker: {worker.global_rank}") + + def _construct_event( + self, + state: str, + source: EventSource, + worker: Optional[Worker] = None, + raw_error: Optional[str] = None, + ) -> Event: + wg = self._worker_group + spec = wg.spec + md = { + "group_world_size": wg.group_world_size, + "entry_point": spec.get_entrypoint_name(), + } + if worker: + md["local_rank"] = (worker.local_rank,) + md["role_rank"] = (worker.role_rank,) + md["role_world_size"] = (worker.role_world_size,) + global_rank = worker.global_rank + worker_id = str(worker.id) + else: + global_rank = None + worker_id = None + md_str = json.dumps(md) + metadata = { + "run_id": spec.rdzv_handler.get_run_id(), + "global_rank": global_rank, + "group_rank": wg.group_rank, + "worker_id": worker_id, + "role": spec.role, + "hostname": _get_fq_hostname(), + "state": state, + "total_run_time": self._total_execution_time, + "rdzv_backend": spec.rdzv_handler.get_backend(), + "raw_error": raw_error, + "metadata": md_str, + "agent_restarts": spec.max_restarts - self._remaining_restarts, + } + return Event( + f"torchelastic.worker.status.{state}", source=source, metadata=metadata + ) + + def _record_metrics(self, group_results: RunResult): + is_failed = group_results.is_failed() + self._record_flakiness_metric(is_failed) + spec = self._worker_group.spec + restarts_happened = self._remaining_restarts != spec.max_restarts + put_metric(f"workers.{spec.role}.run_total", 1) + self._record_metric_with_condition( + "run_success_with_retries", not is_failed and restarts_happened + ) + self._record_metric_with_condition( + "run_success_no_retries", not is_failed and not restarts_happened + ) + self._record_metric_with_condition( + 
"run_failed_with_retries", is_failed and restarts_happened + ) + self._record_metric_with_condition( + "run_failed_no_retries", is_failed and not restarts_happened + ) + + def _record_metric_with_condition(self, metric_name, condition): + spec = self._worker_group.spec + if condition: + put_metric(f"workers.{spec.role}.{metric_name}", 1) + else: + put_metric(f"workers.{spec.role}.{metric_name}", 0) + + def _record_flakiness_metric(self, is_failed: bool = False): + if is_failed: + flakiness = 100.0 + else: + spec = self._worker_group.spec + flakiness = 100.0 - 100.0 * (self._remaining_restarts + 1) / ( + spec.max_restarts + 1 + ) + spec = self._worker_group.spec + + put_metric(f"workers.{spec.role}.flakiness", int(flakiness)) + + def _invoke_run(self, role: str = DEFAULT_ROLE) -> RunResult: + # NOTE: currently only works for a single role + + spec = self._worker_group.spec + role = spec.role + + log.info( + "[%s] starting workers for entrypoint: %s", role, spec.get_entrypoint_name() + ) + + self._initialize_workers(self._worker_group) + monitor_interval = spec.monitor_interval + rdzv_handler = spec.rdzv_handler + + while True: + assert self._worker_group.state != WorkerState.INIT + time.sleep(monitor_interval) + run_result = self._monitor_workers(self._worker_group) + state = run_result.state + self._worker_group.state = state + + put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts) + put_metric(f"workers.{role}.{state.name.lower()}", 1) + + if state == WorkerState.SUCCEEDED: + log.info( + "[%s] worker group successfully finished." + " Waiting %s seconds for other agents to finish.", + role, self._exit_barrier_timeout + ) + self._exit_barrier() + return run_result + elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED}: + if self._remaining_restarts > 0: + log.info( + "[%s] Worker group %s. " + "%s/%s attempts left;" + " will restart worker group", + role, state.name, self._remaining_restarts, spec.max_restarts + ) + self._remaining_restarts -= 1 + self._restart_workers(self._worker_group) + else: + self._stop_workers(self._worker_group) + self._worker_group.state = WorkerState.FAILED + return run_result + elif state == WorkerState.HEALTHY: + # membership changes do not count as retries + num_nodes_waiting = rdzv_handler.num_nodes_waiting() + group_rank = self._worker_group.group_rank + if num_nodes_waiting > 0: + log.info( + "[%s] Detected %s " + "new nodes from group_rank=%s; " + "will restart worker group", + role, num_nodes_waiting, group_rank + ) + self._restart_workers(self._worker_group) + else: + raise Exception(f"[{role}] Worker group in {state.name} state") + + def _exit_barrier(self): + """ + Define a barrier that keeps the agent process alive until all workers finish. + + Wait for ``exit_barrier_timeout`` seconds for all agents to finish + executing their local workers (either successfully or not). This + acts as a safety guard against user scripts that terminate at different + times. + """ + log.info( + "Local worker group finished (%s). " + "Waiting %s seconds for other agents to finish", + self._worker_group.state, self._exit_barrier_timeout + ) + start = time.time() + try: + store_util.barrier( + self._store, + self._worker_group.group_rank, + self._worker_group.group_world_size, + key_prefix=_TERMINAL_STATE_SYNC_ID, + barrier_timeout=self._exit_barrier_timeout, + ) + log.info( + "Done waiting for other agents. 
Elapsed: %s seconds", time.time() - start + ) + except SignalException as e: + log.warning("Got termination signal: %s", e.sigval) + raise + except Exception: + log.exception( + "Error waiting on exit barrier. Elapsed: %s seconds", + time.time() - start + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6348b5c543a5ecb46c97f713a77a14816f1bcc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +import json +import os +import signal +import socket +from string import Template +import uuid +from typing import Any, Dict, Optional, Tuple + +import torch.distributed.elastic.timer as timer +from torch.distributed.elastic import events + +from torch.distributed.elastic.agent.server.api import ( + RunResult, + SimpleElasticAgent, + WorkerGroup, + WorkerSpec, + WorkerState, +) +from torch.distributed.elastic.events.api import EventMetadataValue +from torch.distributed.elastic.metrics.api import prof +from torch.distributed.elastic.multiprocessing import PContext, start_processes, LogsSpecs +from torch.distributed.elastic.utils import macros +from torch.distributed.elastic.utils.logging import get_logger + +log = get_logger(__name__) + +__all__ = [ + "LocalElasticAgent", + "TORCHELASTIC_ENABLE_FILE_TIMER", + "TORCHELASTIC_TIMER_FILE", +] + +TORCHELASTIC_ENABLE_FILE_TIMER = "TORCHELASTIC_ENABLE_FILE_TIMER" +TORCHELASTIC_TIMER_FILE = "TORCHELASTIC_TIMER_FILE" + +class LocalElasticAgent(SimpleElasticAgent): + """An implementation of :py:class:`torchelastic.agent.server.ElasticAgent` that handles host-local workers. + + This agent is deployed per host and is configured to spawn ``n`` workers. + When using GPUs, ``n`` maps to the number of GPUs available on the host. + + The local agent does not communicate to other local agents deployed on + other hosts, even if the workers may communicate inter-host. The worker id + is interpreted to be a local process. The agent starts and stops all worker + processes as a single unit. + + + The worker function and argument passed to the worker function must be + python multiprocessing compatible. To pass multiprocessing data structures + to the workers you may create the data structure in the same multiprocessing + context as the specified ``start_method`` and pass it as a function argument. + + The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait + for other agents to finish. This acts as a safety net to handle cases where + workers finish at different times, to prevent agents from viewing workers + that finished early as a scale-down event. It is strongly advised that the + user code deal with ensuring that workers are terminated in a synchronous + manner rather than relying on the exit_barrier_timeout. + + A named pipe based watchdog can be enabled in ```LocalElasticAgent``` if an + environment variable ``TORCHELASTIC_ENABLE_FILE_TIMER`` with value 1 has + been defined in the ```LocalElasticAgent``` process. 
+ Optionally, another environment variable ```TORCHELASTIC_TIMER_FILE``` + can be set with a unique file name for the named pipe. If the environment + variable ```TORCHELASTIC_TIMER_FILE``` is not set, ```LocalElasticAgent``` + will internally create a unique file name and set it to the environment + variable ```TORCHELASTIC_TIMER_FILE```, and this environment variable will + be propagated to the worker processes to allow them to connect to the same + named pipe that ```LocalElasticAgent``` uses. + + Logs are written to the specified log directory. Each log line will be by default + prefixed by ``[${role_name}${local_rank}]:`` (e.g. ``[trainer0]: foobar``). + Log prefixes can be customized by passing a `template string + `_ as the + ``log_line_prefix_template`` argument. + The following macros (identifiers) are substituted at runtime: + ``${role_name}, ${local_rank}, ${rank}``. For example, to prefix each log line with + global rank instead of the local rank, set ``log_line_prefix_template = "[${rank}]:``. + + + Example launching function + + :: + + def trainer(args) -> str: + return "do train" + + def main(): + start_method="spawn" + shared_queue= multiprocessing.get_context(start_method).Queue() + spec = WorkerSpec( + role="trainer", + local_world_size=nproc_per_process, + entrypoint=trainer, + args=("foobar",), + ...) + agent = LocalElasticAgent(spec, start_method) + results = agent.run() + + if results.is_failed(): + print("trainer failed") + else: + print(f"rank 0 return value: {results.return_values[0]}") + # prints -> rank 0 return value: do train + + Example launching binary + + :: + + def main(): + spec = WorkerSpec( + role="trainer", + local_world_size=nproc_per_process, + entrypoint="/usr/local/bin/trainer", + args=("--trainer-args", "foobar"), + ...) + agent = LocalElasticAgent(spec) + results = agent.run() + + if not results.is_failed(): + print("binary launches do not have return values") + + """ + + def __init__( + self, + spec: WorkerSpec, + logs_specs: LogsSpecs, + start_method="spawn", + exit_barrier_timeout: float = 300, + log_line_prefix_template: Optional[str] = None, + ): + super().__init__(spec, exit_barrier_timeout) + self._start_method = start_method + self._pcontext: Optional[PContext] = None + self._rdzv_handler = spec.rdzv_handler + self._log_line_prefix_template = log_line_prefix_template + self._worker_watchdog: Optional[timer.FileTimerServer] = None + self._logs_specs = logs_specs + + + def _setup_local_watchdog(self, envs: Dict[int, Dict[str, str]]) -> None: + enable_watchdog_env_name = TORCHELASTIC_ENABLE_FILE_TIMER + watchdog_enabled = os.getenv(enable_watchdog_env_name) + watchdog_file_env_name = TORCHELASTIC_TIMER_FILE + watchdog_file_path = os.getenv(watchdog_file_env_name) + if watchdog_enabled is not None and str(watchdog_enabled) == "1": + if watchdog_file_path is None: + watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4()) + log.info("Starting a FileTimerServer with %s ...", watchdog_file_path) + self._worker_watchdog = timer.FileTimerServer( + file_path=watchdog_file_path, + max_interval=0.1, + daemon=True, + log_event=self._log_watchdog_event) + self._worker_watchdog.start() + log.info("FileTimerServer started") + else: + log.info("Environment variable '%s' not found. 
Do not start FileTimerServer.", enable_watchdog_env_name) + # Propagate the watchdog file env to worker processes + if watchdog_file_path is not None: + for worker_env in envs.values(): + worker_env[watchdog_file_env_name] = watchdog_file_path + + + def _get_fq_hostname(self) -> str: + return socket.getfqdn(socket.gethostname()) + + def _log_watchdog_event( + self, + name: str, + request: Optional[timer.FileTimerRequest], + ) -> None: + wg = self._worker_group + spec = wg.spec + md = { + "watchdog_event": name + } + if request is not None: + md["worker_pid"] = str(request.worker_pid) + md["scope_id"] = request.scope_id + md["expiration_time"] = str(request.expiration_time) + md["signal"] = str(request.signal) + md_str = json.dumps(md) + state = "RUNNING" + metadata: Dict[str, EventMetadataValue] = { + "run_id": spec.rdzv_handler.get_run_id(), + "global_rank": None, + "group_rank": wg.group_rank, + "worker_id": None, + "role": spec.role, + "hostname": self._get_fq_hostname(), + "state": state, + "total_run_time": self._total_execution_time, + "rdzv_backend": spec.rdzv_handler.get_backend(), + "raw_error": None, + "metadata": md_str, + "agent_restarts": spec.max_restarts - self._remaining_restarts, + } + # Note: The 'metadata' field of the Event is converted to a TorchelasticStatusLogEntry later. + # The 'name' field of the Event is NOT used in the TorchelasticStatusLogEntry. + event = events.Event( + name=name, source=events.EventSource.AGENT, metadata=metadata + ) + events.record(event) + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. + @prof + def _stop_workers(self, worker_group: WorkerGroup) -> None: + self._shutdown() + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. 
+ @prof + def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]: + spec = worker_group.spec + store = worker_group.store + assert store is not None + master_addr, master_port = super()._get_master_addr_port(store) + restart_count = spec.max_restarts - self._remaining_restarts + + use_agent_store = spec.rdzv_handler.get_backend() == "static" + + args: Dict[int, Tuple] = {} + envs: Dict[int, Dict[str, str]] = {} + log_line_prefixes: Optional[Dict[int, str]] = {} if self._log_line_prefix_template else None + for worker in worker_group.workers: + local_rank = worker.local_rank + worker_env = { + "LOCAL_RANK": str(local_rank), + "RANK": str(worker.global_rank), + "GROUP_RANK": str(worker_group.group_rank), + "ROLE_RANK": str(worker.role_rank), + "ROLE_NAME": spec.role, + "LOCAL_WORLD_SIZE": str(spec.local_world_size), + "WORLD_SIZE": str(worker.world_size), + "GROUP_WORLD_SIZE": str(worker_group.group_world_size), + "ROLE_WORLD_SIZE": str(worker.role_world_size), + "MASTER_ADDR": master_addr, + "MASTER_PORT": str(master_port), + "TORCHELASTIC_RESTART_COUNT": str(restart_count), + "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts), + "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(), + "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store), + "TORCH_NCCL_ASYNC_ERROR_HANDLING": os.getenv( + "TORCH_NCCL_ASYNC_ERROR_HANDLING", str(1) + ), + } + if "OMP_NUM_THREADS" in os.environ: + worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"] + + + if self._log_line_prefix_template: + log_line_prefix = Template(self._log_line_prefix_template).safe_substitute( + role_name=spec.role, + rank=worker.global_rank, + local_rank=local_rank,) + log_line_prefixes[local_rank] = log_line_prefix + + envs[local_rank] = worker_env + worker_args = list(spec.args) + worker_args = macros.substitute(worker_args, str(local_rank)) + args[local_rank] = tuple(worker_args) + + self._setup_local_watchdog(envs=envs) + + assert spec.entrypoint is not None + assert self._logs_specs is not None + self._pcontext = start_processes( + name=spec.role, + entrypoint=spec.entrypoint, + args=args, + envs=envs, + logs_specs=self._logs_specs, + log_line_prefixes=log_line_prefixes, + start_method=self._start_method, + ) + + return self._pcontext.pids() + + def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None: + if self._worker_watchdog is not None: + self._worker_watchdog.stop() + self._worker_watchdog = None + if self._pcontext: + self._pcontext.close(death_sig) + if self._rdzv_handler: + self._rdzv_handler.shutdown() + + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `torch.distributed.elastic.metrics.prof`. + @prof + def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult: + role = worker_group.spec.role + worker_pids = {w.id for w in worker_group.workers} + assert self._pcontext is not None + pc_pids = set(self._pcontext.pids().values()) + if worker_pids != pc_pids: + log.error( + "[%s] worker pids do not match process_context pids." 
+ " Expected: %s, actual: %s", + role, worker_pids, pc_pids + ) + return RunResult(state=WorkerState.UNKNOWN) + + result = self._pcontext.wait(0) + if result: + if result.is_failed(): + # map local rank failure to global rank + worker_failures = {} + for local_rank, failure in result.failures.items(): + worker = worker_group.workers[local_rank] + worker_failures[worker.global_rank] = failure + return RunResult( + state=WorkerState.FAILED, + failures=worker_failures, + ) + else: + # copy ret_val_queue into a map with a global ranks + workers_ret_vals = {} + for local_rank, ret_val in result.return_values.items(): + worker = worker_group.workers[local_rank] + workers_ret_vals[worker.global_rank] = ret_val + return RunResult( + state=WorkerState.SUCCEEDED, + return_values=workers_ret_vals, + ) + else: + return RunResult(state=WorkerState.HEALTHY) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa72380fc75c5217673777d2a9025b4c0debc107 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f9f305fb702c33107bd688907ada6e6dce01d03 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52e6a1884206cbc529dcda4fa0d18494bb40138a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py new file mode 100644 index 0000000000000000000000000000000000000000..62f5d7500922ceb035f1f6229e6edb93acfb1922 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import json +from dataclasses import asdict, dataclass, field +from enum import Enum +from typing import Dict, Union, Optional + +__all__ = ['EventSource', 'Event', 'NodeState', 'RdzvEvent'] + +EventMetadataValue = Union[str, int, float, bool, None] + + +class EventSource(str, Enum): + """Known identifiers of the event producers.""" + + AGENT = "AGENT" + WORKER = "WORKER" + + +@dataclass +class Event: + """ + The class represents the generic event that occurs during the torchelastic job execution. + + The event can be any kind of meaningful action. + + Args: + name: event name. 
+ source: the event producer, e.g. agent or worker + timestamp: timestamp in milliseconds when event occurred. + metadata: additional data that is associated with the event. + """ + + name: str + source: EventSource + timestamp: int = 0 + metadata: Dict[str, EventMetadataValue] = field(default_factory=dict) + + def __str__(self): + return self.serialize() + + @staticmethod + def deserialize(data: Union[str, "Event"]) -> "Event": + if isinstance(data, Event): + return data + if isinstance(data, str): + data_dict = json.loads(data) + data_dict["source"] = EventSource[data_dict["source"]] # type: ignore[possibly-undefined] + return Event(**data_dict) + + def serialize(self) -> str: + return json.dumps(asdict(self)) + + +class NodeState(str, Enum): + """The states that a node can be in rendezvous.""" + + INIT = "INIT" + RUNNING = "RUNNING" + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + + +@dataclass +class RdzvEvent: + """ + Dataclass to represent any rendezvous event. + + Args: + name: Event name. (E.g. Current action being performed) + run_id: The run id of the rendezvous + message: The message describing the event + hostname: Hostname of the node + pid: The process id of the node + node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED) + master_endpoint: The master endpoint for the rendezvous store, if known + rank: The rank of the node, if known + local_id: The local_id of the node, if defined in dynamic_rendezvous.py + error_trace: Error stack trace, if this is an error event. + """ + + name: str + run_id: str + message: str + hostname: str + pid: int + node_state: NodeState + master_endpoint: str = "" + rank: Optional[int] = None + local_id: Optional[int] = None + error_trace: str = "" + + def __str__(self): + return self.serialize() + + @staticmethod + def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent": + if isinstance(data, RdzvEvent): + return data + if isinstance(data, str): + data_dict = json.loads(data) + data_dict["node_state"] = NodeState[data_dict["node_state"]] # type: ignore[possibly-undefined] + return RdzvEvent(**data_dict) + + def serialize(self) -> str: + return json.dumps(asdict(self)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e6a55406f0334a7199d5fc4a653ddc7f744910 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Library that launches and manages ``n`` copies of worker subprocesses either specified by a function or a binary. + +For functions, it uses ``torch.multiprocessing`` (and therefore python +``multiprocessing``) to spawn/fork worker processes. For binaries it uses python +``subprocessing.Popen`` to create worker processes. 
+ + +Usage 1: Launching two trainers as a function + +:: + + from torch.distributed.elastic.multiprocessing import Std, start_processes + + def trainer(a, b, c): + pass # train + + + # runs two trainers + # LOCAL_RANK=0 trainer(1,2,3) + # LOCAL_RANK=1 trainer(4,5,6) + ctx = start_processes( + name="trainer", + entrypoint=trainer, + args={0: (1,2,3), 1: (4,5,6)}, + envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}}, + log_dir="/tmp/foobar", + redirects=Std.ALL, # write all worker stdout/stderr to a log file + tee={0: Std.ERR}, # tee only local rank 0's stderr to console + ) + + # waits for all copies of trainer to finish + ctx.wait() + +Usage 2: Launching 2 echo workers as a binary + +:: + + # same as invoking + # echo hello + # echo world > stdout.log + ctx = start_processes( + name="echo" + entrypoint="echo", + log_dir="/tmp/foobar", + args={0: "hello", 1: "world"}, + redirects={1: Std.OUT}, + ) + +Just like ``torch.multiprocessing``, the return value of the function +:func:`start_processes` is a process context (:class:`api.PContext`). If a function +was launched, a :class:`api.MultiprocessContext` is returned and if a binary +was launched a :class:`api.SubprocessContext` is returned. Both are specific +implementations of the parent :class:`api.PContext` class. +""" + +import os +from typing import Callable, Dict, Optional, Tuple, Union, Set + +from torch.distributed.elastic.multiprocessing.api import ( # noqa: F401 + _validate_full_rank, + DefaultLogsSpecs, + LogsDest, + LogsSpecs, + MultiprocessContext, + PContext, + ProcessFailure, + RunProcsResult, + SignalException, + Std, + SubprocessContext, + to_map, +) +from torch.distributed.elastic.utils.logging import get_logger + +__all__ = [ + "start_processes", + "MultiprocessContext", + "PContext", + "ProcessFailure", + "RunProcsResult", + "SignalException", + "Std", + "LogsDest", + "LogsSpecs", + "DefaultLogsSpecs", + "SubprocessContext", + "to_map", +] + +log = get_logger(__name__) + + +def start_processes( + name: str, + entrypoint: Union[Callable, str], + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + start_method: str = "spawn", +) -> PContext: + """ + Start ``n`` copies of ``entrypoint`` processes with the provided options. + + ``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary). + The number of copies is determined by the number of entries for ``args`` and + ``envs`` arguments, which need to have the same key set. + + ``args`` and ``env`` parameters are the arguments and environment variables + to pass down to the entrypoint mapped by the replica index (local rank). + All local ranks must be accounted for. + That is, the keyset should be ``{0,1,...,(nprocs-1)}``. + + .. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings. + If any other type is given, then it is casted to a string representation + (e.g. ``str(arg1)``). Furthermore, a binary failure will only write + an ``error.json`` error file if the main function is annotated with + ``torch.distributed.elastic.multiprocessing.errors.record``. For function launches, + this is done by default and there is no need to manually annotate + with the ``@record`` annotation. + + ``redirects`` and ``tee`` are bitmasks specifying which std stream(s) to redirect + to a log file in the ``log_dir``. Valid mask values are defined in ``Std``. 
+ To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as + the local rank to specify the redirect behavior for. + Any missing local ranks will default to ``Std.NONE``. + + ``tee`` acts like the unix "tee" command in that it redirects + prints to console. + To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter. + + For each process, the ``log_dir`` will contain: + + #. ``{local_rank}/error.json``: if the process failed, a file with the error info + #. ``{local_rank}/stdout.json``: if ``redirect & STDOUT == STDOUT`` + #. ``{local_rank}/stderr.json``: if ``redirect & STDERR == STDERR`` + + .. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory. + + Example: + :: + + log_dir = "/tmp/test" + + # ok; two copies of foo: foo("bar0"), foo("bar1") + start_processes( + name="trainer", + entrypoint=foo, + args:{0:("bar0",), 1:("bar1",), + envs:{0:{}, 1:{}}, + log_dir=log_dir + ) + + # invalid; envs missing for local rank 1 + start_processes( + name="trainer", + entrypoint=foo, + args:{0:("bar0",), 1:("bar1",), + envs:{0:{}}, + log_dir=log_dir + ) + + # ok; two copies of /usr/bin/touch: touch file1, touch file2 + start_processes( + name="trainer", + entrypoint="/usr/bin/touch", + args:{0:("file1",), 1:("file2",), + envs:{0:{}, 1:{}}, + log_dir=log_dir + ) + + # caution; arguments casted to string, runs: + # echo "1" "2" "3" and echo "[1, 2, 3]" + start_processes( + name="trainer", + entrypoint="/usr/bin/echo", + args:{0:(1,2,3), 1:([1,2,3],), + envs:{0:{}, 1:{}}, + log_dir=log_dir + ) + + Args: + name: a human readable short name that describes what the processes are + (used as header when tee'ing stdout/stderr outputs) + entrypoint: either a ``Callable`` (function) or ``cmd`` (binary) + args: arguments to each replica + envs: env vars to each replica + log_dir: directory used to write log files + start_method: multiprocessing start method (spawn, fork, forkserver) + ignored for binaries + redirects: which std streams to redirect to a log file + tee: which std streams to redirect + print to console + local_ranks_filter: which ranks' logs to print to console + + """ + + nprocs = len(args) + _validate_full_rank(args, nprocs, "args") + _validate_full_rank(envs, nprocs, "envs") + + context: PContext + if isinstance(entrypoint, str): + context = SubprocessContext( + name=name, + entrypoint=entrypoint, + args=args, + envs=envs, + logs_specs=logs_specs, + log_line_prefixes=log_line_prefixes, + ) + else: + context = MultiprocessContext( + name=name, + entrypoint=entrypoint, + args=args, + envs=envs, + log_line_prefixes=log_line_prefixes, + start_method=start_method, + logs_specs=logs_specs, + ) + + try: + context.start() + return context + except Exception: + context.close() + raise diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py new file mode 100644 index 0000000000000000000000000000000000000000..9a6158e760f0e5cfa36dfc7dc303b9ec788241a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py @@ -0,0 +1,873 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
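Note that the docstring examples above still pass ``log_dir``/``redirects`` keyword arguments directly, while the ``start_processes`` signature defined here takes a single ``logs_specs`` object. A minimal sketch using ``DefaultLogsSpecs`` (the assumption here is that its constructor accepts the ``log_dir``/``redirects``/``tee`` options described for ``LogsSpecs`` below):

::

    from torch.distributed.elastic.multiprocessing import (
        DefaultLogsSpecs,
        Std,
        start_processes,
    )

    def trainer(msg: str) -> None:
        print(msg)

    # /tmp/test must already exist and be an empty directory (see note above).
    ctx = start_processes(
        name="trainer",
        entrypoint=trainer,
        args={0: ("hello",), 1: ("world",)},
        envs={0: {"LOCAL_RANK": "0"}, 1: {"LOCAL_RANK": "1"}},
        logs_specs=DefaultLogsSpecs(log_dir="/tmp/test", redirects=Std.ALL, tee={0: Std.ERR}),
    )
    ctx.wait()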
+ +import abc +import logging +import os +import re +import shutil +import signal +import subprocess +import sys +import tempfile +import time +from contextlib import nullcontext +from dataclasses import dataclass, field +from enum import IntFlag +from multiprocessing import synchronize +from types import FrameType +from typing import Any, Callable, Dict, Optional, Set, Tuple, Union +from abc import ABC, abstractmethod + +import torch.multiprocessing as mp +from torch.distributed.elastic.multiprocessing.errors import ProcessFailure, record +from torch.distributed.elastic.multiprocessing.redirects import ( + redirect_stderr, + redirect_stdout, +) + +from torch.distributed.elastic.multiprocessing.subprocess_handler import SubprocessHandler, get_subprocess_handler +from torch.distributed.elastic.multiprocessing.tail_log import TailLog + +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" + + +log = logging.getLogger(__name__) + +__all__ = [ + "DefaultLogsSpecs", + "SignalException", + "Std", + "to_map", + "RunProcsResult", + "PContext", + "get_std_cm", + "MultiprocessContext", + "SubprocessContext", +] + +class SignalException(Exception): + """ + Exception is raised inside the torchelastic agent process by the termination handler + if the death signal got received by the process. + """ + + def __init__(self, msg: str, sigval: signal.Signals) -> None: + super().__init__(msg) + self.sigval = sigval + + +def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None: + """Termination handler that raises exceptions on the main process. + + When the process receives death signal(SIGTERM, SIGINT), this termination handler will + be invoked. It raises the ``SignalException`` exception that should be processed by the + user code. Python does not terminate process after the termination handler is finished, + so the exception should not be silently ignored, otherwise the process will never + be terminated. + """ + sigval = signal.Signals(signum) + raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval) + + +def _get_kill_signal() -> signal.Signals: + """Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGKILL + + +def _get_default_signal() -> signal.Signals: + """Get the default termination signal. 
SIGTERM for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGTERM + + +def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str): + actual_keys = set(d.keys()) + expected_keys = set(range(nprocs)) + + if actual_keys != expected_keys: + raise RuntimeError( + f"{what}, local rank mapping mismatch," + f" expected: {expected_keys}, actual: {actual_keys}" + ) + + +_MAPPING_REGEX = r"^(\d:[0123],)*(\d:[0123])$" +_VALUE_REGEX = r"^[0123]$" + + +class Std(IntFlag): + NONE = 0 + OUT = 1 + ERR = 2 + ALL = OUT | ERR + + @classmethod + def from_str(cls, vm: str) -> Union["Std", Dict[int, "Std"]]: + """ + Example: + :: + + from_str("0") -> Std.NONE + from_str("1") -> Std.OUT + from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR} + + Any other input raises an exception + """ + + def to_std(v: str) -> Std: # type: ignore[return] + s = Std(int(v)) + if s in Std: + return s + # return None -> should NEVER reach here since we regex check input + + if re.match(_VALUE_REGEX, vm): # vm is a number (e.g. 0) + return to_std(vm) + elif re.match(_MAPPING_REGEX, vm): # vm is a mapping (e.g. 0:1,1:2) + d: Dict[int, Std] = {} + for m in vm.split(","): + i, v = m.split(":") + d[int(i)] = to_std(v) + return d + else: + raise ValueError( + f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>" + ) + + +def to_map( + val_or_map: Union[Std, Dict[int, Std]], local_world_size: int +) -> Dict[int, Std]: + """ + Certain APIs take redirect settings either as a single value (e.g. apply to all + local ranks) or as an explicit user-provided mapping. This method is a convenience + method that converts a value or mapping into a mapping. + + Example: + :: + + to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} + to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} + to_map({0: Std.OUT, 1: Std.OUT}, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} + """ + if isinstance(val_or_map, Std): + return dict.fromkeys(range(local_world_size), val_or_map) + else: + map = {} + for i in range(local_world_size): + map[i] = val_or_map.get(i, Std.NONE) + return map + + +@dataclass +class LogsDest: + """ + For each log type, holds mapping of local rank ids to file paths. + """ + stdouts: Dict[int, str] = field(default_factory=dict) + stderrs: Dict[int, str] = field(default_factory=dict) + tee_stdouts: Dict[int, str] = field(default_factory=dict) + tee_stderrs: Dict[int, str] = field(default_factory=dict) + error_files: Dict[int, str] = field(default_factory=dict) + + +class LogsSpecs(ABC): + """ + Defines logs processing and redirection for each worker process. + + Args: + log_dir: + Base directory where logs will be written. + redirects: + Streams to redirect to files. Pass a single ``Std`` + enum to redirect for all workers, or a mapping keyed + by local_rank to selectively redirect. + tee: + Streams to duplicate to stdout/stderr. + Pass a single ``Std`` enum to duplicate streams for all workers, + or a mapping keyed by local_rank to selectively duplicate. 
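+        local_ranks_filter:
+            If given, only the local ranks in this set have their
+            output tee'd to the console; output of the remaining ranks
+            is still written to the log files (or dropped if the stream
+            was not redirected).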
+ """ + + def __init__( + self, + log_dir: Optional[str] = None, + redirects: Union[Std, Dict[int, Std]] = Std.NONE, + tee: Union[Std, Dict[int, Std]] = Std.NONE, + local_ranks_filter: Optional[Set[int]] = None, + ) -> None: + self._root_log_dir = log_dir + self._redirects = redirects + self._tee = tee + self._local_ranks_filter = local_ranks_filter + + @abstractmethod + def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest: + """ + Given the environment variables, builds destination of log files for each of the local ranks. + + Envs parameter contains env variables dict for each of the local ranks, where entries are defined in: + :func:`~torchelastic.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent._start_workers`. + """ + pass + + @property + @abstractmethod + def root_log_dir(self) -> str: + pass + +class DefaultLogsSpecs(LogsSpecs): + """ + Default LogsSpecs implementation: + + - `log_dir` will be created if it doesn't exist + - Generates nested folders for each attempt and rank. + """ + def __init__( + self, + log_dir: Optional[str] = None, + redirects: Union[Std, Dict[int, Std]] = Std.NONE, + tee: Union[Std, Dict[int, Std]] = Std.NONE, + local_ranks_filter: Optional[Set[int]] = None, + ) -> None: + if log_dir != os.devnull: + if not log_dir: + log_dir = tempfile.mkdtemp(prefix="torchelastic_") + elif not os.path.exists(log_dir): + os.makedirs(log_dir) + else: + if os.path.isfile(log_dir): + raise NotADirectoryError(f"log_dir: {log_dir} is a file") + super().__init__(log_dir, redirects, tee, local_ranks_filter) + # initialized only once + self._run_log_dir = None + + @property + def root_log_dir(self) -> str: + return str(self._root_log_dir) + + def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str): + base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_") + os.makedirs(base_log_dir, exist_ok=True) + dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir) + log.info("log directory set to: %s", dir) + return dir + + def reify(self, envs: Dict[int, Dict[str, str]],) -> LogsDest: + """ + Uses following scheme to build log destination paths: + + - `//attempt_//stdout.log` + - `//attempt_//stderr.log` + - `//attempt_//error.json` + """ + nprocs = len(envs) + global_env = {} # use only to query properies that are not dependent on a rank + if nprocs > 0: + global_env = envs[0] + else: + log.warning("Empty envs map provided when defining logging destinations.") + # Keys are always defined, but values can be missing in unit tests + run_id = global_env.get("TORCHELASTIC_RUN_ID", "test_run_id") + restart_count = global_env.get("TORCHELASTIC_RESTART_COUNT", "0") + + attempt_log_dir: str = "" + if self._root_log_dir != os.devnull: + if not self._run_log_dir: + self._run_log_dir = self._make_log_dir(self._root_log_dir, run_id) + + attempt_log_dir = os.path.join(self._run_log_dir, f"attempt_{restart_count}") # type: ignore[call-overload] + shutil.rmtree(attempt_log_dir, ignore_errors=True) + os.makedirs(attempt_log_dir) + + if self._root_log_dir == os.devnull: + attempt_log_dir = os.devnull + + # create subdirs for each local rank in the logs_dir + # logs_dir + # |- 0 + # |- error.json + # |- stdout.log + # |- stderr.log + # |- ... 
+ # |- (nprocs-1) + redirs = to_map(self._redirects, nprocs) + ts = to_map(self._tee, nprocs) + + # to tee stdout/stderr we first redirect into a file + # then tail -f stdout.log/stderr.log so add tee settings to redirects + for local_rank, tee_std in ts.items(): + redirect_std = redirs[local_rank] + redirs[local_rank] = redirect_std | tee_std + + SYS_STREAM = "" # special case to indicate to output to console + stdouts = dict.fromkeys(range(nprocs), SYS_STREAM) + stderrs = dict.fromkeys(range(nprocs), SYS_STREAM) + tee_stdouts: Dict[int, str] = {} + tee_stderrs: Dict[int, str] = {} + error_files = {} + + for local_rank in range(nprocs): + + if attempt_log_dir == os.devnull: + tee_stdouts[local_rank] = os.devnull + tee_stderrs[local_rank] = os.devnull + error_files[local_rank] = os.devnull + envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = "" + else: + clogdir = os.path.join(attempt_log_dir, str(local_rank)) + os.mkdir(clogdir) + + rd = redirs[local_rank] + if (rd & Std.OUT) == Std.OUT: + stdouts[local_rank] = os.path.join(clogdir, "stdout.log") + if (rd & Std.ERR) == Std.ERR: + stderrs[local_rank] = os.path.join(clogdir, "stderr.log") + + t = ts[local_rank] + if t & Std.OUT == Std.OUT: + tee_stdouts[local_rank] = stdouts[local_rank] + if t & Std.ERR == Std.ERR: + tee_stderrs[local_rank] = stderrs[local_rank] + + if self._local_ranks_filter and local_rank not in self._local_ranks_filter: + # If stream is tee'd, only write to file, but don't tail + if local_rank in tee_stdouts: + tee_stdouts.pop(local_rank, None) + if local_rank in tee_stderrs: + tee_stderrs.pop(local_rank, None) + + # If stream is not redirected, don't print + if stdouts[local_rank] == SYS_STREAM: + stdouts[local_rank] = os.devnull + if stderrs[local_rank] == SYS_STREAM: + stderrs[local_rank] = os.devnull + + error_file = os.path.join(clogdir, "error.json") + error_files[local_rank] = error_file + log.info("Setting worker%s reply file to: %s", local_rank, error_file) + envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = error_file + + return LogsDest(stdouts, stderrs, tee_stdouts, tee_stderrs, error_files) + + def __repr__(self) -> str: + return ( + f"DefaultLogsSpecs(root_log_dir={self._root_log_dir}, redirects={self._redirects}, " + f"tee={self._tee}, local_ranks_filter={self._local_ranks_filter})" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DefaultLogsSpecs): + return False + + return ( + self._root_log_dir == other._root_log_dir + and self._redirects == other._redirects + and self._tee == other._tee + and self._local_ranks_filter == other._local_ranks_filter + ) + + +@dataclass +class RunProcsResult: + """ + Results of a completed run of processes started with ``start_processes()``. Returned by ``PContext``. + + Note the following: + + 1. All fields are mapped by local rank + 2. ``return_values`` - only populated for functions (not the binaries). + 3. ``stdouts`` - path to stdout.log (empty string if no redirect) + 4. ``stderrs`` - path to stderr.log (empty string if no redirect) + + """ + + return_values: Dict[int, Any] = field(default_factory=dict) + failures: Dict[int, ProcessFailure] = field(default_factory=dict) + stdouts: Dict[int, str] = field(default_factory=dict) + stderrs: Dict[int, str] = field(default_factory=dict) + + def is_failed(self) -> bool: + return len(self.failures) > 0 + + +class PContext(abc.ABC): + """ + The base class that standardizes operations over a set of processes that are launched via different mechanisms. 
+ + The name ``PContext`` is intentional to disambiguate with ``torch.multiprocessing.ProcessContext``. + + .. warning:: stdouts and stderrs should ALWAYS be a superset of + tee_stdouts and tee_stderrs (respectively) this is b/c + tee is implemented as a redirect + tail -f + """ + + def __init__( + self, + name: str, + entrypoint: Union[Callable, str], + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + + ): + self.name = name + # validate that all mappings have the same number of keys and + # all local ranks are accounted for + nprocs = len(args) + + # TODO log_line_prefixes can be exanded too + logs_dest = logs_specs.reify(envs) + + _validate_full_rank(logs_dest.stdouts, nprocs, "stdouts") + _validate_full_rank(logs_dest.stderrs, nprocs, "stderrs") + + self.entrypoint = entrypoint + self.args = args + self.envs = envs + self.stdouts = logs_dest.stdouts + self.stderrs = logs_dest.stderrs + self.error_files = logs_dest.error_files + self.nprocs = nprocs + + self._stdout_tail = TailLog(name, logs_dest.tee_stdouts, sys.stdout, log_line_prefixes) + self._stderr_tail = TailLog(name, logs_dest.tee_stderrs, sys.stderr, log_line_prefixes) + + def start(self) -> None: + """Start processes using parameters defined in the constructor.""" + signal.signal(signal.SIGTERM, _terminate_process_handler) + signal.signal(signal.SIGINT, _terminate_process_handler) + if not IS_WINDOWS: + signal.signal(signal.SIGHUP, _terminate_process_handler) + signal.signal(signal.SIGQUIT, _terminate_process_handler) + self._start() + self._stdout_tail.start() + self._stderr_tail.start() + + @abc.abstractmethod + def _start(self) -> None: + """Start processes using strategy defined in a particular context.""" + raise NotImplementedError() + + @abc.abstractmethod + def _poll(self) -> Optional[RunProcsResult]: + """ + Poll the run status of the processes running under this context. + This method follows an "all-or-nothing" policy and returns + a ``RunProcessResults`` object if either all processes complete + successfully or any process fails. Returns ``None`` if + all processes are still running. + """ + raise NotImplementedError() + + def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]: + """ + Wait for the specified ``timeout`` seconds, polling every ``period`` seconds + for the processes to be done. Returns ``None`` if the processes are still running + on timeout expiry. Negative timeout values are interpreted as "wait-forever". + A timeout value of zero simply queries the status of the processes (e.g. equivalent + to a poll). + + ..note: Multiprocessing library registers SIGTERM and SIGINT signal handlers that raise + ``SignalException`` when the signals received. It is up to the consumer of the code + to properly handle the exception. It is important not to swallow the exception otherwise + the process would not terminate. Example of the typical workflow can be: + + .. code-block:: python + pc = start_processes(...) + try: + pc.wait(1) + .. do some other work + except SignalException as e: + pc.shutdown(e.sigval, timeout=30) + + If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating + received signal. If child processes will not terminate in the timeout time, the process will send + the SIGKILL. 
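+
+        On :class:`PContext` itself this shutdown behavior is exposed via
+        :py:meth:`close`; in the workflow above that corresponds to
+        ``pc.close(e.sigval, timeout=30)``.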
+ """ + if timeout == 0: + return self._poll() + + if timeout < 0: + timeout = sys.maxsize + + expiry = time.time() + timeout + while time.time() < expiry: + pr = self._poll() + if pr: + return pr + time.sleep(period) + + return None + + @abc.abstractmethod + def pids(self) -> Dict[int, int]: + """Return pids of processes mapped by their respective local_ranks.""" + raise NotImplementedError() + + @abc.abstractmethod + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + r""" + Terminates all processes managed by this context and cleans up any + meta resources (e.g. redirect, error_file files). + """ + raise NotImplementedError() + + def close( + self, death_sig: Optional[signal.Signals] = None, timeout: int = 30 + ) -> None: + r""" + Terminates all processes managed by this context and cleans up any + meta resources (e.g. redirect, error_file files). + + Args: + death_sig: Death signal to terminate processes. + timeout: Time to wait for processes to finish, if process is + still alive after this time, it will be terminated via SIGKILL. + """ + if not death_sig: + death_sig = _get_default_signal() + self._close(death_sig=death_sig, timeout=timeout) + if self._stdout_tail: + self._stdout_tail.stop() + if self._stderr_tail: + self._stderr_tail.stop() + + +def get_std_cm(std_rd: str, redirect_fn): + if IS_WINDOWS or IS_MACOS or not std_rd: + return nullcontext() + else: + return redirect_fn(std_rd) + + +def _wrap( + local_rank: int, + fn: Callable, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + stdout_redirects: Dict[int, str], # redirect file for stdout (to console if None) + stderr_redirects: Dict[int, str], # redirect file for stderr (to console if None) + ret_vals: Dict[int, mp.SimpleQueue], + queue_finished_reading_event: synchronize.Event, +) -> None: + # get the per-rank params up front so we fail fast if no mapping is found + args_ = args[local_rank] + env_ = envs[local_rank] + ret_val_ = ret_vals[local_rank] + + stdout_rd = stdout_redirects[local_rank] + stderr_rd = stderr_redirects[local_rank] + + stdout_cm = get_std_cm(stdout_rd, redirect_stdout) + stderr_cm = get_std_cm(stderr_rd, redirect_stderr) + + for k, v in env_.items(): + os.environ[k] = v + + with stdout_cm, stderr_cm: + ret = record(fn)(*args_) + ret_val_.put(ret) + queue_finished_reading_event.wait() + + +class MultiprocessContext(PContext): + """``PContext`` holding worker processes invoked as a function.""" + + def __init__( + self, + name: str, + entrypoint: Callable, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + start_method: str, + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + ): + super().__init__( + name, + entrypoint, + args, + envs, + logs_specs, + log_line_prefixes, + ) + + self.start_method = start_method + # each ret_val queue will always contain a single element. + self._ret_vals = { + local_rank: mp.get_context(self.start_method).SimpleQueue() + for local_rank in range(self.nprocs) + } + + # see comments in ``join()`` for what this is + self._return_values: Dict[int, Any] = {} + self._pc: Optional[mp.ProcessContext] = None + # Note: set method should ONLY be invoked for the use case when all processes finished + # successfully. If any process died on event.wait() calling set() method will deadlock. + self._worker_finished_event = mp.get_context(self.start_method).Event() + + def _start(self): + if self._pc: + raise ValueError( + "The process context already initialized." + " Most likely the start method got called twice." 
+ ) + self._pc = mp.start_processes( + fn=_wrap, + args=( + self.entrypoint, + self.args, + self.envs, + self.stdouts, + self.stderrs, + self._ret_vals, + self._worker_finished_event, + ), + nprocs=self.nprocs, + join=False, + daemon=False, + start_method=self.start_method, + ) + + def _is_done(self) -> bool: + return len(self._return_values) == self.nprocs + + def _poll(self) -> Optional[RunProcsResult]: + assert self._pc is not None # assertion for mypy type checker + + try: + # torch.mp.ProcessContext Throws an Exception if some/all of + # worker processes failed + # timeout < 0 checks worker status and return immediately + # Join will never return success since we use synchronize.Event to wait + # for all processes to finish. + self._pc.join(-1) + + # IMPORTANT: we use multiprocessing.Queue to carry worker return values + # back to the parent, the worker process will wait before terminating + # until all the buffered items are fed by the feeder thread to the underlying + # pipe. Hence to prevent deadlocks on large return values, + # we opportunistically try queue.get on each join call + # See: https://docs.python.org/2/library/multiprocessing.html#all-platforms + for local_rank in range(0, self.nprocs): + return_queue = self._ret_vals[local_rank] + if not return_queue.empty(): + # save the return values temporarily into a member var + self._return_values[local_rank] = return_queue.get() + + if self._is_done(): + # we should ALWAYS have ALL the return values when all the processes are done + self._worker_finished_event.set() + # Wait untill all processes are finished. At this point workers finished executing + # user function + self._pc.join() + _validate_full_rank( + self._return_values, self.nprocs, "return_value queue" + ) + self.close() + return RunProcsResult( + return_values=self._return_values, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + else: + return None + except (mp.ProcessRaisedException, mp.ProcessExitedException) as e: + failed_local_rank = e.error_index + + # entrypoint for MultiprocessContext will always be a Callable + fn_name = self.entrypoint.__qualname__ # type: ignore[union-attr] + failed_proc = self._pc.processes[failed_local_rank] + error_filepath = self.error_files[failed_local_rank] + + log.exception( + "failed (exitcode: %s)" + " local_rank: %s (pid: %s)" + " of fn: %s (start_method: %s)", + failed_proc.exitcode, + failed_local_rank, e.pid, + fn_name, self.start_method, + ) + + self.close() + return RunProcsResult( + failures={ + failed_local_rank: ProcessFailure( + local_rank=failed_local_rank, + pid=e.pid, + exitcode=failed_proc.exitcode, + error_file=error_filepath, + ) + }, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + + def pids(self) -> Dict[int, int]: + assert self._pc is not None # assertion for mypy type checking + return dict(enumerate(self._pc.pids())) + + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + if not self._pc: + return + for proc in self._pc.processes: + if proc.is_alive(): + log.warning("Closing process %s via signal %s", proc.pid, death_sig.name) + try: + os.kill(proc.pid, death_sig) + except ProcessLookupError: + # If the process exited because of some reason, + # `ProcessLookupError` will be raised, it is safe to ignore it. 
+ pass + end = time.monotonic() + timeout + for proc in self._pc.processes: + time_to_wait = end - time.monotonic() + if time_to_wait <= 0: + break + proc.join(time_to_wait) + for proc in self._pc.processes: + if proc.is_alive(): + log.warning( + "Unable to shutdown process %s via %s, forcefully exiting via %s", + proc.pid, death_sig, _get_kill_signal() + ) + try: + os.kill(proc.pid, _get_kill_signal()) + except ProcessLookupError: + # If the process exited because of some reason, + # `ProcessLookupError` will be raised, it is safe to ignore it. + pass + proc.join() + +class SubprocessContext(PContext): + """``PContext`` holding worker processes invoked as a binary.""" + + def __init__( + self, + name: str, + entrypoint: str, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + + ): + super().__init__( + name, + entrypoint, + args, + envs, + logs_specs, + log_line_prefixes, + ) + + # state vector; _vdone[local_rank] -> is local_rank finished or not + self._running_local_ranks: Set[int] = set(range(self.nprocs)) + self._failures: Dict[int, ProcessFailure] = {} + self.subprocess_handlers: Dict[int, SubprocessHandler] = {} + + def _start(self): + if self.subprocess_handlers: + raise ValueError( + "The subprocess handlers already initialized. Most likely the start method got called twice." + ) + self.subprocess_handlers = { + local_rank: get_subprocess_handler( + entrypoint=self.entrypoint, # type: ignore[arg-type] # entrypoint is always a str + args=self.args[local_rank], + env=self.envs[local_rank], + stdout=self.stdouts[local_rank], + stderr=self.stderrs[local_rank], + local_rank_id=local_rank, + ) + for local_rank in range(self.nprocs) + } + + def _poll(self) -> Optional[RunProcsResult]: + done_local_ranks = set() + for local_rank in self._running_local_ranks: + handler = self.subprocess_handlers[local_rank] + exitcode = handler.proc.poll() + if exitcode is not None: + done_local_ranks.add(local_rank) + if exitcode != 0: # failed or signaled + self._failures[local_rank] = ProcessFailure( + local_rank=local_rank, + pid=handler.proc.pid, + exitcode=exitcode, + error_file=self.error_files[local_rank], + ) + # else: --> succeeded; nothing to do + + self._running_local_ranks.difference_update(done_local_ranks) + + # if ALL procs are finished or ANY have failed + if not self._running_local_ranks or self._failures: + self.close() # terminate all running procs + result = RunProcsResult( + failures=self._failures, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + if result.is_failed(): + first_failure = min(result.failures.values(), key=lambda f: f.timestamp) + log.error( + "failed (exitcode: %s)" + " local_rank: %s (pid: %s)" + " of binary: %s", + first_failure.exitcode, first_failure.local_rank, first_failure.pid, self.entrypoint + ) + else: + # Populate return with dummy values. 
This provides consistency with MultiprocessingHandler + result.return_values = dict.fromkeys(range(self.nprocs)) + + return result + else: # there are no failures and procs still running + return None + + def pids(self) -> Dict[int, int]: + return { + local_rank: sh.proc.pid + for local_rank, sh in self.subprocess_handlers.items() + } + + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + if not self.subprocess_handlers: + return + for handler in self.subprocess_handlers.values(): + if handler.proc.poll() is None: + log.warning( + "Sending process %s closing signal %s", handler.proc.pid, death_sig.name + ) + handler.close(death_sig=death_sig) + end = time.monotonic() + timeout + for handler in self.subprocess_handlers.values(): + time_to_wait = end - time.monotonic() + if time_to_wait <= 0: + break + try: + handler.proc.wait(time_to_wait) + except subprocess.TimeoutExpired: + # Ignore the timeout expired exception, since + # the child process will be forcefully terminated via SIGKILL + pass + for handler in self.subprocess_handlers.values(): + if handler.proc.poll() is None: + log.warning( + "Unable to shutdown process %s via %s, forcefully exiting via %s", + handler.proc.pid, death_sig, _get_kill_signal() + ) + handler.close(death_sig=_get_kill_signal()) + handler.proc.wait() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py new file mode 100644 index 0000000000000000000000000000000000000000..f99c3bda4580b0c8053e9884607f63502bcb90ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +import os +import time +from concurrent.futures._base import Future +from concurrent.futures.thread import ThreadPoolExecutor +from threading import Event +from typing import Dict, List, Optional, TextIO + +__all__ = ["tail_logfile", "TailLog"] + +log = logging.getLogger(__name__) + + +def tail_logfile( + header: str, file: str, dst: TextIO, finished: Event, interval_sec: float +): + + while not os.path.exists(file): + if finished.is_set(): + return + time.sleep(interval_sec) + + with open(file, errors="replace") as fp: + while True: + line = fp.readline() + + if line: + dst.write(f"{header}{line}") + else: # reached EOF + if finished.is_set(): + # log line producer is finished + break + else: + # log line producer is still going + # wait for a bit before looping again + time.sleep(interval_sec) + + +class TailLog: + """ + Tail the given log files. + + The log files do not have to exist when the ``start()`` method is called. The tail-er will gracefully wait until + the log files are created by the producer and will tail the contents of the + log files until the ``stop()`` method is called. + + .. warning:: ``TailLog`` will wait indefinitely for the log file to be created! + + Each log file's line will be suffixed with a header of the form: ``[{name}{idx}]:``, + where the ``name`` is user-provided and ``idx`` is the index of the log file + in the ``log_files`` mapping. ``log_line_prefixes`` can be used to override the + header for each log file. 
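+    Concretely, every tailed line is written to ``dst`` as ``{header}{line}``,
+    i.e. the header is prepended to the line.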
+ + Usage: + + :: + + log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"} + tailer = TailLog("trainer", log_files, sys.stdout).start() + # actually run the trainers to produce 0_stdout.log and 1_stdout.log + run_trainers() + tailer.stop() + + # once run_trainers() start writing the ##_stdout.log files + # the tailer will print to sys.stdout: + # >>> [trainer0]:log_line1 + # >>> [trainer1]:log_line1 + # >>> [trainer0]:log_line2 + # >>> [trainer0]:log_line3 + # >>> [trainer1]:log_line2 + + .. note:: Due to buffering log lines between files may not necessarily + be printed out in order. You should configure your application's + logger to suffix each log line with a proper timestamp. + + """ + + def __init__( + self, + name: str, + log_files: Dict[int, str], + dst: TextIO, + log_line_prefixes: Optional[Dict[int, str]] = None, + interval_sec: float = 0.1, + ): + n = len(log_files) + self._threadpool = None + if n > 0: + self._threadpool = ThreadPoolExecutor( + max_workers=n, + thread_name_prefix=f"{self.__class__.__qualname__}_{name}", + ) + + self._name = name + self._dst = dst + self._log_files = log_files + self._log_line_prefixes = log_line_prefixes + self._finished_events: Dict[int, Event] = { + local_rank: Event() for local_rank in log_files.keys() + } + self._futs: List[Future] = [] + self._interval_sec = interval_sec + self._stopped = False + + def start(self) -> "TailLog": + if not self._threadpool: + return self + + for local_rank, file in self._log_files.items(): + header = f"[{self._name}{local_rank}]:" + if self._log_line_prefixes and local_rank in self._log_line_prefixes: + header = self._log_line_prefixes[local_rank] + self._futs.append( + self._threadpool.submit( + tail_logfile, + header=header, + file=file, + dst=self._dst, + finished=self._finished_events[local_rank], + interval_sec=self._interval_sec, + ) + ) + return self + + def stop(self) -> None: + for finished in self._finished_events.values(): + finished.set() + + for local_rank, f in enumerate(self._futs): + try: + f.result() + except Exception as e: + log.error( + "error in log tailor for %s%s. 
%s: %s", + self._name, local_rank, + e.__class__.__qualname__, e, + ) + + if self._threadpool: + self._threadpool.shutdown(wait=True) + + self._stopped = True + + def stopped(self) -> bool: + return self._stopped diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f339c9c69e16bef2e3ce90950422e652d66447ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/c10d_rendezvous_backend.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0384ddce0471d6a483d6b6ad9907905f4c71df1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/etcd_server.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/registry.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14aac90ff3303084e6852fb167c97fc9e98b1558 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__pycache__/registry.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..144e691ae08caa1c2f6bf28ef3faf3e4bdbe8f77 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py @@ -0,0 +1,269 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import binascii +import logging +import os +import tempfile +from base64 import b64decode, b64encode +from datetime import timedelta +from typing import Any, Optional, Tuple, cast + +from torch.distributed import FileStore, Store, TCPStore +from torch.distributed.elastic.events import ( + NodeState, + construct_and_record_rdzv_event, +) + +from .api import ( + RendezvousConnectionError, + RendezvousError, + RendezvousParameters, + RendezvousStateError, +) +from .dynamic_rendezvous import RendezvousBackend, Token +from .utils import _matches_machine_hostname, parse_rendezvous_endpoint + +log = logging.getLogger(__name__) + + +class C10dRendezvousBackend(RendezvousBackend): + """Represents a C10d-backed rendezvous backend. + + Args: + store: + The :py:class:`torch.distributed.Store` instance to use to + communicate with the C10d store. + run_id: + The run id of the rendezvous. + """ + + # See the explanation in the __init__ method. 
+ _NULL_SENTINEL = "Y2FuaW1hZGFt" + + _store: Store + _key: str + + def __init__(self, store: Store, run_id: str) -> None: + if not run_id: + raise ValueError("The run id must be a non-empty string.") + + self._store = store + + self._key = "torch.rendezvous." + run_id + + # The read operation of a store blocks the caller until the specified + # key becomes available. This behavior makes it tricky to use a store + # as a regular key-value dictionary. + # + # As a workaround we initially set a sentinel value as the rendezvous + # state. Whenever this value gets returned we treat it as a None. + self._call_store("compare_set", self._key, "", self._NULL_SENTINEL) + + @property + def name(self) -> str: + """See base class.""" + return "c10d" + + def get_state(self) -> Optional[Tuple[bytes, Token]]: + """See base class.""" + base64_state: bytes = self._call_store("get", self._key) + + return self._decode_state(base64_state) + + def set_state( + self, state: bytes, token: Optional[Token] = None + ) -> Optional[Tuple[bytes, Token, bool]]: + """See base class.""" + base64_state_str: str = b64encode(state).decode() + + if token: + # Shortcut if we know for sure that the token is not valid. + if not isinstance(token, bytes): + result = self.get_state() + if result is not None: + tmp = *result, False + # Python 3.6 does not support tuple unpacking in return + # statements. + return tmp + return None + + token = token.decode() + else: + token = self._NULL_SENTINEL + + base64_state: bytes = self._call_store("compare_set", self._key, token, base64_state_str) + + state_token_pair = self._decode_state(base64_state) + if state_token_pair is None: + return None + + new_state, new_token = state_token_pair + + # C10d Store's compare_set method does not offer an easy way to find out + # whether our write attempt was successful. As a brute-force solution we + # perform a bitwise comparison of our local state and the remote state. + return new_state, new_token, new_state == state + + def _call_store(self, store_op: str, *args, **kwargs) -> Any: + try: + return getattr(self._store, store_op)(*args, **kwargs) + except (ValueError, RuntimeError, TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to the C10d store has failed. See inner exception for details." + ) from exc + + def _decode_state(self, base64_state: bytes) -> Optional[Tuple[bytes, Token]]: + if base64_state == self._NULL_SENTINEL.encode(): + return None + + try: + state = b64decode(base64_state) + except binascii.Error as exc: + raise RendezvousStateError( + "The state object is corrupt. See inner exception for details." + ) from exc + + return state, base64_state + + +def _create_tcp_store(params: RendezvousParameters) -> TCPStore: + host, port = parse_rendezvous_endpoint(params.endpoint, default_port=29400) + + cfg_is_host = params.get_as_bool("is_host") + # If the user has explicitly specified whether our process should host the + # the store, respect it. + if cfg_is_host is not None: + is_host = cfg_is_host + # Otherwise try to determine whether we are the host based on our hostname + # and IP address. + else: + is_host = _matches_machine_hostname(host) + + use_libuv = params.get_as_bool("use_libuv", False) + + # The timeout + read_timeout = cast(int, params.get_as_int("read_timeout", 60)) + if read_timeout <= 0: + raise ValueError("The read timeout must be a positive integer.") + + # In specific cases we attempt to instantiate the store twice. For details + # see the explanation in the except clause below. 
+ for is_server in [is_host, False]: + try: + store = TCPStore( + host, + port, + is_master=is_server, + timeout=timedelta(seconds=read_timeout), + use_libuv=use_libuv, + ) + + if is_server: + msg = f"Process {os.getpid()} hosts the TCP store for the C10d rendezvous backend." + construct_and_record_rdzv_event( + run_id=params.run_id, message=msg, node_state=NodeState.INIT + ) + log.info(msg) + + break + except (ValueError, RuntimeError, TimeoutError) as exc: + # If we heuristically inferred the value of is_host as True and our + # first attempt to instantiate the TCP store has failed, try it one + # more time with is_host set to False. As an edge case there can be + # more than one process that is part of the same rendezvous on this + # machine and only one of them will eventually host the store. + + if not is_server or cfg_is_host is not None: + raise RendezvousConnectionError( + "The connection to the C10d store has failed. See inner exception for details." + ) from exc + + return store # type: ignore[possibly-undefined] + + +def _create_file_store(params: RendezvousParameters) -> FileStore: + # If a user specifies an endpoint, we treat it as a path to a file. + if params.endpoint: + path = params.endpoint + else: + try: + # The temporary file is readable and writable only by the user of + # this process. + _, path = tempfile.mkstemp() + except OSError as exc: + raise RendezvousError( + "The file creation for C10d store has failed. See inner exception for details." + ) from exc + + try: + store = FileStore(path) + except (ValueError, RuntimeError) as exc: + raise RendezvousConnectionError( + "The connection to the C10d store has failed. See inner exception for details." + ) from exc + + return store + + +def create_backend(params: RendezvousParameters) -> Tuple[C10dRendezvousBackend, Store]: + """Create a new :py:class:`C10dRendezvousBackend` from the specified parameters. + + +--------------+-----------------------------------------------------------+ + | Parameter | Description | + +==============+===========================================================+ + | store_type | The type of the C10d store. The currently supported types | + | | are "tcp" and "file" which correspond to | + | | :py:class:`torch.distributed.TCPStore` and | + | | :py:class:`torch.distributed.FileStore`, respectively. | + | | Defaults to "tcp". | + +--------------+-----------------------------------------------------------+ + | read_timeout | The read timeout, in seconds, for store operations. | + | | Defaults to 60 seconds. | + | | | + | | Note this only applies to | + | | :py:class:`torch.distributed.TCPStore`. It is not relevant| + | | to :py:class:`torch.distributed.FileStore` which does not | + | | take in timeout as a parameter. | + +--------------+-----------------------------------------------------------+ + | is_host | A boolean value indicating whether this backend instance | + | | will host the C10d store. If not specified it will be | + | | inferred heuristically by matching the hostname or the IP | + | | address of this machine against the specified rendezvous | + | | endpoint. Defaults to ``None``. | + | | | + | | Note that this configuration option only applies to | + | | :py:class:`torch.distributed.TCPStore`. In normal | + | | circumstances you can safely skip it; the only time when | + | | it is needed is if its value cannot be correctly | + | | determined (e.g. the rendezvous endpoint has a CNAME as | + | | the hostname or does not match the FQDN of the machine). 
| + +--------------+-----------------------------------------------------------+ + """ + # As of today we only support TCPStore and FileStore. Other store types do + # not have the required functionality (e.g. compare_set) yet. + store_type = params.get("store_type", "tcp").strip().lower() + store: Store + + try: + if store_type == "file": + store = _create_file_store(params) + elif store_type == "tcp": + store = _create_tcp_store(params) + else: + raise ValueError("Invalid store type given. Currently only supports file and tcp.") + + backend = C10dRendezvousBackend(store, params.run_id) + + except Exception as e: + construct_and_record_rdzv_event( + message=f"{type(e).__name__}: {str(e)}", + run_id=params.run_id, + node_state=NodeState.FAILED, + ) + raise + + return backend, store diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py new file mode 100644 index 0000000000000000000000000000000000000000..7a2ce80ed903905c419f83c8e36ccd118457ad5c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py @@ -0,0 +1,1343 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import inspect +import logging +import os +import pickle +import socket +import threading +import time +import weakref +from abc import ABC, abstractmethod +from dataclasses import dataclass +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple + +from torch.distributed import PrefixStore, Store +from torch.distributed.elastic.events import construct_and_record_rdzv_event, NodeState + +from .api import ( + RendezvousClosedError, + RendezvousError, + RendezvousGracefulExitError, + RendezvousHandler, + RendezvousParameters, + RendezvousStateError, + RendezvousTimeoutError, +) +from .utils import _delay, _PeriodicTimer + +__all__ = ['RendezvousBackend', 'RendezvousTimeout', 'RendezvousSettings', 'DynamicRendezvousHandler', 'create_handler'] + +log = logging.getLogger(__name__) + + +def get_method_name(depth=2): + if len(inspect.stack()) > depth: + return inspect.stack()[depth].function + return "no_method_name" + + +Token = Any +"""Represent an opaque fencing token used by the rendezvous backend.""" + +class RendezvousBackend(ABC): + """Represent a backend that holds the rendezvous state.""" + + @property + @abstractmethod + def name(self) -> str: + """Get the name of the backend.""" + + @abstractmethod + def get_state(self) -> Optional[Tuple[bytes, Token]]: + """Get the rendezvous state. + + Returns: + A tuple of the encoded rendezvous state and its fencing token or + ``None`` if no state is found in the backend. + + Raises: + RendezvousConnectionError: + The connection to the backend has failed. + RendezvousStateError: + The rendezvous state is corrupt. + """ + + @abstractmethod + def set_state( + self, state: bytes, token: Optional[Token] = None + ) -> Optional[Tuple[bytes, Token, bool]]: + """Set the rendezvous state. + + The new rendezvous state is set conditionally: + + - If the specified ``token`` matches the fencing token stored in the + backend, the state will be updated. The new state will be returned + to the caller along with its fencing token. 
+ - If the specified ``token`` does not match the fencing token stored + in the backend, the state won't be updated; instead the existing + state along with its fencing token will be returned to the caller. + - If the specified ``token`` is ``None``, the new state will be set + only if there is no existing state in the backend. Either the new + state or the existing state along with its fencing token will be + returned to the caller. + + Args: + state: + The encoded rendezvous state. + token: + An optional fencing token that was retrieved by a previous call + to :py:meth:`get_state` or ``set_state()``. + + Returns: + A tuple of the serialized rendezvous state, its fencing token, and + a boolean value indicating whether our set attempt succeeded. + + Raises: + RendezvousConnectionError: + The connection to the backend has failed. + RendezvousStateError: + The rendezvous state is corrupt. + """ + + +class RendezvousTimeout: + """Hold the timeout configuration of a rendezvous. + + Args: + join: + The time within which the rendezvous is expected to complete. + last_call: + An additional wait amount before completing the rendezvous once the + rendezvous has the minimum number of required participants. + close: + The time within which the rendezvous is expected to close after a + call to :py:meth:`RendezvousHandler.set_closed` or + :py:meth:`RendezvousHandler.shutdown`. + keep_alive: + The time within which a keep-alive heartbeat is expected to + complete. + """ + + _ZERO = timedelta(0) + + _DEFAULT_TIMEOUTS = { + "join": timedelta(seconds=600), + "last_call": timedelta(seconds=30), + "close": timedelta(seconds=30), + "heartbeat": timedelta(seconds=5), + } + + _join: timedelta + _last_call: timedelta + _close: timedelta + _heartbeat: timedelta + + def __init__( + self, + join: Optional[timedelta] = None, + last_call: Optional[timedelta] = None, + close: Optional[timedelta] = None, + heartbeat: Optional[timedelta] = None, + ) -> None: + self._set_timeouts(join=join, last_call=last_call, close=close, heartbeat=heartbeat) + + @property + def join(self) -> timedelta: + """Get the join timeout.""" + return self._join + + @property + def last_call(self) -> timedelta: + """Get the last call timeout.""" + return self._last_call + + @property + def close(self) -> timedelta: + """Get the close timeout.""" + return self._close + + @property + def heartbeat(self) -> timedelta: + """Get the keep-alive heartbeat timeout.""" + return self._heartbeat + + def _set_timeouts(self, **timeouts: Optional[timedelta]): + for name, timeout in timeouts.items(): + if timeout is None: + timeout = self._DEFAULT_TIMEOUTS[name] + if timeout <= self._ZERO: + raise ValueError(f"The {name} timeout ({timeout}) must be positive.") + setattr(self, "_" + name, timeout) + + +@dataclass(repr=False, eq=False, frozen=True) +class RendezvousSettings: + """Hold the settings of the rendezvous. + + Attributes: + run_id: + The run id of the rendezvous. + min_nodes: + The minimum number of nodes to admit to the rendezvous. + max_nodes: + The maximum number of nodes to admit to the rendezvous. + timeout: + The timeout configuration of the rendezvous. + keep_alive_interval: + The amount of time a node waits before sending a heartbeat to keep + it alive in the rendezvous. + keep_alive_max_attempt: + The maximum number of failed heartbeat attempts after which a node + is considered dead. 
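+
+    A minimal construction sketch (the values below are illustrative, not
+    defaults)::
+
+        settings = RendezvousSettings(
+            run_id="my_run",
+            min_nodes=1,
+            max_nodes=4,
+            timeout=RendezvousTimeout(),
+            keep_alive_interval=timedelta(seconds=5),
+            keep_alive_max_attempt=3,
+        )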
+ """ + + run_id: str + min_nodes: int + max_nodes: int + timeout: RendezvousTimeout + keep_alive_interval: timedelta + keep_alive_max_attempt: int + + +@dataclass(eq=True, order=True, frozen=True) +class _NodeDesc: + """Describe a node in the rendezvous. + + Attributes: + addr: + The FQDN of the node or user specified local node address. + pid: + The id of the process in which the rendezvous handler runs. + local_id: + A process-wide unique id. + """ + + addr: str + pid: int + local_id: int + + def __repr__(self) -> str: + return f"{self.addr}_{self.pid}_{self.local_id}" + + +class _NodeDescGenerator: + """Generate node descriptors. + + A node descriptor is a combination of an FQDN, a process id, and an auto- + incremented integer that uniquely identifies a node in the rendezvous. + """ + + _lock: threading.Lock + _local_id: int + + def __init__(self) -> None: + self._lock = threading.Lock() + + # An integer that is incremented with each call to generate(). + self._local_id = 0 + + def generate(self, local_addr: Optional[str] = None) -> _NodeDesc: + # This method can be called by multiple threads concurrently; therefore, + # we must increment the integer atomically. + with self._lock: + local_id = self._local_id + + self._local_id += 1 + + return _NodeDesc(local_addr or socket.getfqdn(), os.getpid(), local_id) + + +class _RendezvousState: + """Hold the state of a rendezvous. + + Attributes: + round: + The current round of the rendezvous. + complete: + A boolean value indicating whether the current round of the + rendezvous is complete. + deadline: + The time at which the current round of the rendezvous will be + considered complete if it is still waiting for nodes to join. + closed: + A boolean value indicating whether the rendezvous is closed. + participants: + A dictionary of the participants and their corresponding ranks. + wait_list: + A set of nodes that are waiting to participate in the next round of + the rendezvous. + redundancy_list: + A set of nodes that are redundant in the current round and can join + the next rendezvous without triggering re-rendezvous. + last_heartbeats: + A dictionary containing each node's last heartbeat time. + """ + + round: int + complete: bool + deadline: Optional[datetime] + closed: bool + participants: Dict[_NodeDesc, int] + wait_list: Set[_NodeDesc] + redundancy_list: Set[_NodeDesc] + last_heartbeats: Dict[_NodeDesc, datetime] + + def __init__(self) -> None: + self.round = 0 + self.complete = False + self.deadline = None + self.closed = False + self.participants = {} + self.wait_list = set() + self.redundancy_list = set() + self.last_heartbeats = {} + + +def _remove_participant_epilogue(state: _RendezvousState, settings: RendezvousSettings) -> None: + if state.complete: + # If we do not have any participants left, move to the next round. + if not state.participants: + msg = "No participants left in the rendezvous, marking rendezvous as incomplete" + log.debug(msg) + state.complete = False + + state.round += 1 + else: + if len(state.participants) < settings.min_nodes: + msg = ( + f"Number of participants {len(state.participants)}) less than" + f"min_nodes {settings.min_nodes}, clearning deadline in state" + ) + log.debug(msg) + state.deadline = None + + +class _RendezvousStateHolder(ABC): + """Hold the shared rendezvous state synced with other nodes.""" + + @property + @abstractmethod + def state(self) -> _RendezvousState: + """Get the local state.""" + + @abstractmethod + def sync(self) -> Optional[bool]: + """Read or writes the latest state. 
+ + Returns: + A boolean value indicating whether the local state, in case marked + as dirty, was successfully synced with other nodes. + """ + + @abstractmethod + def mark_dirty(self) -> None: + """Mark the local state as dirty.""" + + +class _BackendRendezvousStateHolder(_RendezvousStateHolder): + """Hold the rendezvous state synced with other nodes via a backend. + + Args: + backend: + The rendezvous backend to use. + settings: + The rendezvous settings. + cache_duration: + The amount of time, in seconds, to cache the last rendezvous state + before requesting it from the backend again. + """ + + _backend: RendezvousBackend + _state: _RendezvousState + _settings: RendezvousSettings + _cache_duration: int + _token: Token + _dirty: bool + _last_sync_time: float + _dead_nodes: List[_NodeDesc] + + def __init__( + self, + backend: RendezvousBackend, + settings: RendezvousSettings, + cache_duration: int = 1, + ) -> None: + self._backend = backend + self._state = _RendezvousState() + self._settings = settings + self._cache_duration = cache_duration + self._token = None + self._dirty = False + self._last_sync_time = -1 + self._dead_nodes = [] + + def _record(self, message: str, node_state: NodeState = NodeState.RUNNING): + construct_and_record_rdzv_event( + name=f"{self.__class__.__name__}.{get_method_name()}", + run_id=self._settings.run_id, + message=message, + node_state=node_state, + ) + + @property + def state(self) -> _RendezvousState: + """See base class.""" + return self._state + + def sync(self) -> Optional[bool]: + """See base class.""" + state_bits: Optional[bytes] = None + + token = None + + has_set: Optional[bool] + + if self._dirty: + has_set = False + + state_bits = pickle.dumps(self._state) + + set_response = self._backend.set_state(state_bits, self._token) + if set_response is not None: + state_bits, token, has_set = set_response + else: + has_set = None + + if self._cache_duration > 0: + # Avoid overloading the backend if we are asked to retrieve the + # state repeatedly. Try to serve the cached state. + if self._last_sync_time >= max(time.monotonic() - self._cache_duration, 0): + return None + + get_response = self._backend.get_state() + if get_response is not None: + state_bits, token = get_response + + if state_bits is not None: + try: + self._state = pickle.loads(state_bits) + except pickle.PickleError as exc: + raise RendezvousStateError( + "The rendezvous state is corrupt. See inner exception for details." + ) from exc + else: + self._state = _RendezvousState() + + if has_set and self._dead_nodes and log.isEnabledFor(logging.DEBUG): + node_list = ", ".join(f"'{dead_node}'" for dead_node in self._dead_nodes) + + msg = ( + f"As part of the sync operation the node(s) {node_list} have been removed from the " + f"rendezvous '{self._settings.run_id}' since they had no heartbeat." + ) + self._record(message=msg) + log.debug(msg) + + self._token = token + + self._dirty = False + + self._last_sync_time = time.monotonic() + + self._sanitize() + + return has_set + + def _sanitize(self) -> None: + state = self._state + + expire_time = datetime.utcnow() - ( + self._settings.keep_alive_interval * self._settings.keep_alive_max_attempt + ) + + # Filter out the dead nodes. 
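+        # (A node counts as dead when its last heartbeat is older than
+        # ``keep_alive_interval * keep_alive_max_attempt``, i.e. the
+        # ``expire_time`` computed above.)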
+ self._dead_nodes = [ + node + for node, last_heartbeat in state.last_heartbeats.items() + if last_heartbeat < expire_time + ] + + participant_removed = False + + for dead_node in self._dead_nodes: + msg = f"Detected dead node '{dead_node}', removing it from the rendezvous" + log.debug(msg) + del state.last_heartbeats[dead_node] + + try: + del state.participants[dead_node] + + participant_removed = True + except KeyError: + pass + + try: + state.wait_list.remove(dead_node) + except KeyError: + pass + + try: + state.redundancy_list.remove(dead_node) + except KeyError: + pass + + if participant_removed: + # Common epilogue shared with the _remove_from_participants() + # function of _DistributedRendezvousOpExecutor. + _remove_participant_epilogue(state, self._settings) + + def mark_dirty(self) -> None: + """See base class. + + If the local rendezvous state is dirty, the next sync call will try to + write the changes back to the backend. However this attempt might fail + if another node, which had the same state, also made changes and wrote + them before us. + """ + self._dirty = True + + +class _Action(Enum): + """Specifies the possible actions based on the state of the rendezvous.""" + + KEEP_ALIVE = 1 + ADD_TO_PARTICIPANTS = 2 + ADD_TO_WAIT_LIST = 3 + ADD_TO_REDUNDANCY_LIST = 4 + REMOVE_FROM_PARTICIPANTS = 5 + REMOVE_FROM_WAIT_LIST = 6 + REMOVE_FROM_REDUNDANCY_LIST = 7 + MARK_RENDEZVOUS_COMPLETE = 8 + MARK_RENDEZVOUS_CLOSED = 9 + SYNC = 10 + ERROR_CLOSED = 11 + ERROR_TIMEOUT = 12 + FINISH = 13 + + +class _RendezvousContext: + """Holds the context of the rendezvous. + + Attributes: + node: + The node descriptor associated with the current rendezvous handler + instance. + state: + The current state of the rendezvous. + settings: + The rendezvous settings. + """ + + node: _NodeDesc + state: _RendezvousState + settings: RendezvousSettings + + def __init__( + self, node: _NodeDesc, state: _RendezvousState, settings: RendezvousSettings + ) -> None: + self.node = node + self.state = state + self.settings = settings + + +class _RendezvousOpExecutor(ABC): + """Execute rendezvous operations.""" + + @abstractmethod + def run( + self, + state_handler: Callable[[_RendezvousContext, float], _Action], + deadline: float, + update_deadline: Optional[Callable[[timedelta], float]] = None, + ) -> None: + """Execute a rendezvous operation. + + An operation is run inside a state machine and is expected to transition + the rendezvous from one state to another. + + Args: + state_handler: + A callable that is expected to return the next state transition + action based on the current state of the rendezvous. + deadline: + The time, in seconds, at which the operation will be considered + timed-out. + update_deadline: + Function to generate a new operation deadline if the current + node may participate in the next rendezvous. + """ + + +class _DistributedRendezvousOpExecutor(_RendezvousOpExecutor): + """Execute rendezvous operations using a shared state. + + Args: + node: + The node descriptor associated with the current rendezvous handler + instance. + state_holder: + The ``RendezvousStateHolder`` to use to sync the rendezvous state + with other nodes. + settings: + The rendezvous settings. 
+ """ + + _node: _NodeDesc + _state: _RendezvousState + _state_holder: _RendezvousStateHolder + _settings: RendezvousSettings + + def __init__( + self, + node: _NodeDesc, + state_holder: _RendezvousStateHolder, + settings: RendezvousSettings, + ) -> None: + self._node = node + self._state_holder = state_holder + self._settings = settings + + def _record(self, message: str, node_state: NodeState = NodeState.RUNNING) -> None: + construct_and_record_rdzv_event( + name=f"{self.__class__.__name__}.{get_method_name()}", + run_id=self._settings.run_id, + message=message, + node_state=node_state, + hostname=self._node.addr, + pid=self._node.pid, + local_id=self._node.local_id, + ) + + def run( + self, + state_handler: Callable[[_RendezvousContext, float], _Action], + deadline: float, + update_deadline: Optional[Callable[[timedelta], float]] = None, + ) -> None: + """See base class.""" + action = None + while action != _Action.FINISH: + # Reads or writes the latest rendezvous state shared by all nodes in + # the rendezvous. Note that our local changes might get overridden + # by another node if that node synced its changes before us. + has_set = self._state_holder.sync() + if has_set is not None: + if has_set: + msg = ( + f"The node '{self._node}' has successfully synced its local changes with " + f"other nodes in the rendezvous '{self._settings.run_id}'." + ) + else: + msg = ( + f"The node '{self._node}' has a stale state and failed to sync its local " + f"changes with other nodes in the rendezvous '{self._settings.run_id}'." + ) + + self._record(message=msg) + log.debug(msg) + + self._state = self._state_holder.state + + ctx = _RendezvousContext(self._node, self._state, self._settings) + + # Determine the next action to take based on the current state of + # the rendezvous. + action = state_handler(ctx, deadline) + + if action == _Action.FINISH: + continue + + if action == _Action.ERROR_CLOSED: + raise RendezvousClosedError() + + if action == _Action.ERROR_TIMEOUT: + raise RendezvousTimeoutError() + + if action == _Action.SYNC: + # Delay the execution by one second to avoid overloading the + # backend if we are asked to poll for state changes. + _delay(seconds=1) + else: + if action == _Action.KEEP_ALIVE: + self._keep_alive() + elif action == _Action.ADD_TO_PARTICIPANTS: + self._add_to_participants() + elif action == _Action.ADD_TO_WAIT_LIST: + self._add_to_wait_list() + elif action == _Action.ADD_TO_REDUNDANCY_LIST: + self._add_to_redundancy_list() + elif action == _Action.REMOVE_FROM_PARTICIPANTS: + self._remove_from_participants() + elif action == _Action.REMOVE_FROM_WAIT_LIST: + self._remove_from_wait_list() + elif action == _Action.REMOVE_FROM_REDUNDANCY_LIST: + self._remove_from_redundancy_list() + # update deadline since the node may participate in rendezvous process + if update_deadline: + deadline = update_deadline(self._settings.timeout.join) + elif action == _Action.MARK_RENDEZVOUS_COMPLETE: + self._mark_rendezvous_complete() + elif action == _Action.MARK_RENDEZVOUS_CLOSED: + self._mark_rendezvous_closed() + + # Attempt to sync our changes back to other nodes. + self._state_holder.mark_dirty() + + def _keep_alive(self) -> None: + msg = ( + f"The node '{self._node}' updated its keep-alive heartbeat time for the rendezvous " + f"'{self._settings.run_id}'. Pending sync." 
+ ) + self._record(message=msg) + log.debug(msg) + + self._state.last_heartbeats[self._node] = datetime.utcnow() + + def _add_to_participants(self) -> None: + msg = ( + f"The node '{self._node}' added itself to the participants of round " + f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + state = self._state + + try: + state.wait_list.remove(self._node) + except KeyError: + pass + + # The ranks of the participants will be set once the rendezvous is + # complete. + state.participants[self._node] = 0 + + self._keep_alive() + + if len(state.participants) == self._settings.min_nodes: + state.deadline = datetime.utcnow() + self._settings.timeout.last_call + + if len(state.participants) == self._settings.max_nodes: + self._mark_rendezvous_complete() + + def _add_to_wait_list(self) -> None: + msg = ( + f"The node '{self._node}' added itself to the wait list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + if self._node in self._state.redundancy_list: + self._state.redundancy_list.remove(self._node) + self._state.wait_list.add(self._node) + + self._keep_alive() + + def _add_to_redundancy_list(self) -> None: + msg = ( + f"The node '{self._node}' added itself to the redundancy list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + self._state.redundancy_list.add(self._node) + + self._keep_alive() + + def _remove_from_participants(self) -> None: + msg = ( + f"The node '{self._node}' removed itself from the participants of round " + f"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + state = self._state + + del state.participants[self._node] + + del state.last_heartbeats[self._node] + + # Common epilogue shared with the sanitizer() function of + # _BackendRendezvousStateHolder. + _remove_participant_epilogue(state, self._settings) + + def _remove_from_wait_list(self) -> None: + msg = ( + f"The node '{self._node}' removed itself from the wait list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + self._state.wait_list.remove(self._node) + + del self._state.last_heartbeats[self._node] + + def _remove_from_redundancy_list(self) -> None: + msg = ( + f"The node '{self._node}' removed itself from the redunant list of round " + f"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync." + ) + self._record(message=msg) + log.debug(msg) + + self._state.redundancy_list.remove(self._node) + + del self._state.last_heartbeats[self._node] + + def _mark_rendezvous_complete(self) -> None: + msg = ( + f"The node '{self._node}' marked round {self._state.round} of the rendezvous " + f"'{self._settings.run_id}' as complete. Pending sync." + ) + self._record(message=msg, node_state=NodeState.SUCCEEDED) + log.debug(msg) + + state = self._state + + state.complete = True + state.deadline = None + + # Assign the ranks. + for rank, node in enumerate(sorted(state.participants)): + state.participants[node] = rank + + def _mark_rendezvous_closed(self) -> None: + msg = ( + f"The node '{self._node}' marked the rendezvous '{self._settings.run_id}' as closed. " + "Pending sync." 
+ ) + self._record(message=msg, node_state=NodeState.SUCCEEDED) + log.debug(msg) + + self._state.closed = True + + +def _should_keep_alive(ctx: _RendezvousContext) -> bool: + """Determine whether a keep-alive heartbeat should be sent.""" + try: + last_heartbeat = ctx.state.last_heartbeats[ctx.node] + except KeyError: + return False + + return last_heartbeat <= datetime.utcnow() - ctx.settings.keep_alive_interval + + +class _RendezvousExitOp: + """Represent a rendezvous exit operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + if ctx.node in ctx.state.participants: + if time.monotonic() > deadline: + return _Action.ERROR_TIMEOUT + return _Action.REMOVE_FROM_PARTICIPANTS + return _Action.FINISH + + +class _RendezvousJoinOp: + """Represent a rendezvous join operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + state = ctx.state + + # A closed rendezvous means that it no longer accepts new nodes. + if state.closed: + if ctx.node in state.redundancy_list: + msg = f"The rendezvous '{ctx.settings.run_id}' is closed, terminating pending rendezvous." + raise RendezvousGracefulExitError(msg) + return _Action.ERROR_CLOSED + + if ctx.node in state.redundancy_list: + msg = f"The node {ctx.node} is in redunancy list" + log.debug(msg) + # don't apply the timeout logic here, since we want to allow the node to rejoin + if len(state.participants) == ctx.settings.max_nodes: + if _should_keep_alive(ctx): + return _Action.KEEP_ALIVE + else: + return _Action.SYNC + else: + # transition to waiting state that will respect timeouts. + msg = f"The node {ctx.node} is removed from redunancy list" + log.debug(msg) + return _Action.REMOVE_FROM_REDUNDANCY_LIST + + is_participant = ctx.node in state.participants + + # If we are part of the rendezvous and it is already complete there is + # no further action to take. + if state.complete and is_participant: + return _Action.FINISH + + now = time.monotonic() + if now > deadline: + rollback_period = 5 # 5 seconds + + # If we still have time to rollback (a short period on top of the + # operation deadline), try to remove ourself from the rendezvous. + # It is okay if we can't though as our keep-alive will eventually + # expire. + if now <= deadline + rollback_period: + # If we are part of the rendezvous, it means we couldn't find + # enough participants to complete it on time. + if is_participant: + return _Action.REMOVE_FROM_PARTICIPANTS + # If we are in the wait list, it means we couldn't wait till the + # next round of the rendezvous. + if ctx.node in state.wait_list: + return _Action.REMOVE_FROM_WAIT_LIST + return _Action.ERROR_TIMEOUT + + if state.complete: + # If we are here, it means we are not part of the rendezvous. In + # case the rendezvous has capacity for additional participants add + # ourself to the wait list for the next round. + if len(state.participants) < ctx.settings.max_nodes: + if ctx.node not in state.wait_list: + return _Action.ADD_TO_WAIT_LIST + elif len(state.participants) >= ctx.settings.max_nodes: + if ctx.node not in state.redundancy_list and ctx.node not in state.wait_list: + return _Action.ADD_TO_REDUNDANCY_LIST + elif is_participant: + # If the rendezvous has enough number of participants including us, + # check whether we have passed the rendezvous deadline. If yes, + # complete it. 
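+            # (Editorial note, not part of the original source.) Completion
+            # requires that the participant count lies within
+            # [min_nodes, max_nodes] AND that the last-call deadline, which
+            # _add_to_participants() sets once min_nodes is reached, has
+            # already passed; until then the node keeps returning SYNC and
+            # re-evaluates on the next pass of the state machine.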
+ if len(state.participants) >= ctx.settings.min_nodes and \ + len(state.participants) <= ctx.settings.max_nodes: + if cast(datetime, state.deadline) < datetime.utcnow(): + msg = ( + f"The node '{ctx.node}' marking the rendezvous complete, " + f"quorum established within deadline" + ) + log.debug(msg) + return _Action.MARK_RENDEZVOUS_COMPLETE + else: + msg = f"The node '{ctx.node}' can't complete rendezvous: deadline reached" + log.debug(msg) + else: + msg = f"The node '{ctx.node}' can't complete rendezvous: not enough participants" + log.debug(msg) + else: + # The rendezvous is not complete yet and we are not part of it. Try + # to join. + return _Action.ADD_TO_PARTICIPANTS + + if _should_keep_alive(ctx): + return _Action.KEEP_ALIVE + + # At this point either the rendezvous is not complete, but we are part + # of it, which means we have to wait for other participants to join; or + # the rendezvous is complete, but we are not part of it, which means we + # have to wait for the next round. + return _Action.SYNC + + +class _RendezvousCloseOp: + """Represent a rendezvous close operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + if ctx.state.closed: + return _Action.FINISH + if time.monotonic() > deadline: + return _Action.ERROR_TIMEOUT + return _Action.MARK_RENDEZVOUS_CLOSED + + +class _RendezvousKeepAliveOp: + """Represent a rendezvous keep-alive update operation.""" + + def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action: + if _should_keep_alive(ctx): + if time.monotonic() > deadline: + return _Action.ERROR_TIMEOUT + return _Action.KEEP_ALIVE + return _Action.FINISH + + +class DynamicRendezvousHandler(RendezvousHandler): + """Represent a handler that sets up a rendezvous among a set of nodes.""" + + # Static + _node_desc_generator = _NodeDescGenerator() + + _this_node: _NodeDesc + _settings: RendezvousSettings + _backend_name: str + _store: Store + _state_holder: _RendezvousStateHolder + _op_executor: _RendezvousOpExecutor + _heartbeat_lock: threading.Lock + _keep_alive_timer: Optional[_PeriodicTimer] + + @classmethod + def from_backend( + cls, + run_id: str, + store: Store, + backend: RendezvousBackend, + min_nodes: int, + max_nodes: int, + local_addr: Optional[str] = None, + timeout: Optional[RendezvousTimeout] = None, + ): + """Create a new :py:class:`DynamicRendezvousHandler`. + + Args: + run_id: + The run id of the rendezvous. + store: + The C10d store to return as part of the rendezvous. + backend: + The backend to use to hold the rendezvous state. + min_nodes: + The minimum number of nodes to admit to the rendezvous. + max_nodes: + The maximum number of nodes to admit to the rendezvous. + local_addr: + The local node address. + timeout: + The timeout configuration of the rendezvous. + """ + # We associate each handler instance with a unique node descriptor. 
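+        # (Editorial usage sketch, not part of the original source; call-site
+        # values are hypothetical.) A typical caller creates the store and the
+        # backend first and then hands them to this factory, e.g.:
+        #
+        #   from torch.distributed import TCPStore
+        #   from torch.distributed.elastic.rendezvous.c10d_rendezvous_backend import (
+        #       C10dRendezvousBackend,
+        #   )
+        #
+        #   store = TCPStore("localhost", 29400, is_master=True)
+        #   backend = C10dRendezvousBackend(store, "my_run_id")
+        #   handler = DynamicRendezvousHandler.from_backend(
+        #       run_id="my_run_id",
+        #       store=store,
+        #       backend=backend,
+        #       min_nodes=2,
+        #       max_nodes=4,
+        #   )
+        #
+        # Generating the node descriptor below is the first step of that setup.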
+ node = cls._node_desc_generator.generate(local_addr) + + settings = RendezvousSettings( + run_id, + min_nodes, + max_nodes, + timeout or RendezvousTimeout(), + keep_alive_interval=timedelta(seconds=5), + keep_alive_max_attempt=3, + ) + + state_holder = _BackendRendezvousStateHolder(backend, settings) + + return cls(node, settings, backend.name, store, state_holder) + + def __init__( + self, + node: _NodeDesc, + settings: RendezvousSettings, + backend_name: str, + store: Store, + state_holder: _RendezvousStateHolder, + ) -> None: + if not settings.run_id: + raise ValueError("The run id must be a non-empty string.") + + if settings.min_nodes < 1: + raise ValueError( + f"The minimum number of nodes ({settings.min_nodes}) must be greater than zero." + ) + + if settings.max_nodes < settings.min_nodes: + raise ValueError( + f"The maximum number of nodes ({settings.max_nodes}) must be greater than or equal " + f"to the minimum number of nodes ({settings.min_nodes})." + ) + + self._this_node = node + + self._settings = settings + + self._backend_name = backend_name + + self._store = store + + self._state_holder = state_holder + + self._op_executor = _DistributedRendezvousOpExecutor( + self._this_node, self._state_holder, self._settings + ) + + self._heartbeat_lock = threading.Lock() + + self._keep_alive_timer = None + + def _record( + self, + message: str, + node_state: NodeState = NodeState.RUNNING, + rank: Optional[int] = None, + ) -> None: + construct_and_record_rdzv_event( + name=f"{self.__class__.__name__}.{get_method_name()}", + run_id=self._settings.run_id, + message=message, + node_state=node_state, + hostname=self._this_node.addr, + pid=self._this_node.pid, + local_id=self._this_node.local_id, + rank=rank, + ) + + @property + def settings(self) -> RendezvousSettings: + """Get the settings of the rendezvous.""" + return self._settings + + def get_backend(self) -> str: + """See base class.""" + return self._backend_name + + def next_rendezvous(self) -> Tuple[Store, int, int]: + """See base class.""" + msg = ( + f"The node '{self._this_node}' attempts to join the next round of the rendezvous " + f"'{self._settings.run_id}'." + ) + self._record(message=msg) + log.info(msg) + + try: + self._stop_heartbeats() + + # Delay the execution for a small random amount of time if this is our + # first run. This will slightly skew the rendezvous attempts across the + # nodes and reduce the load on the backend. + if self._state_holder.state.round == 0: + _delay(seconds=(0, 0.3)) + + exit_op = _RendezvousExitOp() + join_op = _RendezvousJoinOp() + + deadline = self._get_deadline(self._settings.timeout.join) + self._op_executor.run(exit_op, deadline) + self._op_executor.run( + join_op, + deadline, + self._get_deadline) + + self._start_heartbeats() + + rank, world_size = self._get_world() + store = self._get_store() + + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + msg = ( + f"The node '{self._this_node}' has joined round {self._state_holder.state.round} of " + f"the rendezvous '{self._settings.run_id}' as rank {rank} in a world of size " + f"{world_size}." 
+ ) + self._record(message=msg, rank=rank) + log.info(msg) + + return store, rank, world_size + + def is_closed(self) -> bool: + """See base class.""" + try: + with self._heartbeat_lock: + self._state_holder.sync() + + return self._state_holder.state.closed + + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def set_closed(self) -> None: + """See base class.""" + try: + with self._heartbeat_lock: + self._close() + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def num_nodes_waiting(self) -> int: + """See base class.""" + try: + with self._heartbeat_lock: + self._state_holder.sync() + + return len(self._state_holder.state.wait_list) + + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def get_run_id(self) -> str: + """See base class.""" + return self._settings.run_id + + def shutdown(self) -> bool: + """See base class.""" + self._stop_heartbeats() + + try: + self._close() + + return True + except RendezvousError as ex: + msg = ( + f"The node '{self._this_node}' has failed to shutdown the rendezvous " + f"'{self._settings.run_id}' due to an error of type {type(ex).__name__}." + ) + self._record(message=msg, node_state=NodeState.FAILED) + log.warning(msg) + + return False + except Exception as e: + self._record( + message=f"{type(e).__name__}: {str(e)}", + node_state=NodeState.FAILED, + ) + raise + + def _close(self) -> None: + op = _RendezvousCloseOp() + + deadline = self._get_deadline(self._settings.timeout.close) + + self._op_executor.run(op, deadline) + + msg = f"The node '{self._this_node}' has closed the rendezvous '{self._settings.run_id}'." + self._record(message=msg, node_state=NodeState.SUCCEEDED) + log.info(msg) + + @staticmethod + def _keep_alive_weak(weak_self) -> None: + self = weak_self() + if self is not None: + self._keep_alive() + + def _keep_alive(self) -> None: + self._heartbeat_lock.acquire() + + op = _RendezvousKeepAliveOp() + + deadline = self._get_deadline(self._settings.timeout.heartbeat) + + try: + self._op_executor.run(op, deadline) + + msg = ( + f"The node '{self._this_node}' has sent a keep-alive heartbeat to the rendezvous " + f"'{self._settings.run_id}'." + ) + self._record(message=msg) + log.debug(msg) + except RendezvousError as ex: + msg = ( + f"The node '{self._this_node}' has failed to send a keep-alive heartbeat to the " + f"rendezvous '{self._settings.run_id}' due to an error of type {type(ex).__name__}." 
+ ) + self._record(message=msg, node_state=NodeState.FAILED) + log.warning(msg) + finally: + self._heartbeat_lock.release() + + def _start_heartbeats(self) -> None: + self._keep_alive_timer = _PeriodicTimer( + self._settings.keep_alive_interval, self._keep_alive_weak, weakref.ref(self) + ) + + self._keep_alive_timer.set_name(f"RendezvousKeepAliveTimer_{self._this_node.local_id}") + + self._keep_alive_timer.start() + + def _stop_heartbeats(self) -> None: + if self._keep_alive_timer is None: + return + + self._keep_alive_timer.cancel() + + def _get_world(self) -> Tuple[int, int]: + state = self._state_holder.state + + return state.participants[self._this_node], len(state.participants) + + def _get_store(self) -> Store: + key_prefix = f"torch.rendezvous.{self._settings.run_id}.{self._state_holder.state.round}" + + return PrefixStore(key_prefix, self._store) + + def _get_deadline(self, timeout: timedelta) -> float: + return time.monotonic() + timeout.total_seconds() + + +def _get_timeout(params: RendezvousParameters, key: str) -> Optional[timedelta]: + timeout = params.get_as_int(key + "_timeout") + if timeout is None: + return None + return timedelta(seconds=timeout) + + +def create_handler( + store: Store, backend: RendezvousBackend, params: RendezvousParameters +) -> DynamicRendezvousHandler: + """Create a new :py:class:`DynamicRendezvousHandler` from the specified parameters. + + Args: + store: + The C10d store to return as part of the rendezvous. + backend: + The backend to use to hold the rendezvous state. + + +-------------------+------------------------------------------------------+ + | Parameter | Description | + +===================+======================================================+ + | join_timeout | The total time, in seconds, within which the | + | | rendezvous is expected to complete. Defaults to 600 | + | | seconds. | + +-------------------+------------------------------------------------------+ + | last_call_timeout | An additional wait amount, in seconds, before | + | | completing the rendezvous once the minimum number of | + | | nodes has been reached. Defaults to 30 seconds. | + +-------------------+------------------------------------------------------+ + | close_timeout | The time, in seconds, within which the rendezvous is | + | | expected to close after a call to | + | | :py:meth:`RendezvousHandler.set_closed` or | + | | :py:meth:`RendezvousHandler.shutdown`. Defaults to | + | | 30 seconds. | + +-------------------+------------------------------------------------------+ + """ + try: + timeout = RendezvousTimeout( + _get_timeout(params, "join"), + _get_timeout(params, "last_call"), + _get_timeout(params, "close"), + ) + + return DynamicRendezvousHandler.from_backend( + params.run_id, + store, + backend, + params.min_nodes, + params.max_nodes, + params.local_addr, + timeout, + ) + except Exception as e: + construct_and_record_rdzv_event( + message=f"{type(e).__name__}: {str(e)}", + run_id=params.run_id, + node_state=NodeState.FAILED, + ) + raise diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..cacb888590f88567a2cd094328916389de6302d8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py @@ -0,0 +1,213 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import binascii +from base64 import b64decode, b64encode +from typing import Optional, Tuple, cast + +import urllib3.exceptions # type: ignore[import] +from etcd import Client as EtcdClient # type: ignore[import] +from etcd import ( + EtcdAlreadyExist, + EtcdCompareFailed, + EtcdException, + EtcdKeyNotFound, + EtcdResult, +) +from torch.distributed import Store + +from .api import RendezvousConnectionError, RendezvousParameters, RendezvousStateError +from .dynamic_rendezvous import RendezvousBackend, Token +from .etcd_store import EtcdStore +from .utils import parse_rendezvous_endpoint + + +class EtcdRendezvousBackend(RendezvousBackend): + """Represents an etcd-based rendezvous backend. + + Args: + client: + The ``etcd.Client`` instance to use to communicate with etcd. + run_id: + The run id of the rendezvous. + key_prefix: + The path under which to store the rendezvous state in etcd. + ttl: + The TTL of the rendezvous state. If not specified, defaults to two hours. + """ + + _DEFAULT_TTL = 7200 # 2 hours + + _client: EtcdClient + _key: str + _ttl: int + + def __init__( + self, + client: EtcdClient, + run_id: str, + key_prefix: Optional[str] = None, + ttl: Optional[int] = None, + ) -> None: + if not run_id: + raise ValueError("The run id must be a non-empty string.") + + self._client = client + + if key_prefix: + self._key = key_prefix + "/" + run_id + else: + self._key = run_id + + if ttl and ttl > 0: + self._ttl = ttl + else: + self._ttl = self._DEFAULT_TTL + + @property + def name(self) -> str: + """See base class.""" + return "etcd-v2" + + def get_state(self) -> Optional[Tuple[bytes, Token]]: + """See base class.""" + try: + result = self._client.read(self._key) + except EtcdKeyNotFound: + return None + except (EtcdException, urllib3.exceptions.TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to etcd has failed. See inner exception for details." + ) from exc + + return self._decode_state(result) + + def set_state( + self, state: bytes, token: Optional[Token] = None + ) -> Optional[Tuple[bytes, Token, bool]]: + """See base class.""" + base64_state = b64encode(state).decode() + + kwargs = {} + + def get_state(): + result = self.get_state() + if result is not None: + tmp = *result, False + # Python 3.6 does not support tuple unpacking in return + # statements. + return tmp + return None + + if token: + try: + token = int(token) + except ValueError: + return get_state() + + if token: + kwargs["prevIndex"] = token + else: + kwargs["prevExist"] = False + + try: + result = self._client.write(self._key, base64_state, self._ttl, **kwargs) + except (EtcdAlreadyExist, EtcdCompareFailed): + result = None + except (EtcdException, urllib3.exceptions.TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to etcd has failed. See inner exception for details." + ) from exc + + if result is None: + return get_state() + + tmp = *self._decode_state(result), True + return tmp + + def _decode_state(self, result: EtcdResult) -> Tuple[bytes, Token]: + base64_state = result.value.encode() + + try: + state = b64decode(base64_state) + except binascii.Error as exc: + raise RendezvousStateError( + "The state object is corrupt. See inner exception for details." 
+ ) from exc + + return state, result.modifiedIndex + + +def _create_etcd_client(params: RendezvousParameters) -> EtcdClient: + host, port = parse_rendezvous_endpoint(params.endpoint, default_port=2379) + + # The timeout + read_timeout = cast(int, params.get_as_int("read_timeout", 60)) + if read_timeout <= 0: + raise ValueError("The read timeout must be a positive integer.") + + # The communication protocol + protocol = params.get("protocol", "http").strip().lower() + if protocol != "http" and protocol != "https": + raise ValueError("The protocol must be HTTP or HTTPS.") + + # The SSL client certificate + ssl_cert = params.get("ssl_cert") + if ssl_cert: + ssl_cert_key = params.get("ssl_cert_key") + if ssl_cert_key: + # The etcd client expects the certificate key as the second element + # of the `cert` tuple. + ssl_cert = (ssl_cert, ssl_cert_key) + + # The root certificate + ca_cert = params.get("ca_cert") + + try: + return EtcdClient( + host, + port, + read_timeout=read_timeout, + protocol=protocol, + cert=ssl_cert, + ca_cert=ca_cert, + allow_reconnect=True, + ) + except (EtcdException, urllib3.exceptions.TimeoutError) as exc: + raise RendezvousConnectionError( + "The connection to etcd has failed. See inner exception for details." + ) from exc + + +def create_backend(params: RendezvousParameters) -> Tuple[EtcdRendezvousBackend, Store]: + """Create a new :py:class:`EtcdRendezvousBackend` from the specified parameters. + + +--------------+-----------------------------------------------------------+ + | Parameter | Description | + +==============+===========================================================+ + | read_timeout | The read timeout, in seconds, for etcd operations. | + | | Defaults to 60 seconds. | + +--------------+-----------------------------------------------------------+ + | protocol | The protocol to use to communicate with etcd. Valid | + | | values are "http" and "https". Defaults to "http". | + +--------------+-----------------------------------------------------------+ + | ssl_cert | The path to the SSL client certificate to use along with | + | | HTTPS. Defaults to ``None``. | + +--------------+-----------------------------------------------------------+ + | ssl_cert_key | The path to the private key of the SSL client certificate | + | | to use along with HTTPS. Defaults to ``None``. | + +--------------+-----------------------------------------------------------+ + | ca_cert | The path to the rool SSL authority certificate. Defaults | + | | to ``None``. | + +--------------+-----------------------------------------------------------+ + """ + client = _create_etcd_client(params) + + backend = EtcdRendezvousBackend(client, params.run_id, key_prefix="/torch/elastic/rendezvous") + + store = EtcdStore(client, "/torch/elastic/store") + + return backend, store diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_store.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_store.py new file mode 100644 index 0000000000000000000000000000000000000000..7690439237add63999cbbb0b5179043036d5b9c3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/etcd_store.py @@ -0,0 +1,204 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
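+
+# (Editorial note, not part of the original source.) This module implements
+# EtcdStore, a c10d Store that reuses the rendezvous etcd instance for
+# key/value traffic. A rough usage sketch, with a hypothetical local etcd
+# endpoint:
+#
+#   import etcd
+#   client = etcd.Client(host="localhost", port=2379)
+#   store = EtcdStore(client, "/torch/elastic/store")
+#   store.set("status", b"ready")
+#   assert store.get("status") == b"ready"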
+ +import datetime +import random +import time +from base64 import b64decode, b64encode +from typing import Optional + +import etcd # type: ignore[import] + +# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`. +from torch.distributed import Store + + +# Delay (sleep) for a small random amount to reduce CAS failures. +# This does not affect correctness, but will reduce requests to etcd server. +def cas_delay(): + time.sleep(random.uniform(0, 0.1)) + + +# pyre-fixme[11]: Annotation `Store` is not defined as a type. +class EtcdStore(Store): + """ + Implement a c10 Store interface by piggybacking on the rendezvous etcd instance. + + This is the store object returned by ``EtcdRendezvous``. + """ + + def __init__( + self, + etcd_client, + etcd_store_prefix, + # Default timeout same as in c10d/Store.hpp + timeout: Optional[datetime.timedelta] = None, + ): + super().__init__() # required for pybind trampoline. + + self.client = etcd_client + self.prefix = etcd_store_prefix + + if timeout is not None: + self.set_timeout(timeout) + + if not self.prefix.endswith("/"): + self.prefix += "/" + + def set(self, key, value): + """ + Write a key/value pair into ``EtcdStore``. + + Both key and value may be either Python ``str`` or ``bytes``. + """ + self.client.set(key=self.prefix + self._encode(key), value=self._encode(value)) + + def get(self, key) -> bytes: + """ + Get a value by key, possibly doing a blocking wait. + + If key is not immediately present, will do a blocking wait + for at most ``timeout`` duration or until the key is published. + + + Returns: + value ``(bytes)`` + + Raises: + LookupError - If key still not published after timeout + """ + b64_key = self.prefix + self._encode(key) + kvs = self._try_wait_get([b64_key]) + + if kvs is None: + raise LookupError(f"Key {key} not found in EtcdStore") + + return self._decode(kvs[b64_key]) + + def add(self, key, num: int) -> int: + """ + Atomically increment a value by an integer amount. + + The integer is represented as a string using base 10. If key is not present, + a default value of ``0`` will be assumed. + + Returns: + the new (incremented) value + + + """ + b64_key = self._encode(key) + # c10d Store assumes value is an integer represented as a decimal string + try: + # Assume default value "0", if this key didn't yet: + node = self.client.write( + key=self.prefix + b64_key, + value=self._encode(str(num)), # i.e. 0 + num + prevExist=False, + ) + return int(self._decode(node.value)) + except etcd.EtcdAlreadyExist: + pass + + while True: + # Note: c10d Store does not have a method to delete keys, so we + # can be sure it's still there. + node = self.client.get(key=self.prefix + b64_key) + new_value = self._encode(str(int(self._decode(node.value)) + num)) + try: + node = self.client.test_and_set( + key=node.key, value=new_value, prev_value=node.value + ) + return int(self._decode(node.value)) + except etcd.EtcdCompareFailed: + cas_delay() + + def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None): + """ + Wait until all of the keys are published, or until timeout. 
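+
+        Example (editorial sketch; assumes ``store`` is an ``EtcdStore``)::
+
+            store.wait(["epoch", "seed"], datetime.timedelta(seconds=30))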
+ + Raises: + LookupError - if timeout occurs + """ + b64_keys = [self.prefix + self._encode(key) for key in keys] + kvs = self._try_wait_get(b64_keys, override_timeout) + if kvs is None: + raise LookupError("Timeout while waiting for keys in EtcdStore") + # No return value on success + + def check(self, keys) -> bool: + """Check if all of the keys are immediately present (without waiting).""" + b64_keys = [self.prefix + self._encode(key) for key in keys] + kvs = self._try_wait_get( + b64_keys, + override_timeout=datetime.timedelta(microseconds=1), # as if no wait + ) + return kvs is not None + + # + # Encode key/value data in base64, so we can store arbitrary binary data + # in EtcdStore. Input can be `str` or `bytes`. + # In case of `str`, utf-8 encoding is assumed. + # + def _encode(self, value) -> str: + if type(value) == bytes: + return b64encode(value).decode() + elif type(value) == str: + return b64encode(value.encode()).decode() + raise ValueError("Value must be of type str or bytes") + + # + # Decode a base64 string (of type `str` or `bytes`). + # Return type is `bytes`, which is more convenient with the Store interface. + # + def _decode(self, value) -> bytes: + if type(value) == bytes: + return b64decode(value) + elif type(value) == str: + return b64decode(value.encode()) + raise ValueError("Value must be of type str or bytes") + + # + # Get all of the (base64-encoded) etcd keys at once, or wait until all the keys + # are published or timeout occurs. + # This is a helper method for the public interface methods. + # + # On success, a dictionary of {etcd key -> etcd value} is returned. + # On timeout, None is returned. + # + def _try_wait_get(self, b64_keys, override_timeout=None): + timeout = self.timeout if override_timeout is None else override_timeout # type: ignore[attr-defined] + deadline = time.time() + timeout.total_seconds() + + while True: + # Read whole directory (of keys), filter only the ones waited for + all_nodes = self.client.get(key=self.prefix) + req_nodes = { + node.key: node.value for node in all_nodes.children if node.key in b64_keys + } + + if len(req_nodes) == len(b64_keys): + # All keys are available + return req_nodes + + watch_timeout = deadline - time.time() + if watch_timeout <= 0: + return None + + try: + self.client.watch( + key=self.prefix, + recursive=True, + timeout=watch_timeout, + index=all_nodes.etcd_index + 1, + ) + except etcd.EtcdWatchTimedOut: + if time.time() >= deadline: + return None + else: + continue + except etcd.EtcdEventIndexCleared: + continue diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/registry.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..eaa5bcfd80e247fcb8497999d2350005dd8134a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/registry.py @@ -0,0 +1,66 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from .api import RendezvousHandler, RendezvousParameters +from .api import rendezvous_handler_registry as handler_registry +from .dynamic_rendezvous import create_handler + +__all__ = ['get_rendezvous_handler'] + +def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler: + from . 
import static_tcp_rendezvous + + return static_tcp_rendezvous.create_rdzv_handler(params) + + +def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler: + from . import etcd_rendezvous + + return etcd_rendezvous.create_rdzv_handler(params) + + +def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler: + from .etcd_rendezvous_backend import create_backend + + backend, store = create_backend(params) + + return create_handler(store, backend, params) + + +def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler: + from .c10d_rendezvous_backend import create_backend + + backend, store = create_backend(params) + + return create_handler(store, backend, params) + + +def _register_default_handlers() -> None: + handler_registry.register("etcd", _create_etcd_handler) + handler_registry.register("etcd-v2", _create_etcd_v2_handler) + handler_registry.register("c10d", _create_c10d_handler) + handler_registry.register("static", _create_static_handler) + + +def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler: + """ + Obtain a reference to a :py:class`RendezvousHandler`. + + Custom rendezvous handlers can be registered by + + :: + + from torch.distributed.elastic.rendezvous import rendezvous_handler_registry + from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler + + def create_my_rdzv(params: RendezvousParameters): + return MyCustomRdzv(params) + + rendezvous_handler_registry.register("my_rdzv_backend_name", create_my_rdzv) + + my_rdzv_handler = get_rendezvous_handler("my_rdzv_backend_name", RendezvousParameters) + """ + return handler_registry.create_handler(params) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py new file mode 100644 index 0000000000000000000000000000000000000000..fe600213bcc89d34d2cba4021ff6989a7bad96ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import datetime +import logging +from typing import Tuple, cast, Optional + +# pyre-ignore[21]: Could not find name `Store` in `torch.distributed`. +from torch.distributed import Store, TCPStore, PrefixStore +from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters +from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint + +log = logging.getLogger(__name__) + +_default_timeout_seconds = 600 + + +class StaticTCPRendezvous(RendezvousHandler): + """ + Static rendezvous that is a wrapper around the TCPStore. 
+ + Creates TCPStore based on the input parameters with the + listener on the agent with group_rank=0 + """ + + def __init__( + self, + master_addr: str, + master_port: int, + rank: int, + world_size: int, + run_id: str, + timeout: int, + ): + self.master_addr = master_addr + self.master_port = master_port + self.rank = rank + self.world_size = world_size + self.run_id = run_id + self.timeout = datetime.timedelta(seconds=timeout) + self._store: Optional[Store] = None + + def get_backend(self) -> str: + return "static" + + def next_rendezvous(self) -> Tuple[Store, int, int]: + log.info("Creating TCPStore as the c10d::Store implementation") + if not self._store: + is_master = self.rank == 0 + self._store = TCPStore( # type: ignore[call-arg] + self.master_addr, + self.master_port, + self.world_size, + is_master, + self.timeout, + multi_tenant=True, + ) + store = PrefixStore(self.run_id, self._store) + return store, self.rank, self.world_size + + def is_closed(self): + return False + + def set_closed(self): + pass + + def num_nodes_waiting(self): + return 0 + + def get_run_id(self) -> str: + return self.run_id + + def shutdown(self) -> bool: + return True + + +def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler: + if "rank" not in params.config: + raise ValueError( + "rank is absent in RendezvousParameters." + "Try add --node-rank to the cmd request" + ) + endpoint = params.endpoint.strip() + if not endpoint: + raise ValueError( + "endpoint is absent in RendezvousParameters" + "Try add --master-port and --master-addr to the cmd request" + ) + master_addr, master_port = parse_rendezvous_endpoint(endpoint, -1) + if master_port == -1: + raise ValueError( + f"Port is absent in endpoint: {endpoint}. Try launching with --master-port" + ) + world_size = params.max_nodes + rank = cast(int, params.config.get("rank")) + run_id = params.run_id + if "timeout" in params.config: + timeout = int(params.config["timeout"]) + else: + timeout = _default_timeout_seconds + return StaticTCPRendezvous( + master_addr, master_port, rank, world_size, run_id, timeout + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..326bc604a91444f14416954cf376718fe613f9db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/utils.py @@ -0,0 +1,279 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import ipaddress +import random +import re +import socket +import time +import weakref +from datetime import timedelta +from threading import Event, Thread +from typing import Any, Callable, Dict, Optional, Tuple, Union + +__all__ = ['parse_rendezvous_endpoint'] + +def _parse_rendezvous_config(config_str: str) -> Dict[str, str]: + """Extract key-value pairs from a rendezvous configuration string. + + Args: + config_str: + A string in format =,...,=. + """ + config: Dict[str, str] = {} + + config_str = config_str.strip() + if not config_str: + return config + + key_values = config_str.split(",") + for kv in key_values: + key, *values = kv.split("=", 1) + + key = key.strip() + if not key: + raise ValueError( + "The rendezvous configuration string must be in format " + "=,...,=." 
+ ) + + value: Optional[str] + if values: + value = values[0].strip() + else: + value = None + if not value: + raise ValueError( + f"The rendezvous configuration option '{key}' must have a value specified." + ) + + config[key] = value + return config + + +def _try_parse_port(port_str: str) -> Optional[int]: + """Try to extract the port number from ``port_str``.""" + if port_str and re.match(r"^[0-9]{1,5}$", port_str): + return int(port_str) + return None + + +def parse_rendezvous_endpoint(endpoint: Optional[str], default_port: int) -> Tuple[str, int]: + """Extract the hostname and the port number from a rendezvous endpoint. + + Args: + endpoint: + A string in format [:]. + default_port: + The port number to use if the endpoint does not include one. + + Returns: + A tuple of hostname and port number. + """ + if endpoint is not None: + endpoint = endpoint.strip() + + if not endpoint: + return ("localhost", default_port) + + # An endpoint that starts and ends with brackets represents an IPv6 address. + if endpoint[0] == "[" and endpoint[-1] == "]": + host, *rest = endpoint, *[] + else: + host, *rest = endpoint.rsplit(":", 1) + + # Sanitize the IPv6 address. + if len(host) > 1 and host[0] == "[" and host[-1] == "]": + host = host[1:-1] + + if len(rest) == 1: + port = _try_parse_port(rest[0]) + if port is None or port >= 2 ** 16: + raise ValueError( + f"The port number of the rendezvous endpoint '{endpoint}' must be an integer " + "between 0 and 65536." + ) + else: + port = default_port + + if not re.match(r"^[\w\.:-]+$", host): + raise ValueError( + f"The hostname of the rendezvous endpoint '{endpoint}' must be a dot-separated list of " + "labels, an IPv4 address, or an IPv6 address." + ) + + return host, port + + +def _matches_machine_hostname(host: str) -> bool: + """Indicate whether ``host`` matches the hostname of this machine. + + This function compares ``host`` to the hostname as well as to the IP + addresses of this machine. Note that it may return a false negative if this + machine has CNAME records beyond its FQDN or IP addresses assigned to + secondary NICs. + """ + if host == "localhost": + return True + + try: + addr = ipaddress.ip_address(host) + except ValueError: + addr = None + + if addr and addr.is_loopback: + return True + + try: + host_addr_list = socket.getaddrinfo( + host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME + ) + except (ValueError, socket.gaierror) as _: + host_addr_list = [] + + host_ip_list = [ + host_addr_info[4][0] + for host_addr_info in host_addr_list + ] + + this_host = socket.gethostname() + if host == this_host: + return True + + addr_list = socket.getaddrinfo( + this_host, None, proto=socket.IPPROTO_TCP, flags=socket.AI_CANONNAME + ) + for addr_info in addr_list: + # If we have an FQDN in the addr_info, compare it to `host`. + if addr_info[3] and addr_info[3] == host: + return True + + # Otherwise if `host` represents an IP address, compare it to our IP + # address. + if addr and addr_info[4][0] == str(addr): + return True + + # If the IP address matches one of the provided host's IP addresses + if addr_info[4][0] in host_ip_list: + return True + + return False + + +def _delay(seconds: Union[float, Tuple[float, float]]) -> None: + """Suspend the current thread for ``seconds``. + + Args: + seconds: + Either the delay, in seconds, or a tuple of a lower and an upper + bound within which a random delay will be picked. 
+ """ + if isinstance(seconds, tuple): + seconds = random.uniform(*seconds) + # Ignore delay requests that are less than 10 milliseconds. + if seconds >= 0.01: + time.sleep(seconds) + + +class _PeriodicTimer: + """Represent a timer that periodically runs a specified function. + + Args: + interval: + The interval, in seconds, between each run. + function: + The function to run. + """ + + # The state of the timer is hold in a separate context object to avoid a + # reference cycle between the timer and the background thread. + class _Context: + interval: float + function: Callable[..., None] + args: Tuple[Any, ...] + kwargs: Dict[str, Any] + stop_event: Event + + _name: Optional[str] + _thread: Optional[Thread] + _finalizer: Optional[weakref.finalize] + + # The context that is shared between the timer and the background thread. + _ctx: _Context + + def __init__( + self, + interval: timedelta, + function: Callable[..., None], + *args: Any, + **kwargs: Any, + ) -> None: + self._name = None + + self._ctx = self._Context() + self._ctx.interval = interval.total_seconds() + self._ctx.function = function # type: ignore[assignment] + self._ctx.args = args or () + self._ctx.kwargs = kwargs or {} + self._ctx.stop_event = Event() + + self._thread = None + self._finalizer = None + + @property + def name(self) -> Optional[str]: + """Get the name of the timer.""" + return self._name + + def set_name(self, name: str) -> None: + """Set the name of the timer. + + The specified name will be assigned to the background thread and serves + for debugging and troubleshooting purposes. + """ + if self._thread: + raise RuntimeError("The timer has already started.") + + self._name = name + + def start(self) -> None: + """Start the timer.""" + if self._thread: + raise RuntimeError("The timer has already started.") + + self._thread = Thread( + target=self._run, name=self._name or "PeriodicTimer", args=(self._ctx,), daemon=True + ) + + # We avoid using a regular finalizer (a.k.a. __del__) for stopping the + # timer as joining a daemon thread during the interpreter shutdown can + # cause deadlocks. The weakref.finalize is a superior alternative that + # provides a consistent behavior regardless of the GC implementation. + self._finalizer = weakref.finalize( + self, self._stop_thread, self._thread, self._ctx.stop_event + ) + + # We do not attempt to stop our background thread during the interpreter + # shutdown. At that point we do not even know whether it still exists. 
+ self._finalizer.atexit = False + + self._thread.start() + + def cancel(self) -> None: + """Stop the timer at the next opportunity.""" + if self._finalizer: + self._finalizer() + + @staticmethod + def _run(ctx) -> None: + while not ctx.stop_event.wait(ctx.interval): + ctx.function(*ctx.args, **ctx.kwargs) + + @staticmethod + def _stop_thread(thread, stop_event): + stop_event.set() + + thread.join() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69a8295c2e553ce8bcc23b71996b1711540cad69 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a53449ffd7d7db416368eb6f232a282e41213599 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/file_based_local_timer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/file_based_local_timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73d6e33c384d77c1efbb55f015af5c032d48ddcf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/file_based_local_timer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/local_timer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/local_timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f71d29ab6f67c9079aca08c53a306686ec47d606 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/local_timer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/api.py new file mode 100644 index 0000000000000000000000000000000000000000..aa9735549282019d9ef8bd519531a117e0d1792a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/timer/api.py @@ -0,0 +1,280 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import abc +import logging +import threading +import time +from contextlib import contextmanager +from inspect import getframeinfo, stack +from typing import Any, Dict, List, Optional, Set + +__all__ = ['TimerRequest', 'TimerClient', 'RequestQueue', 'TimerServer', 'configure', 'expires'] + +log = logging.getLogger(__name__) + +class TimerRequest: + """ + Data object representing a countdown timer acquisition and release + that is used between the ``TimerClient`` and ``TimerServer``. 
+ A negative ``expiration_time`` should be interpreted as a "release" + request. + + .. note:: the type of ``worker_id`` is implementation specific. + It is whatever the TimerServer and TimerClient implementations + have on to uniquely identify a worker. + """ + + __slots__ = ["worker_id", "scope_id", "expiration_time"] + + def __init__(self, worker_id: Any, scope_id: str, expiration_time: float): + self.worker_id = worker_id + self.scope_id = scope_id + self.expiration_time = expiration_time + + def __eq__(self, other): + if isinstance(other, TimerRequest): + return ( + self.worker_id == other.worker_id + and self.scope_id == other.scope_id + and self.expiration_time == other.expiration_time + ) + return False + + +class TimerClient(abc.ABC): + """ + Client library to acquire and release countdown timers by communicating + with the TimerServer. + """ + + @abc.abstractmethod + def acquire(self, scope_id: str, expiration_time: float) -> None: + """ + Acquires a timer for the worker that holds this client object + given the scope_id and expiration_time. Typically registers + the timer with the TimerServer. + """ + pass + + @abc.abstractmethod + def release(self, scope_id: str): + """ + Releases the timer for the ``scope_id`` on the worker this + client represents. After this method is + called, the countdown timer on the scope is no longer in effect. + """ + pass + + +class RequestQueue(abc.ABC): + """ + Consumer queue holding timer acquisition/release requests + """ + + @abc.abstractmethod + def size(self) -> int: + """ + Returns the size of the queue at the time this method is called. + Note that by the time ``get`` is called the size of the queue + may have increased. The size of the queue should not decrease + until the ``get`` method is called. That is, the following assertion + should hold: + + size = q.size() + res = q.get(size, timeout=0) + assert size == len(res) + + -- or -- + + size = q.size() + res = q.get(size * 2, timeout=1) + assert size <= len(res) <= size * 2 + """ + pass + + @abc.abstractmethod + def get(self, size: int, timeout: float) -> List[TimerRequest]: + """ + Gets up to ``size`` number of timer requests in a blocking fashion + (no more than ``timeout`` seconds). + """ + pass + + +class TimerServer(abc.ABC): + """ + Entity that monitors active timers and expires them + in a timely fashion. This server is responsible for + reaping workers that have expired timers. + """ + + def __init__( + self, request_queue: RequestQueue, max_interval: float, daemon: bool = True + ): + """ + :param request_queue: Consumer ``RequestQueue`` + :param max_interval: max time (in seconds) to wait + for an item in the request_queue + :param daemon: whether to run the watchdog thread as a daemon + """ + super().__init__() + self._request_queue = request_queue + self._max_interval = max_interval + self._daemon = daemon + self._watchdog_thread: Optional[threading.Thread] = None + self._stop_signaled = False + + @abc.abstractmethod + def register_timers(self, timer_requests: List[TimerRequest]) -> None: + """ + Processes the incoming timer requests and registers them with the server. + The timer request can either be a acquire-timer or release-timer request. + Timer requests with a negative expiration_time should be interpreted + as a release-timer request. + """ + pass + + @abc.abstractmethod + def clear_timers(self, worker_ids: Set[Any]) -> None: + """ + Clears all timers for the given ``worker_ids``. 
+ """ + pass + + @abc.abstractmethod + def get_expired_timers(self, deadline: float) -> Dict[str, List[TimerRequest]]: + """ + Returns all expired timers for each worker_id. An expired timer + is a timer for which the expiration_time is less than or equal to + the provided deadline. + """ + pass + + @abc.abstractmethod + def _reap_worker(self, worker_id: Any) -> bool: + """ + Reaps the given worker. Returns True if the worker has been + successfully reaped, False otherwise. If any uncaught exception + is thrown from this method, the worker is considered reaped + and all associated timers will be removed. + """ + + def _reap_worker_no_throw(self, worker_id: Any) -> bool: + """ + Wraps ``_reap_worker(worker_id)``, if an uncaught exception is + thrown, then it considers the worker as reaped. + """ + try: + return self._reap_worker(worker_id) + except Exception: + log.exception( + "Uncaught exception thrown from _reap_worker(), " + "check that the implementation correctly catches exceptions", + ) + return True + + def _watchdog_loop(self): + while not self._stop_signaled: + try: + self._run_watchdog() + except Exception: + log.exception("Error running watchdog") + + def _run_watchdog(self): + batch_size = max(1, self._request_queue.size()) + timer_requests = self._request_queue.get(batch_size, self._max_interval) + self.register_timers(timer_requests) + now = time.time() + reaped_worker_ids = set() + for worker_id, expired_timers in self.get_expired_timers(now).items(): + log.info( + "Reaping worker_id=[%s]." + " Expired timers: %s", + worker_id, self._get_scopes(expired_timers) + ) + if self._reap_worker_no_throw(worker_id): + log.info("Successfully reaped worker=[%s]", worker_id) + reaped_worker_ids.add(worker_id) + else: + log.error( + "Error reaping worker=[%s]. Will retry on next watchdog.", worker_id + ) + self.clear_timers(reaped_worker_ids) + + def _get_scopes(self, timer_requests): + return [r.scope_id for r in timer_requests] + + def start(self) -> None: + log.info( + "Starting %s..." + " max_interval=%s," + " daemon=%s", + type(self).__name__, self._max_interval, self._daemon + ) + self._watchdog_thread = threading.Thread( + target=self._watchdog_loop, daemon=self._daemon + ) + log.info("Starting watchdog thread...") + self._watchdog_thread.start() + + def stop(self) -> None: + log.info("Stopping %s", type(self).__name__) + self._stop_signaled = True + if self._watchdog_thread: + log.info("Stopping watchdog thread...") + self._watchdog_thread.join(self._max_interval) + self._watchdog_thread = None + else: + log.info("No watchdog thread running, doing nothing") + + +_timer_client: Optional[TimerClient] = None + + +def configure(timer_client: TimerClient): + """ + Configures a timer client. Must be called before using ``expires``. + """ + global _timer_client + _timer_client = timer_client + log.info("Timer client configured to: %s", type(_timer_client).__name__) + + +@contextmanager +def expires( + after: float, scope: Optional[str] = None, client: Optional[TimerClient] = None +): + """ + Acquires a countdown timer that expires in ``after`` seconds from now, + unless the code-block that it wraps is finished within the timeframe. + When the timer expires, this worker is eligible to be reaped. The + exact meaning of "reaped" depends on the client implementation. In + most cases, reaping means to terminate the worker process. 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce2bbf5bbe2348bb0eaa411a034710dd14f7648e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__init__.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .api import get_env_variable_or_raise, get_socket_with_port, macros  # noqa: F401
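The helpers re-exported here live in ``torch.distributed.elastic.utils.api`` (not shown in this diff). A small illustrative use; the exact failure behavior of ``get_env_variable_or_raise`` is an assumption here::

    from torch.distributed.elastic.utils import get_env_variable_or_raise, get_socket_with_port

    # Fail fast if a required rendezvous variable is missing.
    master_addr = get_env_variable_or_raise("MASTER_ADDR")

    # Grab a socket bound to a free local port, e.g. to pick a port for a store.
    sock = get_socket_with_port()
    port = sock.getsockname()[1]
    sock.close()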
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17be7eb38f9994231a1f8e6146e67d974d79ec7d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..555bc6c0956b66645761a8e73b4ed2f20b944a5e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/api.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/distributed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/distributed.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e1ba05ee17ab4fcb498141b177a63d855eba586
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/distributed.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/log_level.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/log_level.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4605f74e229e7f37881499cdb072302188782ee1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/log_level.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/logging.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/logging.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45444c886fe8c675faac660c3b6cbd373f31223a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/logging.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/store.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/store.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68716465cf525ef41e303c66896cac8ddb4db1e8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/__pycache__/store.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/log_level.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/log_level.py
new file mode 100644
index 0000000000000000000000000000000000000000..87ea0f7d64182488b40fd7fed6965ce57ec475a0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/log_level.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+def get_log_level() -> str:
+    """
+    Return the default log level for pytorch.
+    """
+    return "WARNING"
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/logging.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4f1345e4c33993cb6cf020bd612ab28ff86c59a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/logging.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import inspect
+import logging
+import os
+import warnings
+from typing import Optional
+
+from torch.distributed.elastic.utils.log_level import get_log_level
+
+
+def get_logger(name: Optional[str] = None):
+    """
+    Utility function to set up a simple logger that writes
+    to stderr. The log level is taken from the LOGLEVEL
+    environment variable (defaulting to WARNING). The function uses the
+    module name of the caller if no name is provided.
+
+    Args:
+        name: Name of the logger. If no name is provided, the name will
+              be derived from the call stack.
+    """
+
+    # Derive the name of the caller, if none provided
+    # Use depth=2 since this function takes up one level in the call stack
+    return _setup_logger(name or _derive_module_name(depth=2))
+
+
+def _setup_logger(name: Optional[str] = None):
+    log = logging.getLogger(name)
+    log.setLevel(os.environ.get("LOGLEVEL", get_log_level()))
+    return log
+
+
+def _derive_module_name(depth: int = 1) -> Optional[str]:
+    """
+    Derives the name of the caller module from the stack frames.
+
+    Args:
+        depth: The position of the frame in the stack.
+    """
+    try:
+        stack = inspect.stack()
+        assert depth < len(stack)
+        # FrameInfo is just a named tuple: (frame, filename, lineno, function, code_context, index)
+        frame_info = stack[depth]
+
+        module = inspect.getmodule(frame_info[0])
+        if module:
+            module_name = module.__name__
+        else:
+            # inspect.getmodule(frame_info[0]) does NOT work (returns None) in
+            # binaries built with @mode/opt; fall back to the filename
+            # (minus the .py extension) as the module name.
+            filename = frame_info[1]
+            module_name = os.path.splitext(os.path.basename(filename))[0]
+        return module_name
+    except Exception as e:
+        warnings.warn(
+            f"Error deriving logger module name, using <None>. Exception: {e}",
+            RuntimeWarning,
+        )
+        return None
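Illustrative use of the helper above: with no explicit name, the logger is named after the calling module, and its level follows the ``LOGLEVEL`` environment variable::

    from torch.distributed.elastic.utils.logging import get_logger

    logger = get_logger()  # named after the caller's module
    logger.warning("visible with the default WARNING level")
    logger.info("visible only when e.g. LOGLEVEL=INFO is exported")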
+ """ + try: + stack = inspect.stack() + assert depth < len(stack) + # FrameInfo is just a named tuple: (frame, filename, lineno, function, code_context, index) + frame_info = stack[depth] + + module = inspect.getmodule(frame_info[0]) + if module: + module_name = module.__name__ + else: + # inspect.getmodule(frame_info[0]) does NOT work (returns None) in + # binaries built with @mode/opt + # return the filename (minus the .py extension) as modulename + filename = frame_info[1] + module_name = os.path.splitext(os.path.basename(filename))[0] + return module_name + except Exception as e: + warnings.warn( + f"Error deriving logger module name, using . Exception: {e}", + RuntimeWarning, + ) + return None diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/store.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/store.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7abab9291c617b2851d8cea4a32cfd19413ed4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/utils/store.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from datetime import timedelta +from typing import List + + +def get_all(store, rank: int, prefix: str, size: int): + r""" + Given a store and a prefix, the method goes through the array of keys + of the following format: ``{prefix}{idx}``, where idx is in a range + from 0 to size, and tries to retrieve the data. + + The Rank0 process waits at the end to make sure all other processes + finished the procedure before exiting. + + Usage + + :: + + values = get_all(store, 'torchelastic/data', 3) + value1 = values[0] # retrieves the data for key torchelastic/data0 + value2 = values[1] # retrieves the data for key torchelastic/data1 + value3 = values[2] # retrieves the data for key torchelastic/data2 + + """ + data_arr = [] + for idx in range(size): + data = store.get(f"{prefix}{idx}") + data_arr.append(data) + store.set(f"{prefix}{rank}.FIN", b"FIN") + if rank == 0: + # Rank0 runs the TCPStore daemon, as a result it needs to exit last. + # Otherwise, the barrier may timeout if rank0 process finished the work + # before other processes finished `get_all` method + for node_rank in range(size): + store.get(f"{prefix}{node_rank}.FIN") + + return data_arr + + +def synchronize( + store, + data: bytes, + rank: int, + world_size: int, + key_prefix: str, + barrier_timeout: float = 300, +) -> List[bytes]: + """ + Synchronizes ``world_size`` agents between each other using the underlying c10d store. + The ``data`` will be available on each of the agents. + + Note: The data on the path is not deleted, as a result there can be stale data if + you use the same key_prefix twice. + """ + store.set_timeout(timedelta(seconds=barrier_timeout)) + store.set(f"{key_prefix}{rank}", data) + agent_data = get_all(store, rank, key_prefix, world_size) + return agent_data + + +def barrier( + store, rank: int, world_size: int, key_prefix: str, barrier_timeout: float = 300 +) -> None: + """ + A global lock between agents. + + Note: Since the data is not removed from the store, the barrier can be used + once per unique ``key_prefix``. + """ + data = f"{rank}".encode() + synchronize(store, data, rank, world_size, key_prefix, barrier_timeout)