diff --git a/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..1b1fc27fc14bda8ae1533134f3d02bc10f53f9d5 --- /dev/null +++ b/ckpts/universal/global_step120/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b76ba44617583c711f86f1de7e4a659c37648ded1f657f897dba30513ef8656 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..7553423326fb3250b6d14f958a6a12661db5ea99 --- /dev/null +++ b/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34a40fb1e962c52cede791fa69ed0d037966235db1a663f491aadda732b091ed +size 33555612 diff --git a/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..a7b50a597a5b633257d5b9b3be19114936c04639 --- /dev/null +++ b/ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b604db8453bd1484546feaeba94c1ea31052848a6de2234e191cf3dfd534717 +size 33555533 diff --git a/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..a78a256ab655ae4640314b43617699a51cdb25de --- /dev/null +++ b/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04f24ac126b0f57c4a53ee91ea64fd799da90b0b6d07ab50b38ab25eb682b804 +size 50332843 diff --git a/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..1a337c84bfd0e8c10a50cae41f98ef97d7475886 --- /dev/null +++ b/ckpts/universal/global_step120/zero/26.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2eaa8e4a45847d44158859761ec77111585296ae41b4e64dac17284d4bcd62d +size 50332749 diff --git a/ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..e37ba0891a184b75c7d0b831ec7a78408def4fc5 --- /dev/null +++ b/ckpts/universal/global_step120/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7ea30c1e8b0a2b4cb29ab44c993294f6c852a73425f9be0a57a01d9f6478834 +size 415237291 diff --git a/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea6ec6c5aec74d6ae76d1055959d9c6ddb91ac3c Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/context.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1d50d636f78d0c4c1106b476e228d64089ca3a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/context.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/debug_prims.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/debug_prims.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac0d6611a54625e53420733e2b9fe6dea200ec5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/debug_prims.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af3b23e4f9c4dfa46c76fe0aa2df48acd0b7894f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/rng_prims.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/rng_prims.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3085be7ed77715f854215bc09a97a95bb7b876c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_prims/__pycache__/rng_prims.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_prims/rng_prims.py b/venv/lib/python3.10/site-packages/torch/_prims/rng_prims.py new file mode 100644 index 0000000000000000000000000000000000000000..1a149047e697b36ff2dc31a92f1f8639471906cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_prims/rng_prims.py @@ -0,0 +1,268 @@ +from typing import Optional, Tuple + +import torch +import torch.utils._pytree as pytree +from torch import _prims +from torch._C import DispatchKey +from torch._higher_order_ops.utils import autograd_not_implemented +from torch._ops import HigherOrderOperator + +from torch._prims_common import CUDARngStateHelper, make_contiguous_strides_for +from torch._prims_common.wrappers import backwards_not_supported +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.fx.experimental.proxy_tensor import ( + disable_proxy_modes_tracing, + ProxyTorchDispatchMode, + track_tensor_tree, +) +from torch.types import _device, _dtype + + +rngprim_namespace = "rngprims" +rngprim = torch.library.Library(rngprim_namespace, "DEF") +rngprim_impl = torch.library.Library( + rngprim_namespace, "IMPL", "CompositeExplicitAutograd" +) +rngprim_autograd_impl = torch.library.Library(rngprim_namespace, "IMPL", "Autograd") +rngprim_meta_impl = torch.library.Library(rngprim_namespace, "IMPL", "Meta") + + +def throw_on_non_cuda(device): + raise RuntimeError( + f"You are trying to functionalize a {device.type} RNG operator but {device.type} does not " + f"use Philox/counter-based RNG. Therefore, functionalizing a {device.type} RNG operator is " + "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU." 
+ ) + + +def register_rng_prim(name, schema, impl_aten, impl_meta, doc, tags=None): + rngprim.define(schema) + rngprim_impl.impl(name, impl_aten) + rngprim_meta_impl.impl(name, impl_meta) + + prim_packet = getattr(torch._ops.ops.rngprims, name) + prim = prim_packet.default + if tags: + prim._tags = tags + + rngprim_autograd_impl.impl(name, backwards_not_supported(prim)) + + for p in (prim_packet, prim): + p.__doc__ = doc + p.return_type = torch._prims_common.RETURN_TYPE.NEW # type: ignore[attr-defined] + + p.schema = schema + p.impl_aten = impl_aten + p.prim_meta_impl = impl_meta + + +# Philox rand offsets could be shared in future with other philox ops, so +# keeping these functions in global scope. +def philox_rand_offset_meta( + shape: torch.Size, +): + return _prims.TensorLike(torch.tensor(0, dtype=torch.int64)) + + +def philox_rand_offset( + shape: torch.Size, +): + # For impl, look at the function calc_execution_policy in the file + # aten/src/ATen/native/cuda/DistributionTemplates.h. The impl was copied at + # commit hash 72aa0667bd16707d50eb8fa337092a1f5d11dfb6 + numel_scalar = 1 + for dim_size in shape: + numel_scalar *= dim_size + numel = torch.scalar_tensor(numel_scalar, dtype=torch.int64) + + block_size = 256 + unroll = 4 + curand4_engine_calls = 4 + device_property = torch.cuda.get_device_properties(torch.cuda.current_device()) + blocks_per_sm = device_property.max_threads_per_multi_processor // block_size + grid_size = (numel + block_size - 1) // block_size + grid_size = min(grid_size, device_property.multi_processor_count * blocks_per_sm) + offset = ( + (numel - 1) // (block_size * grid_size * unroll) + 1 + ) * curand4_engine_calls + return offset + + +def register_philox_rand(): + name = "philox_rand" + schema = "philox_rand(SymInt[] size, Tensor seed, Tensor offset, int[]? stride, Device? device=None, ScalarType? dtype=None) -> (Tensor, Tensor)" # noqa: B950 + + def _philox_rand_meta( + shape: torch.Size, + seed: torch.Tensor, + offset: torch.Tensor, + stride: Optional[Tuple[int, ...]], + device: _device, + dtype: _dtype, + ): + # stride arg will be useful for distributed usecase. Currently, its unused. + assert stride is None + stride = make_contiguous_strides_for(shape) + random_values = _prims.TensorMeta( + shape=shape, strides=stride, dtype=dtype, device=device + ) + offset = philox_rand_offset_meta(shape) + return (random_values, offset) + + def _philox_rand( + shape: torch.Size, + seed: torch.Tensor, + offset: torch.Tensor, + stride: Optional[Tuple[int, ...]], + device: _device, + dtype: _dtype, + ): + # stride arg will be useful for distributed usecase. Currently, its unused. 
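        # Eager CUDA path: non-CUDA devices raise, since they do not use a
        # Philox/counter-based generator. Otherwise, fork the RNG state on the
        # target device, install the supplied (seed, offset) pair into the Philox
        # generator, draw the values with torch.rand, and return them together
        # with the advanced offset from philox_rand_offset so functionalized
        # callers can thread RNG state explicitly.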
+ assert stride is None + if device.type == "cpu": + devices = [] + else: + devices = [device] + + if device.type != "cuda": + raise throw_on_non_cuda(device) + + with torch.random.fork_rng(devices): + CUDARngStateHelper.set_torch_state_tensor(seed, offset) + random_values = torch.rand(shape, device=device, dtype=dtype) + + return random_values, philox_rand_offset(shape) + + register_rng_prim( + name=name, + schema=schema, + impl_aten=_philox_rand, + impl_meta=_philox_rand_meta, + doc="Philox based stateless rand operator", + tags=(torch.Tag.nondeterministic_seeded,), + ) + + +def get_device(args, kwargs): + if kwargs.get("device"): + device = kwargs.get("device") + if isinstance(device, str): + device = torch.device(device) + return device.type + + devices = {arg.device.type for arg in args if isinstance(arg, torch.Tensor)} + if any(dev == "cuda" for dev in devices): + return "cuda" + elif any(dev == "cpu" for dev in devices): + return "cpu" + return None + + +def register_run_and_save_rng_state_op(): + run_and_save_rng_state = HigherOrderOperator("run_and_save_rng_state") + + run_and_save_rng_state.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(run_and_save_rng_state, deferred_error=True) + ) + + @run_and_save_rng_state.py_impl(DispatchKey.CUDA) + def impl_cuda(op, *args, **kwargs): + return torch.cuda.get_rng_state(), op(*args, **kwargs) + + @run_and_save_rng_state.py_impl(DispatchKey.CPU) + def impl_cpu(op, *args, **kwargs): + return torch.get_rng_state(), op(*args, **kwargs) + + @run_and_save_rng_state.py_impl(DispatchKey.BackendSelect) + def impl_backend_select(op, *args, **kwargs): + impl_map = {"cuda": impl_cuda, "cpu": impl_cpu} + device = get_device(args, kwargs) + assert device in impl_map, f"Backend not supported for {device}" + impl = impl_map[device] + return impl(op, *args, **kwargs) + + @run_and_save_rng_state.py_impl(FakeTensorMode) + def impl_fake_tensor_mode(mode, op, *args, **kwargs): + # Check device to call the right impl + with mode: + return impl_backend_select(op, *args, **kwargs) + + @run_and_save_rng_state.py_impl(ProxyTorchDispatchMode) + def impl_proxy_dispatch_mode(mode, op, *args, **kwargs): + if mode.enable_tracing: + out = impl_backend_select(op, *args, **kwargs) + proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, (op, *args)) + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + out_proxy = mode.tracer.create_proxy( + "call_function", run_and_save_rng_state, proxy_args, proxy_kwargs + ) + return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + else: + return run_and_save_rng_state(op, *args, **kwargs) + + return run_and_save_rng_state + + +def register_run_with_rng_state_op(): + run_with_rng_state = HigherOrderOperator("run_with_rng_state") + + run_with_rng_state.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(run_with_rng_state, deferred_error=True) + ) + + @run_with_rng_state.py_impl(DispatchKey.CUDA) + def impl_cuda(rng_state, op, *args, **kwargs): + current_state = torch.cuda.get_rng_state() + torch.cuda.set_rng_state(rng_state.cpu()) + out = op(*args, **kwargs) + torch.cuda.set_rng_state(current_state) + return out + + @run_with_rng_state.py_impl(DispatchKey.CPU) + def impl_cpu(rng_state, op, *args, **kwargs): + current_state = torch.get_rng_state() + torch.set_rng_state(rng_state) + out = op(*args, **kwargs) + torch.set_rng_state(current_state) + return out + + @run_with_rng_state.py_impl(ProxyTorchDispatchMode) + def impl_proxy_dispatch_mode(mode, rng_state, op, *args, **kwargs): + if 
mode.enable_tracing: + with disable_proxy_modes_tracing(): + out = run_with_rng_state(rng_state, op, *args, **kwargs) + proxy_args = pytree.tree_map( + mode.tracer.unwrap_proxy, (rng_state, op, *args) + ) + proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs) + out_proxy = mode.tracer.create_proxy( + "call_function", run_with_rng_state, proxy_args, proxy_kwargs + ) + return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer) + else: + return run_with_rng_state(rng_state, op, *args, **kwargs) + + @run_with_rng_state.py_impl(DispatchKey.BackendSelect) + def impl_backend_select(rng_state, op, *args, **kwargs): + impl_map = {"cuda": impl_cuda, "cpu": impl_cpu} + device = get_device(args, kwargs) + assert device in impl_map, f"Backend not supported for {device}" + impl = impl_map[device] + return impl(rng_state, op, *args, **kwargs) + + @run_with_rng_state.py_impl(FakeTensorMode) + def impl_fake_tensor_mode(mode, rng_state, op, *args, **kwargs): + # Skip setting the set_rng_state as it does not work well with fake tensors. + # And it does not matter for the fake tensor mode. + with mode: + return op(*args, **kwargs) + + return run_with_rng_state + + +run_and_save_rng_state = register_run_and_save_rng_state_op() +run_with_rng_state = register_run_with_rng_state_op() + + +def register_rng_prims(): + register_philox_rand() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb05a3477176368bf833c50f4fabacf1ab15353 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/__init__.py @@ -0,0 +1,132 @@ +import os +import sys +from enum import Enum +import pdb +import io + +import torch + +def is_available() -> bool: + """ + Return ``True`` if the distributed package is available. + + Otherwise, + ``torch.distributed`` does not expose any other APIs. Currently, + ``torch.distributed`` is available on Linux, MacOS and Windows. Set + ``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source. + Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows, + ``USE_DISTRIBUTED=0`` for MacOS. + """ + return hasattr(torch._C, "_c10d_init") + + +if is_available() and not torch._C._c10d_init(): + raise RuntimeError("Failed to initialize torch.distributed") + +# Custom Runtime Errors thrown from the distributed package +DistError = torch._C._DistError +DistBackendError = torch._C._DistBackendError +DistNetworkError = torch._C._DistNetworkError +DistStoreError = torch._C._DistStoreError + +if is_available(): + from torch._C._distributed_c10d import ( + Store, + FileStore, + TCPStore, + ProcessGroup as ProcessGroup, + Backend as _Backend, + PrefixStore, + Reducer, + Logger, + BuiltinCommHookType, + GradBucket, + Work as _Work, + _DEFAULT_FIRST_BUCKET_BYTES, + _register_comm_hook, + _register_builtin_comm_hook, + _broadcast_coalesced, + _compute_bucket_assignment_by_size, + _verify_params_across_processes, + _test_python_store, + DebugLevel, + get_debug_level, + set_debug_level, + set_debug_level_from_env, + _make_nccl_premul_sum, + ) + + class _DistributedPdb(pdb.Pdb): + """ + Supports using PDB from inside a multiprocessing child process. 
+ + Usage: + _DistributedPdb().set_trace() + """ + def interaction(self, *args, **kwargs): + _stdin = sys.stdin + try: + sys.stdin = open('/dev/stdin') + pdb.Pdb.interaction(self, *args, **kwargs) + finally: + sys.stdin = _stdin + + def breakpoint(rank: int = 0): + """ + Set a breakpoint, but only on a single rank. All other ranks will wait for you to be + done with the breakpoint before continuing. + + Args: + rank (int): Which rank to break on. Default: ``0`` + """ + if get_rank() == rank: + pdb = _DistributedPdb() + pdb.message( + "\n!!! ATTENTION !!!\n\n" + f"Type 'up' to get to the frame that called dist.breakpoint(rank={rank})\n" + ) + pdb.set_trace() + barrier() + + if sys.platform != "win32": + from torch._C._distributed_c10d import ( + HashStore, + _round_robin_process_groups, + ) + + from .distributed_c10d import * # noqa: F403 + + # Variables prefixed with underscore are not auto imported + # See the comment in `distributed_c10d.py` above `_backend` on why we expose + # this. + + from .distributed_c10d import ( + _all_gather_base, + _reduce_scatter_base, + _create_process_group_wrapper, + _rank_not_in_group, + _coalescing_manager, + _CoalescingManager, + _get_process_group_name, + ) + + from .rendezvous import ( + rendezvous, + _create_store_from_options, + register_rendezvous_handler, + ) + + from .remote_device import _remote_device + + set_debug_level_from_env() + +else: + # This stub is sufficient to get + # python test/test_public_bindings.py -k test_correct_module_names + # working even when USE_DISTRIBUTED=0. Feel free to add more + # stubs as necessary. + # We cannot define stubs directly because they confuse pyre + + class _ProcessGroupStub: + pass + sys.modules["torch.distributed"].ProcessGroup = _ProcessGroupStub # type: ignore[attr-defined] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_composable_state.py b/venv/lib/python3.10/site-packages/torch/distributed/_composable_state.py new file mode 100644 index 0000000000000000000000000000000000000000..f50da98f8c63e22a294c5739c172339904cbccab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_composable_state.py @@ -0,0 +1,37 @@ +from typing import cast, Dict, Optional + +import torch.nn as nn + + +class _State: + pass + + +_module_state_mapping: Dict[nn.Module, _State] = {} + + +def _insert_module_state(module: nn.Module, state: _State) -> None: + global _module_state_mapping + assert module not in _module_state_mapping, f"Inserting {module} more than once." + _module_state_mapping[module] = state + + +def _get_module_state(module: nn.Module) -> Optional[_State]: + """ + Return the ``_State`` in ``model``. + + Given a ``module``, this API finds out if the module is also a ``_State`` + instance or if the module is managed by a composable API. If the module + is also a ``_State``, ``module`` will be casted to ``_State` and returned. + If it is managed by a composable API, the corresponding ``_State`` will + be returned. 
+ """ + global _module_state_mapping + if isinstance(module, _State): + return cast(_State, module) + else: + # https://github.com/pytorch/pytorch/issues/107054 + if module in _module_state_mapping: + return _module_state_mapping[module] + else: + return None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py b/venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py new file mode 100644 index 0000000000000000000000000000000000000000..e6fc1c0619c61b63b26028713d85bbff66f12799 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py @@ -0,0 +1,1084 @@ +import sys +import warnings +from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +import torch.distributed as dist +import torch.distributed.distributed_c10d as c10d +from torch._custom_ops import impl_abstract +from torch.distributed.device_mesh import DeviceMesh +from torch.fx.experimental.proxy_tensor import get_innermost_proxy_mode + +from . import _functional_collectives_impl as fun_col_impl +from ._functional_collectives_impl import ( # noqa: F401 + _register_tensor_wrapper, + native_funcol_enabled, +) + +try: + from torch.utils._cxx_pytree import tree_map_only +except ImportError: + from torch.utils._pytree import tree_map_only # type: ignore[no-redef] + + +if torch._running_with_deploy(): + + def is_torchdynamo_compiling(): + """Can't import torchdynamo in torchdeploy builds currently.""" + return False + +else: + try: + from torch.compiler import is_dynamo_compiling as is_torchdynamo_compiling + except Exception: + warnings.warn( + "Unable to import torchdynamo util `is_torchdynamo_compiling`, so won't support torchdynamo correctly" + ) + + def is_torchdynamo_compiling(): + return False + + +""" +New traceable, functional collectives. +RFC: https://github.com/pytorch/pytorch/issues/93173 + + compiler: trace these ops with plain-old-data schemas, then choose how to lower them. + eager: execute these 'functional' ops which in eager return AsyncCollectiveTensor subclasses, + automatically calling .wait() on underlying/hidden async 'work' obj only when fed to + a downstream op. + +Issues: +* Where should these ops live? Couldn't `import torch` if putting these ops in existing torch.distributed files +* Proper support for eager requires inplace ops. We should explore having it as an option for the API. +""" + +""" +Functional collectives are asynchronous only and we perform implicit stream synchronization +on behalf of the user. + +We use AsyncCollectiveTensor to wrap the result tensor of a collective and it lets us witness +first usage of the tensor and insert cross stream sync at the right place. + +The above are the easy bits, the hard one is how we match the Work object returned by +c10d and the tensor AsyncCollectiveTensor wraps. We alloc the tensor inside the collective +op implementation (see ``clone()`` call in ``_all_reduce``) and then it's handled by the +dispatcher which might call other implementations that are allowed to change the returned +tensor - even return a tensor with a different shape (see ``torch.vmap``). + +This means the caller of our ops receives a Tensor that is not guaranteed to be the same +allocated by our implementations and that makes pairing The AsyncTensor to the original +tensor a lot harder. This pairing is needed so we can lookup the Work object to use. 
+ +Originally, we tried WeakKeyDictionary to map from Tensor to Work, but because Tensor's +identity is not stable across dispatch, the op caller would end up with a different Tensor +instance that would not match any in the dictionary. + +With Tensor identity out of the question, we decided use the tensor data pointer, which +should be stable across all the Tensor changes done during dispatch. + +We have a dictionary of tensor::data_ptr -> Work that we insert right after we call into c10d. + +We use this dictionary when AsyncCollectiveTensor is used to invoke Work::wait() + +Finally, we setup a finalizer against the tensor wrapper to observe it getting collected so we +can clean up stale entries in the dictionary. + +To eliminate the possibility of races we have a global version counter that is used by the finalizer. + +As a wise man said once: Don't cross the streams (https://www.youtube.com/watch?v=wyKQe_i9yyo) + +""" + +""" +Functional collectives can accept any of these types to describe the ranks participating in collectives. + +The different types will be desugared to a canonical format +""" +RANK_TYPES = Union[ + List[int], + List[List[int]], + dist.ProcessGroup, + DeviceMesh, + Tuple["dist._tensor.DeviceMesh", int], + str, +] + + +""" +User facing APIs for functional collectives +------------------------------------------- + +These apis are called by user code and expected to work both in eager execution and compilation, +but there are significant differences to how the two modes are implemented underneath. + +Eager execution is 'optimized' using a tensor subclass that schedules the synchronization (via wait_tensor() op) +just before the tensor is first used. Compiled tracing currently relies on the compiler to perform this optimization, +and cannot yet correctly trace the AsyncTensor wrapper class. In the future, these paths may be unified +if sufficient subclass support is added in dynamo. + +Example: all_reduce is an entrypoint API, and other collectives follow a similar pattern. + +Here's how it works under torch.compile/dynamo: +all_reduce(...) + |--> _expand_group(...) - desugars processgroup into canonical/traceable format + |--> c10d_functional.all_reduce(...) - dynamo captures this op call, doesn't trace deeper + |--> _maybe_wrap_tensor(...) - wait_tensor() op is immediately called, no AsyncTensor subclass needed + +And under eager execution: +all_reduce(...) + |--> _expand_group(...) - same as above, but less critical for eager + |--> c10d_functional.all_reduce(...) - dispatches to real kernel OR records op in trace + |--> _maybe_wrap_tensor(...) - AsyncTensor wrapper applied to returned tensor, + which issues wait_tensor() at the time of first use +""" + + +def wait_tensor(tensor): + """ + Wait on a tensor returned by the collectives ops. + + Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA. + """ + if native_funcol_enabled(): + return torch.ops._c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined] + else: + return torch.ops.c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined] + + +def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str = ""): + """ + Broadcasts the tensor to all processes in the given process group. + + Args: + src (int): Source rank + group (ProcessGroup or List[int]): The process group to work on. + tag (str, optional): A unique identifier for the collective. 
Default: empty string + """ + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + tensor = torch.ops._c10d_functional.broadcast(self, src, group_name) + else: + tag, rankset, group_size = _expand_group(group, tag) + tensor = torch.ops.c10d_functional.broadcast( + self, src, tag, rankset, group_size + ) + return _maybe_wrap_tensor(tensor) + + +def all_reduce(self: torch.Tensor, reduceOp: str, group: RANK_TYPES, tag: str = ""): + """ + Reduces the tensor data across all machines in such a way that all get + the final result. + + The input tensor is left unmodified. + + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + + :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. + """ + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + tensor = torch.ops._c10d_functional.all_reduce( + self, reduceOp.lower(), group_name + ) + else: + tag, rankset, group_size = _expand_group(group, tag) + tensor = torch.ops.c10d_functional.all_reduce( # type: ignore[attr-defined] + self, + reduceOp, + tag, + rankset, + group_size, + ) + return _maybe_wrap_tensor(tensor) + + +def all_gather_tensor( + self: torch.Tensor, + gather_dim: int, + group: RANK_TYPES, + tag: str = "", +): + """ + Gather tensor data across from all machines and concatenate over ``gather_dim``. + + Note that it currently only supports gather_dim = 0. + + The input tensor is left unmodified. + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + + :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. 
+ """ + assert self.is_contiguous() + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + tensor = torch.ops._c10d_functional.all_gather_into_tensor( + self, group_size, group_name + ) + else: + tag, rankset, group_size = _expand_group(group, tag) + tensor = torch.ops.c10d_functional.all_gather_into_tensor( # type: ignore[attr-defined] + self, + tag, + rankset, + group_size, + ) + res = _maybe_wrap_tensor(tensor) + # TODO this should be done inside AsyncCollectiveTensor to delay the wait() call + if gather_dim != 0: + # torch.cat access the data so we already need to wait here, first do wait + # and then chunk + cat avoid us going through ACT dispatching logic again + if isinstance(res, AsyncCollectiveTensor): + res = res.wait() # type: ignore[attr-defined] + res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim) + return res + + +def reduce_scatter_tensor( + self: torch.Tensor, + reduceOp: str, + scatter_dim: int, + group: RANK_TYPES, + tag: str = "", +): + """ + Reduces the tensor data across all machines in such a way that all get + the final result, then scatter the results to corresponding ranks. + + + The input tensor is left unmodified. + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. + """ + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + else: + tag, rankset, group_size = _expand_group(group, tag) + + assert ( + self.size(scatter_dim) % group_size == 0 + ), f"input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size}" + if scatter_dim != 0: + tensor_list = torch.chunk(self, group_size, dim=scatter_dim) + self = torch.cat(tensor_list) + + if native_funcol_enabled(): + tensor = torch.ops._c10d_functional.reduce_scatter_tensor( + self, + reduceOp.lower(), + group_size, + group_name, # type: ignore[possibly-undefined] + ) + else: + tensor = torch.ops.c10d_functional.reduce_scatter_tensor( # type: ignore[attr-defined] + self, + reduceOp, + tag, + rankset, # type: ignore[possibly-undefined] + group_size, + ) + res = _maybe_wrap_tensor(tensor) + return res + + +def all_reduce_coalesced( + self: List[torch.Tensor], reduceOp: str, group: RANK_TYPES, tag: str = "" +) -> List[torch.Tensor]: + """ + Reduces a list of tensors across all machines in such a way that all get + the final result. + + The all tensors in the input list are left unmodified. + + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + + :: N.B. 
If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. + """ + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + tensor_list = torch.ops._c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined] + self, + reduceOp.lower(), + group_name, + ) + else: + tag, rankset, group_size = _expand_group(group, tag) + tensor_list = torch.ops.c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined] + self, + reduceOp, + tag, + rankset, + group_size, + ) + return list(map(_maybe_wrap_tensor, tensor_list)) + + +def all_gather_into_tensor_coalesced( + self: List[torch.Tensor], group: RANK_TYPES, tag: str = "" +) -> List[torch.Tensor]: + """ + Gather a list of tensors across from all machines. + + Note that it currently only supports gather_dim = 0. + + The input tensor is left unmodified. + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + + :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. + """ + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + tensor_list = torch.ops._c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined] + self, + group_size, + group_name, + ) + else: + tag, rankset, group_size = _expand_group(group, tag) + tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined] + self, + tag, + rankset, + group_size, + ) + return list(map(_maybe_wrap_tensor, tensor_list)) + + +def reduce_scatter_tensor_coalesced( + inputs: List[torch.Tensor], + reduceOp: str, + scatter_dim: List[int], + group: RANK_TYPES, + tag: str = "", +) -> List[torch.Tensor]: + """ + Reduces a list of tensors across all machines in such a way that all get + the final result, then scatter the results to corresponding ranks. + + The input tensors are left unmodified. + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + + :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. 
+ """ + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + else: + tag, rankset, group_size = _expand_group(group, tag) + + assert len(scatter_dim) == len(inputs) + for idx, (dim, tensor) in enumerate(zip(scatter_dim, inputs)): + assert ( + tensor.size(dim) % group_size == 0 + ), f"input dimension {dim} ({tensor.size(dim)} must be a multiple of group_size {group_size} for tensor at index {idx}" + if dim != 0: + tensor_list = torch.chunk(tensor, group_size, dim=dim) + inputs[idx] = torch.cat(tensor_list) + + if native_funcol_enabled(): + tensor_list = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined] + inputs, + reduceOp.lower(), + group_size, + group_name, # type: ignore[possibly-undefined] + ) + else: + tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined] + inputs, + reduceOp, + tag, + rankset, # type: ignore[possibly-undefined] + group_size, + ) + + return list(map(_maybe_wrap_tensor, tensor_list)) + + +# This is a bit unsafe: it checks if the first argument in the schema reports as a non-mutable alias. +# Today, this maps 1:1 with "aten ops that are views". +def _is_view_op(tgt): + assert isinstance(tgt, torch._ops.OpOverload) + schema = tgt._schema + if len(schema.arguments) > 0: + first_arg = schema.arguments[0] + # check if op is a view + return first_arg.alias_info is not None and not first_arg.alias_info.is_write + + +def all_to_all_single( + self: torch.Tensor, + output_split_sizes: Optional[List[int]], + input_split_sizes: Optional[List[int]], + group: RANK_TYPES, + tag: str = "", +) -> torch.Tensor: + """ + Each process splits input tensor and then scatters the split list + to all processes in a group. Then concatenate the received tensors from all + the processes in the group and return single output tensor. + + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh + + :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover + that information and perform collective algebraic optimization. Use other forms of input for that. 
+ """ + if output_split_sizes is not None: + assert all( + isinstance(size, (int, torch.SymInt)) for size in output_split_sizes + ), output_split_sizes + if input_split_sizes is not None: + assert all( + isinstance(size, (int, torch.SymInt)) for size in input_split_sizes + ), input_split_sizes + if native_funcol_enabled(): + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + if output_split_sizes is None or input_split_sizes is None: + assert output_split_sizes is None and input_split_sizes is None, ( + "output_split_sizes and input_split_sizes must either be " + "specified together or both set to None" + ) + output_split_sizes = [self.shape[0] // group_size] * group_size + input_split_sizes = output_split_sizes + tensor = torch.ops._c10d_functional.all_to_all_single( # type: ignore[attr-defined] + self, + output_split_sizes, + input_split_sizes, + group_name, + ) + else: + tag, rankset, group_size = _expand_group(group, tag) + tensor = torch.ops.c10d_functional.all_to_all_single( # type: ignore[attr-defined] + self, + output_split_sizes, + input_split_sizes, + tag, + rankset, + group_size, + ) + return _maybe_wrap_tensor(tensor) + + +def permute_tensor( + self: torch.Tensor, + src_dst: List[int], + group: RANK_TYPES, + tag: str = "", +) -> torch.Tensor: + """ + Permutes the elements of the tensor according to the given source/destination pairs. `src_dst` should + be defined such that src_dst[m] == n means m sends to n. + + Group can be one of: + List[int]: ranks participating in the collective. + List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. + ProcessGroup: Will perform a collective using the ranks and tag of the PG. + DeviceMesh: Do a SPMD collective over all ranks of the mesh + (DeviceMesh, int): Do a MPMD collective over one + """ + t, rankset, group_size = _expand_group(group, tag) + local_pg = c10d._find_or_create_pg_by_ranks_and_tag(t, rankset, group_size) + + output_split_sizes = [0] * group_size + input_split_sizes = [0] * group_size + for src, dst in enumerate(src_dst): + if src == dist.get_rank(local_pg): + input_split_sizes[dst] = self.numel() + if dst == dist.get_rank(local_pg): + output_split_sizes[src] = self.numel() + + return all_to_all_single(self, output_split_sizes, input_split_sizes, group, tag) + + +class AsyncCollectiveTensor(torch.Tensor): + r""" + A Tensor wrapper subclass that is used to trigger a call to wait + prior to first use of the underlying tensor. 
+ Use it inside functional collective pytorch wrappers like the following: + def functional_collective(self, group, tag): + tag, rankset, group_size = _expand_group(group, tag) + tensor = torch.ops.c10d_functional.{collective}(self, tag, rankset, group_size) + return _maybe_wrap_tensor(tensor) + """ + elem: torch.Tensor + completed: bool + + __slots__ = ["elem", "completed"] + + @staticmethod + def __new__(cls, elem: torch.Tensor): + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, + elem.size(), + strides=elem.stride(), + storage_offset=elem.storage_offset(), + dtype=elem.dtype, + layout=elem.layout, + device=elem.device, + requires_grad=False, + ) + r.elem = elem + r.completed = False + return r + + def __tensor_flatten__(self): + return ["elem"], None + + def tolist(self): + self.trigger_wait() + return self.elem.tolist() + + @staticmethod + def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride): + assert meta is None + elem = inner_tensors["elem"] + return AsyncCollectiveTensor(elem) + + def __repr__(self): + self.trigger_wait() + return f"AsyncCollectiveTensor({self.elem})" + + def trigger_wait(self): + if not self.completed: + wait_tensor(self.elem) + self.completed = True + return self.elem + + def wait(self) -> torch.Tensor: + wait_tensor(self.elem) + return self.elem + + def _get_acs_underlying_tensor(self): + """This method enables _functional_collectives_impl to test if a tensor is an ACS""" + return self.elem + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + if func == torch.ops.aten.view.default: + # Fast handle aten.view as a lot of view related op goes to aten.view + # eventually, this avoids pytree slowdown + res = func(args[0].elem, args[1]) + wrapper_res = AsyncCollectiveTensor(res) + _register_tensor_wrapper(wrapper_res) + return wrapper_res + + is_view_op = _is_view_op(func) + + def unwrap(e: AsyncCollectiveTensor): + # wait_tensor is idepotent and will do stream sync only once + if not is_view_op: + e.trigger_wait() + return e.elem + + def wrap(e: torch.Tensor): + # wait_tensor is idepotent and will do stream sync only once + assert not isinstance(e, AsyncCollectiveTensor) + res = AsyncCollectiveTensor(e) + _register_tensor_wrapper(res) + return res + + unwrapped_args = tree_map_only(AsyncCollectiveTensor, unwrap, args) + unwrapped_kwargs = tree_map_only(AsyncCollectiveTensor, unwrap, kwargs) + + # we don't wrap the result as it doesn't need to be waited on. + out = func(*unwrapped_args, **unwrapped_kwargs) + + # View ops dont require a sync, so we should re-wrap the outputs. + if is_view_op: + out = tree_map_only(torch.Tensor, wrap, out) + + return out + + def numpy(self): + return self.wait().numpy() + + +""" +Utils and infrastructure for tracing support +""" + + +def _expand_group(group: RANK_TYPES, tag: str = "") -> Tuple[str, List[int], int]: + """ + _expand_group desugars the different RANK_TYPES types into a canonical format that is traceable. + + By having this be part of the explicit eager codepath, we avoid having to specialize behavior inside + torchdynamo and can still interoperate with processgroup objects or other untraceable forms. 
+ """ + # had to define this hack _inside_ expand_group to avoid + # graph_break [('torch.* op returned non-Tensor int + # caused by 'cast_*` functions being treated as 'torch.*' ops (iiuc) + if TYPE_CHECKING: + + def cast_listlistint(x): + return cast(List[List[int]], x) + + def cast_listint(x): + return cast(List[int], x) + + else: + # fake cast op for use at runtime since dynamo doesn't support real cast + # also, dynamo didn't like encountering 'typing' objects () + # NotImplementedError: argument of type: + def cast_listlistint(x): + return x + + def cast_listint(x): + return x + + rankset: List[int] + if isinstance(group, list): + if isinstance(group[0], list): + nested_list = cast_listlistint(group) + rankset = [] + group_size = -1 + for rs in nested_list: + rankset.extend(rs) + if group_size != -1 and group_size != len(rs): + raise ValueError( + f"group sizes must be identical found {group_size} and {len(rs)}" + ) + group_size = len(rs) + else: + rankset = cast_listint(group) + group_size = len(rankset) + elif isinstance(group, dist.ProcessGroup): + rankset = dist.get_process_group_ranks(group) + group_size = len(rankset) + tag = tag or c10d._get_group_tag(group) + elif isinstance(group, DeviceMesh): + assert ( + group.ndim == 1 + ), "Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D" + # TODO: it should run collective in the whole mesh instead of dim 0 + tag, rankset, _ = group._dim_group_infos[0] + group_size = len(rankset) + elif isinstance(group, tuple): + if ( + len(group) == 2 + and isinstance(group[0], DeviceMesh) + and isinstance(group[1], int) + ): + dmesh = group[0] + dim = group[1] + tag, rankset, _ = dmesh._dim_group_infos[dim] + group_size = len(rankset) + else: + raise ValueError("Invalid tuple for group must be (DeviceMesh, int)") + else: + raise ValueError( + "Invalid type for group, must be one of List, Processgroup, DeviceMesh or (DeviceMesh, int)." + ) + + return (tag, rankset, group_size) + + +def _resolve_group_name(group: RANK_TYPES, tag: str = "") -> str: + """ + Given group in RANK_TYPES, return the group name. + """ + # `tag` will be deprecated. See details in: + # https://github.com/pytorch/pytorch/issues/93173#issuecomment-1907095208 + if isinstance(group, dist.ProcessGroup): + return group.group_name + elif isinstance(group, str): + return group + elif isinstance(group, DeviceMesh): + assert ( + group.ndim == 1 + ), "Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D" + return group._dim_group_infos[0][2] + elif isinstance(group, tuple): + if ( + len(group) == 2 + and isinstance(group[0], DeviceMesh) + and isinstance(group[1], int) + ): + dmesh = group[0] + dim = group[1] + return dmesh._dim_group_infos[dim][2] + else: + raise ValueError("Invalid tuple for group must be (DeviceMesh, int)") + elif isinstance(group, list): + if not is_torchdynamo_compiling(): + warnings.warn( + "The combination of ranks + tag as process group " + "identifier has been deprecated. Please switch to " + "using ProcessGroup, DeviceMesh, or group name instead." + ) + return c10d._resolve_group_name_by_ranks_and_tag(cast(List[int], group), tag) + else: + raise ValueError(f"Unsupported group type: {type(group)}, {group}") + + +def _are_we_tracing() -> bool: + if is_torchdynamo_compiling(): + return True + # If functionalization is turned on, we are almost definitely compiling/tracing. + # (In particular, AOTAutograd traces a model once with functionalization on + # but proxy tracing turned of, so this is how we detect it). 
+ if ( + torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL) + is not None + ): + return True + mode = get_innermost_proxy_mode() + if mode is None: + return False + return mode.tracer is not None + + +def _maybe_wrap_tensor(self) -> torch.Tensor: + if _are_we_tracing(): + return wait_tensor(self) + res = AsyncCollectiveTensor(self) + _register_tensor_wrapper(res) + return cast(torch.Tensor, res) + + +def _all_gather_into_tensor_coalesced_meta(self, tag, rankset, group_size): + def mk_out_tensor(shard): + out_size = list(shard.size()) + out_size[0] *= group_size + out_tensor = shard.new_empty(out_size) + return out_tensor + + return [mk_out_tensor(t) for t in self] + + +# We now register meta kernels to deal with tracing +def _broadcast_meta(self, *args): + return torch.empty_like(self) + + +def _all_reduce_meta(self, *args): + return torch.empty_like(self) + + +def _wait_tensor_meta(self, *args): + return torch.empty_like(self) + + +def _all_gather_into_tensor_meta(shard, tag, rankset, group_size): + out_size = list(shard.size()) + out_size[0] *= group_size + return shard.new_empty(out_size) + + +def _reduce_scatter_tensor_meta(input, reduce_op, tag, rankset, group_size): + out_size = list(input.size()) + out_size[0] //= group_size + return input.new_empty(out_size) + + +def _all_reduce_coalesced_meta(self, *args): + return [torch.empty_like(t) for t in self] + + +def _all_reduce__meta(inp, *args): + return inp + + +def _broadcast__meta(inp, *args): + return inp + + +def _all_reduce_coalesced__meta(inputs, *args): + return inputs + + +def _reduce_scatter_tensor_coalesced_meta(inputs, reduceOp, tag, rankset, group_size): + def mk_out_tensor(input): + out_size = list(input.size()) + out_size[0] //= group_size + out_tensor = input.new_empty(out_size) + return out_tensor + + return [mk_out_tensor(t) for t in inputs] + + +# NB: We often say all_to_all has dynamic output size, but this is not +# technically true: instead, what typically happens is you manually +# communicate the output_split_sizes ahead of time (which is dynamic), +# but then you pass those sizes explicitly, and the all to all itself +# isn't dynamic, it just follows the specified output splits +def _all_to_all_single_meta( + input, output_split_sizes, input_split_sizes, *args, **kwargs +): + if output_split_sizes is None: + return input.new_empty(input.size()) + else: + for s in output_split_sizes: + torch._check_is_size(s) + out_size = list(input.size()) + out_size[0] = sum(output_split_sizes) + return input.new_empty(out_size) + + +def _all_gather_into_tensor_native_meta(input, group_size, group_name): + shape = list(input.size()) + shape[0] *= group_size + return input.new_empty(shape) + + +def _all_gather_into_tensor_coalesced_native_meta(inputs, group_size, group_name): + return [ + _all_gather_into_tensor_native_meta(input, group_size, group_name) + for input in inputs + ] + + +def _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name): + shape = list(inp.size()) + shape[0] //= group_size + return inp.new_empty(shape) + + +def _reduce_scatter_tensor_coalesced_native_meta( + inputs, reduce_op, group_size, group_name +): + return [ + _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name) + for inp in inputs + ] + + +def _register_ops(): + ops_defs = [ + "broadcast(Tensor self, int src, str tag, int[] ranks, int group_size) -> Tensor", + "all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor", + "all_reduce_coalesced(Tensor[] self, 
str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]", + "wait_tensor(Tensor self) -> Tensor", + "all_gather_into_tensor(Tensor shard, str tag, int[] ranks, int group_size) -> Tensor", + "all_gather_into_tensor_coalesced(Tensor[] input, str tag, int[] ranks, int group_size) -> Tensor[]", + "reduce_scatter_tensor(Tensor input, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor", + "reduce_scatter_tensor_coalesced(Tensor[] inputs, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]", + "all_to_all_single(Tensor input, SymInt[]? output_split_sizes, SymInt[]? input_split_sizes, str tag, int[] ranks, int group_size) -> Tensor", # noqa: B950 + ] + + my_module = sys.modules[__name__] + for op_def in ops_defs: + op_name = op_def[0 : op_def.index("(")] + backend_impl = getattr(fun_col_impl, f"_{op_name}") + meta_impl = getattr(my_module, f"_{op_name}_meta") + c10_lib.define(op_def, tags=torch.Tag.pt2_compliant_tag) + c10_lib_impl.impl(op_name, backend_impl, "CompositeExplicitAutograd") + impl_abstract(f"c10d_functional::{op_name}")(meta_impl) + + +if not torch._running_with_deploy(): + # Library MUST be defined at module scope or it doesn't work + # Creating a "DEF" Library always crashes torch::deploy so we create our Library instances here + # guarded against running inside it + c10_lib = torch.library.Library("c10d_functional", "DEF") + c10_lib_impl = torch.library.Library("c10d_functional", "IMPL") + _register_ops() + + _c10_lib_impl = torch.library.Library("_c10d_functional", "IMPL") + _c10_lib_impl.impl("all_reduce", _all_reduce_meta, "Meta") + _c10_lib_impl.impl("all_reduce_", _all_reduce__meta, "Meta") + _c10_lib_impl.impl("all_reduce_coalesced", _all_reduce_coalesced_meta, "Meta") + _c10_lib_impl.impl("all_reduce_coalesced_", _all_reduce_coalesced__meta, "Meta") + _c10_lib_impl.impl("wait_tensor", _wait_tensor_meta, "Meta") + _c10_lib_impl.impl( + "all_gather_into_tensor", _all_gather_into_tensor_native_meta, "Meta" + ) + _c10_lib_impl.impl( + "all_gather_into_tensor_coalesced", + _all_gather_into_tensor_coalesced_native_meta, + "Meta", + ) + _c10_lib_impl.impl( + "reduce_scatter_tensor", _reduce_scatter_tensor_native_meta, "Meta" + ) + _c10_lib_impl.impl( + "reduce_scatter_tensor_coalesced", + _reduce_scatter_tensor_coalesced_native_meta, + "Meta", + ) + _c10_lib_impl.impl("all_to_all_single", _all_to_all_single_meta, "Meta") + _c10_lib_impl.impl("broadcast", _broadcast_meta, "Meta") + _c10_lib_impl.impl("broadcast_", _broadcast__meta, "Meta") +else: + warnings.warn( + "PyTorch Distributed functional collectives do not work with torch::deploy." + ) + + +""" +Dynamo Remappings allow seamless translation from non-functional collectives of supportable form into +functional collective calls followed by inplace copy ops, allowing them to be traced into a functional graph. + +We implement this by writing a decomposition and teaching dynamo how to associate it to a corresponding op via +the mapping dict below. 
+ +These schemas intentionally match torch.distributed.distributed_c10d.* ops that we are trying to remap from +""" + + +def all_gather_tensor_inplace( + output_tensor: torch.Tensor, + input_tensor: torch.Tensor, + group, # TODO add a type, + async_op: bool = False, + tag: str = "", + gather_dim: int = 0, +): + assert ( + not async_op + ), "Can't remap async version of inplace op to functional collective" + return output_tensor.copy_(all_gather_tensor(input_tensor, gather_dim, group, tag)) + + +def reduce_scatter_tensor_inplace( + output: torch.Tensor, + input: torch.Tensor, + op: str = "sum", # TODO type is actually c10d ReduceOp. is this ok? + group=None, # TODO add a type + async_op: bool = False, + scatter_dim: int = 0, + tag: str = "", +): + assert ( + not async_op + ), "Can't remap async version of inplace op to functional collective" + return output.copy_(reduce_scatter_tensor(input, op, scatter_dim, group, tag)) + + +REDUCE_OP_TO_STR = { + dist.ReduceOp.SUM: "sum", + dist.ReduceOp.AVG: "avg", + dist.ReduceOp.PRODUCT: "product", + dist.ReduceOp.MIN: "min", + dist.ReduceOp.MAX: "max", + dist.ReduceOp.BAND: "band", + dist.ReduceOp.BOR: "bor", + dist.ReduceOp.BXOR: "bxor", +} + + +def all_reduce_inplace( + tensor: torch.Tensor, + op: str = "sum", + group=None, + async_op: bool = False, + tag: str = "", +): + assert ( + not async_op + ), "Can't remap async version of inplace op to functional collective" + + return tensor.copy_(all_reduce(tensor, op, group, tag)) + + +def all_to_all_inplace( + output: torch.Tensor, + input: torch.Tensor, + output_split_sizes=None, + input_split_sizes=None, + group=None, + async_op=False, + tag: str = "", +): + assert ( + not async_op + ), "Can't remap async version of inplace op to functional collective" + return output.copy_( + all_to_all_single(input, output_split_sizes, input_split_sizes, group, tag) + ) + + +def all_gather_inplace( + tensor_list: List[torch.Tensor], + tensor: torch.Tensor, + group=None, + async_op=False, + tag: str = "", +): + assert ( + not async_op + ), "Can't remap async version of inplace op to functional collective" + assert all( + t.size(0) == tensor.size(0) for t in tensor_list + ), "Remapping variable size all_gather is not yet supported" + + output = all_gather_tensor(tensor, 0, group, tag) + + # Use aten.slice instead of aten.split because the latter causes + # tensor.shape(0) to be unnecessarily baked in when it's a SymInt. + output_splits = [] + offset = 0 + for t in tensor_list: + output_splits.append(output[offset : offset + t.size(0)]) + offset += t.size(0) + for dst, src in zip(tensor_list, output_splits): + dst.copy_(src) + return tensor_list + + +from torch.distributed.distributed_c10d import ( + _all_gather_base as legacy_all_gather_base, + _reduce_scatter_base as legacy_reduce_scatter_base, + all_gather as legacy_all_gather, + all_gather_into_tensor as legacy_allgather, + all_reduce as legacy_allreduce, + all_to_all_single as legacy_all_to_all_single, + reduce_scatter_tensor as legacy_reducescatter, +) + +# This dict should contain sets of functions that dynamo is allowed to remap. +# Functions in this set should accept the same args/kwargs 1:1 as their mapping. 
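# A minimal sketch of the substitution the table below enables (hypothetical
# helper; `t` is any tensor and `pg` an already-initialized process group).
# Eager code keeps calling the legacy in-place c10d API; only the traced graph
# sees the functional collective followed by a copy back into the caller's
# buffer, mirroring all_reduce_inplace above.
def _remap_sketch(t: torch.Tensor, pg: dist.ProcessGroup) -> torch.Tensor:
    # user code:    dist.all_reduce(t, group=pg)        # legacy, in-place
    # traced form:  t.copy_(all_reduce(t, "sum", pg))   # via all_reduce_inplace
    return all_reduce_inplace(t, "sum", pg)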
+traceable_collective_remaps = { + legacy_allgather: all_gather_tensor_inplace, + legacy_reducescatter: reduce_scatter_tensor_inplace, + legacy_allreduce: all_reduce_inplace, + legacy_all_to_all_single: all_to_all_inplace, + legacy_all_gather: all_gather_inplace, + legacy_reduce_scatter_base: reduce_scatter_tensor_inplace, + legacy_all_gather_base: all_gather_tensor_inplace, +} diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py b/venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..e0ae3bbbb070e3dcbc2599d4ff2a06b2e8a6515d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py @@ -0,0 +1,409 @@ +import logging +import os +import warnings +import weakref +from typing import cast, Dict, List, Optional + +import torch +import torch.distributed as dist +import torch.distributed.distributed_c10d as c10d + +""" +Moved eager kernel implementations to a separate file partly for readability and partly as it is currently +easier in dynamo to set tracing policy on a file-by-file level. + +Do not put code in this file that Dynamo is expected to trace into, as dynamo may disallow this whole file. + +DEBUG/TESTING HELPERS: + +This module includes some helpers that are quite useful when debugging or testing functional collectives: + +_tensor_needs_wait +_outstanding_wait_count +_wait_all + +""" + +_use_native_funcol: Optional[bool] = None + + +if torch._running_with_deploy(): + + def native_funcol_enabled(): + return False + +else: + from torch._dynamo import assume_constant_result + + @assume_constant_result + def native_funcol_enabled(): + global _use_native_funcol + if _use_native_funcol is None: + try: + # Disable native funcol when torch_xla is installed. This check + # will be removed once torch_xla adopts the native_funcol IR. 
+ import torch_xla # noqa: F401 + + _use_native_funcol = False + except Exception: + # When TORCH_DISABLE_NATIVE_FUNCOL is set, fallback to py funcol + _use_native_funcol = ( + os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1" + ) + + return _use_native_funcol + + +logger = logging.getLogger(__name__) + +data_ptr_to_work: Dict[int, "_WaitRegistration"] = dict() +work_version = 0 + + +class _WaitRegistration: + def __init__(self, work): + global work_version + self.work = work + self.version = work_version + self.ptrs = set() + self.ptr_alias_count = {} + self.cleanup_count = 0 + work_version += 1 + + def _register_tensor_ptr(self, data_ptr): + global data_ptr_to_work + data_ptr_to_work[data_ptr] = self + self.ptrs.add(data_ptr) + + def _record_wrapper(self, ptr): + self._register_tensor_ptr(ptr) + self.ptr_alias_count.setdefault(ptr, 0) + self.ptr_alias_count[ptr] += 1 + self.cleanup_count += 1 + + def wait(self): + if self.work is not None: + self.work.wait() + self.work = None + self.cleanup() + + def decrement_live_tensor(self, ptr): + self.cleanup_count -= 1 + if self.cleanup_count == 0: + self.wait() + else: + self.ptr_alias_count[ptr] -= 1 + if ( + self.ptr_alias_count[ptr] < 1 + and data_ptr_to_work.get(ptr, None) == self + ): + del data_ptr_to_work[ptr] + + def cleanup(self): + for ptr in self.ptrs: + if data_ptr_to_work.get(ptr, None) == self: + del data_ptr_to_work[ptr] + + +def _register_tensor_work(tensor_or_list, work_or_list): + if not isinstance(tensor_or_list, list): + tensor_or_list = [tensor_or_list] + if not isinstance(work_or_list, list): + reg = _WaitRegistration(work_or_list) + for tensor in tensor_or_list: + reg._register_tensor_ptr(tensor.data_ptr()) + else: + for tensor, work in zip(tensor_or_list, work_or_list): + reg = _WaitRegistration(work) + reg._register_tensor_ptr(tensor.data_ptr()) + + +def _wait_reg_dec(ptr, wait_reg): + wait_reg.decrement_live_tensor(ptr) + + +def _register_tensor_wrapper(tensor) -> None: + if native_funcol_enabled(): + # Tensor storage -> work mapping is maintained in C++ + return + global data_ptr_to_work + data_ptr = tensor.elem.data_ptr() + # Note: we should NEVER try to trace this, bc it registers runtime stuff during trace. + # Instead, backends must call this themselves when implementing traced collectives. + wait_reg = data_ptr_to_work.get(data_ptr, None) + if wait_reg is None: + warnings.warn( + "Trying to register finalizer to AsyncCollectiveTensor but the inner tensor is already gone" + ) + else: + # We force the collective to be waited in the case this tensor goes away to reduce the change of deadlocks. + # NOTE: we register the callback to the ACT wrapper class, for the following reasons: + # 1. The inner tensor is referenced by the associated Work object, so it's uncollective until we release the + # associated work object + # 2. There's a n-to-1 relationship between wrappers and inner tensor due to non-waitable ops like view() + wait_reg._record_wrapper(data_ptr) + weakref.finalize(tensor, _wait_reg_dec, data_ptr, wait_reg) + + +def _wait_tensor(tensor: torch.Tensor) -> torch.Tensor: + global data_ptr_to_work + data_ptr = tensor.data_ptr() + wait_reg = data_ptr_to_work.get(data_ptr) + if wait_reg is not None: + wait_reg.wait() + return tensor + + +def _tensor_needs_wait(tensor: torch.Tensor) -> bool: + """Returns true if ```tensor``` needs to be waited. 
Works with ACS and inner tensors.""" + if hasattr(tensor, "_get_acs_underlying_tensor"): + tensor = tensor._get_acs_underlying_tensor() + data_ptr = tensor.data_ptr() + wait_reg = data_ptr_to_work.get(data_ptr) + return wait_reg is not None and wait_reg.work is not None + + +def _outstanding_wait_count() -> int: + """Returns the number of outstanding work objects waiting to be waited (sic).""" + return len(data_ptr_to_work) + + +def _wait_all() -> None: + """Wait for all outstanding collectives.""" + for work_reg in list(data_ptr_to_work.values()): + work_reg.wait() + + +def _str_to_reduce_op(reduceOp: str) -> dist.ReduceOp: + reduceOp = reduceOp.upper() + op = dist.ReduceOp.RedOpType.__members__.get(reduceOp) + if op is None: + raise ValueError(f"Invalid reduce operation {reduceOp}") + return cast(dist.ReduceOp, op) + + +""" +Kernel implementations (for eager runtime only) - should never be traced by torch.compile + +These functions should all be bound to dispatcher ops. During tracing, the op itself should be +captured in the graph and the backend should implement the op however it prefers. +""" + + +def _broadcast(self, src, tag, ranks, group_size): + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + assert group is not None + + inplace_tensor = self.clone(memory_format=torch.contiguous_format) + work = dist.broadcast(inplace_tensor, src, group=group, async_op=True) + _register_tensor_work(inplace_tensor, work) + + return inplace_tensor + + +# TODO assert if ranks has duplicated entries +def _all_reduce(self, reduceOp, tag, ranks, group_size): + op = _str_to_reduce_op(reduceOp) + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + assert group is not None + + inplace_tensor = self.clone(memory_format=torch.contiguous_format) + work = dist.all_reduce(inplace_tensor, op=op, group=group, async_op=True) + _register_tensor_work(inplace_tensor, work) + + return inplace_tensor + + +def _all_reduce_coalesced(self, reduceOp, tag, ranks, group_size): + op = _str_to_reduce_op(reduceOp) + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + assert group is not None + + inplace_tensor_list = [t.clone(memory_format=torch.contiguous_format) for t in self] + work = dist.all_reduce_coalesced( + inplace_tensor_list, op=op, group=group, async_op=True + ) + _register_tensor_work(inplace_tensor_list, work) + + return inplace_tensor_list + + +def _all_gather_into_tensor(shard, tag, ranks, group_size): + # TODO add dim support? 
+ group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + assert group is not None + out_size = list(shard.size()) + out_size[0] *= group_size + out_tensor = shard.new_empty(out_size) + assert out_tensor.is_contiguous() + # FIXME gloo doesn't support _allgather_base + if dist.get_backend(group) == dist.Backend.GLOO or shard.is_cpu: + tensor_list = list(torch.chunk(out_tensor, group_size)) + work = dist.all_gather(tensor_list, shard, group=group, async_op=True) + else: + work = dist.all_gather_into_tensor( + out_tensor, shard, group=group, async_op=True + ) + _register_tensor_work(out_tensor, work) + + return out_tensor + + +def _all_gather_into_tensor_coalesced(self, tag, rankset, group_size): + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, rankset, group_size) + assert group is not None + + def mk_out_tensor(shard): + out_size = list(shard.size()) + out_size[0] *= group_size + out_tensor = shard.new_empty(out_size) + assert out_tensor.is_contiguous() + return out_tensor + + out_tensors = [mk_out_tensor(t) for t in self] + + work_list = _all_gather_into_tensor_coalesced_fallback( + output_tensors=out_tensors, input_tensors=self, group=group, async_op=True + ) + + _register_tensor_work(out_tensors, work_list) + return out_tensors + + +def _reduce_scatter_tensor( + input: torch.Tensor, + reduceOp: str, + tag: str, + ranks: List[int], + group_size: int, +): + # TODO add dim support? + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + assert group is not None + op = _str_to_reduce_op(reduceOp) + + if dist.get_backend(group) == dist.Backend.GLOO or input.is_cpu: + # cpu::gloo backend does not have reduce_scatter we fallback to do all_reduce + # + local chunk + logger.warning( + "ProcessGroupGloo does not support reduce_scatter, falling back with all reduce!" 
+ ) + reduction_input = input.clone() + group_rank = dist.get_rank(group) + work = dist.all_reduce(reduction_input, op=op, group=group, async_op=True) + out_tensor = reduction_input.chunk(group_size, dim=0)[group_rank] + _register_tensor_work(out_tensor, work) + else: + out_size = list(input.size()) + out_size[0] //= group_size + out_tensor = input.new_empty(out_size) + work = dist.reduce_scatter_tensor( + out_tensor, input, op=op, group=group, async_op=True + ) + _register_tensor_work(out_tensor, work) + + return out_tensor + + +def _reduce_scatter_tensor_coalesced( + inputs: List[torch.Tensor], + reduce_op: str, + tag: str, + ranks: List[int], + group_size: int, +): + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + assert group is not None + op = _str_to_reduce_op(reduce_op) + + def mk_out_tensor(shard): + out_size = list(shard.size()) + out_size[0] //= group_size + out_tensor = shard.new_empty(out_size) + assert out_tensor.is_contiguous() + return out_tensor + + out_tensors = [mk_out_tensor(t) for t in inputs] + + work_list = _reduce_scatter_tensor_coalesced_fallback( + output_tensors=out_tensors, + input_tensors=inputs, + op=op, + group=group, + async_op=False, + ) + + _register_tensor_work(out_tensors, work_list) + return out_tensors + + +def _all_gather_into_tensor_coalesced_fallback( + output_tensors, input_tensors, group, async_op=False +): + # all_gather_coalesced is useless, it doesn't work under NCCL and does lots of copies under Gloo + # all_gather is useless too because it's single tensor + # NCCL's PG::all_gather with multiple tensors is broken, it only works for the multi-device setting + # and fails if you mix same-size with different-size tensor lists. + # _coalescing_manager crashed NCCL when used with all_gather_into_tensor. 
+ if input_tensors[0].is_cpu or not async_op: + work_list = [] + out_tensors_sliced = [ + list(torch.chunk(out_tensor, dist.get_world_size(group))) + for out_tensor in output_tensors + ] + for shard, out_tensor in zip(input_tensors, out_tensors_sliced): + work = c10d.all_gather(out_tensor, shard, group=group, async_op=async_op) + work_list.append(work) + return work_list + else: + with c10d._coalescing_manager(group=group, async_ops=True) as cm: + for in_t, out_t in zip(input_tensors, output_tensors): + dist.all_gather_into_tensor(out_t, in_t, group=group, async_op=True) + return cm + + +def _reduce_scatter_tensor_coalesced_fallback( + output_tensors, input_tensors, op, group, async_op=False +): + # All the same reasons as the all_gather fallback + work_list = [] + for shard, out_tensor in zip(input_tensors, output_tensors): + work = c10d.reduce_scatter_tensor( + out_tensor, shard, op=op, group=group, async_op=async_op + ) + work_list.append(work) + return work_list + + +def _all_to_all_single( + input: torch.Tensor, + output_split_sizes: Optional[List[int]], + input_split_sizes: Optional[List[int]], + tag: str, + ranks: List[int], + group_size: int, +): + group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size) + + if output_split_sizes is not None: + torch._check( + input.dim() >= 1, + lambda: f"Expected input to have at least 1 dim but got {input.dim()} dim", + ) + out_size = list(input.size()) + out_size[0] = sum(output_split_sizes) + out_tensor = input.new_empty(out_size) + else: + out_tensor = input.new_empty(input.size()) + + work = c10d.all_to_all_single( + out_tensor, + input, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group, + async_op=True, + ) + _register_tensor_work(out_tensor, work) + + return out_tensor diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9e6b1662589c47b81534d5d04493d6e68f89b12f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py @@ -0,0 +1,12 @@ +# Keep old package for BC purposes, this file should be removed once +# everything moves to the `torch.distributed._shard` package. 
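A hedged sketch of what this backwards-compatibility shim provides to callers: the deprecated import path keeps working, warns on first import, and, via the sys.modules alias installed at the bottom of the shim below, resolves to the new torch.distributed._shard.sharded_tensor package. ShardedTensor is assumed to be re-exported by the star import, as it is upstream.

import warnings
warnings.simplefilter("default", DeprecationWarning)

# Deprecated path: still importable, but emits a DeprecationWarning on first import.
from torch.distributed._sharded_tensor import ShardedTensor
# Preferred path: yields the very same class object.
from torch.distributed._shard.sharded_tensor import ShardedTensor as _NewShardedTensor
assert ShardedTensor is _NewShardedTensor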
+import sys +import torch +import warnings + +from torch.distributed._shard.sharded_tensor import * # noqa: F403 +warnings.warn( + "torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead", + DeprecationWarning +) +sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff859d70b6fb7dfdec8d43edc3151e09b32a46b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3060005dbdd4beef2d6a7a240b4afdb9e9a4186 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__init__.py @@ -0,0 +1,14 @@ +# Keep old package for BC purposes, this file should be removed once +# everything moves to the `torch.distributed._shard` package. +import sys +import torch +import warnings + +from torch.distributed._shard.sharding_spec import * # noqa: F403 +warnings.warn( + "torch.distributed._sharding_spec will be deprecated, use torch.distributed._shard.sharding_spec instead", + DeprecationWarning +) + +import torch.distributed._shard.sharding_spec as _sharding_spec +sys.modules['torch.distributed._sharding_spec'] = _sharding_spec diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e262999d4fb4f47a11d0b4f89a83015d7102bdda Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_sharding_spec/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb08d445c4625aa7b45845f5268446e27bc1b732 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad718cd1913389ec20fc25738020c0d392e4b668 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/batch_dim_utils.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/batch_dim_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01c7e7a657e52f0bc17bdd03f8e30f89f08ca6b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/batch_dim_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/comm_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/comm_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14c1768ecb7a80b83874e44498f181e34e09cad4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/comm_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61b89c4aae5f304df70b29f9e8e5267e12d541b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/data_parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/data_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65e1b6228819ba78c599208b2272f1373cb0bbb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/data_parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/distribute.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/distribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..997fe6feb9458ca92aacef2d4118ecc72b4a9037 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/distribute.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/experimental_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/experimental_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aed93fc50be6e5edf70e8c51b45c0370fbb01e15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/experimental_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/gm_transformation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/gm_transformation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27aae8c7cd471436cf40a02d2565bbc7f5555cbd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/gm_transformation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_optimization.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_optimization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8440de718516b6510e68d26de8c365bfb6e22ac1 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_optimization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ce919300761226f1eaab1728aa30eea4b33afda Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/graph_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/iter_graph_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/iter_graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39e0237ce54eed560a2d117dd6850410f2f0d709 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/iter_graph_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/log_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/log_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf78ef59857c16db46b955ee7fcf1d56dc08154c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/log_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/parallel_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/parallel_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bec0a1e84f981c1589dfa1bc3e07e856888df25c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/parallel_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/partial_lower.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/partial_lower.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9887a185ff3921e4bf7aa41909ac1e9181b8962 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/__pycache__/partial_lower.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/api.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/api.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0ebd58d17a89c73634c418ac3f0ac0d27b0711 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/api.py @@ -0,0 +1,575 @@ +from abc import ABC, abstractmethod +from contextlib import contextmanager, nullcontext +from copy import copy +from dataclasses import dataclass +from functools import partial, wraps +from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Union + +from functorch import make_fx + +import torch +import torch.distributed as dist + +# We need to import _functional_collectives to trigger op registration +import torch.distributed._functional_collectives +import torch.nn as nn +import torch.utils._pytree as pytree + +from torch import fx +from torch._decomp.decompositions import native_layer_norm_backward + +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.distributed._spmd.data_parallel import gradients_tagging +from 
torch.distributed._spmd.parallel_mode import ( + DataParallel, + DTensorExpandMode, + ParallelMode, +) +from torch.distributed._tensor import Placement +from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo, CodeGen +from torch.nn.utils import stateless +from torch.nn.utils._named_member_accessor import NamedMemberAccessor + + +class Override(ABC): + r"""Override the tracing and transformation behavior of :meth:`~torch.distributed._spmd.compile`. + + This is useful when any part of the model is not traceable or if you prefer + to not trace it due to any reason. More specifically, users can implement + :meth:`torch.distributed._spmd.Override.replacement` to replace an original + submodule with the return new submodule. The new submodule contains + operations that users preferred to be traced, which simply be a dummy + placeholder operator. After tracing, users can implement + :meth:`torch.distributed._spmd.Override.transform` to transform the traced + graph, where the dummy placeholder operator serves as an anchor to insert + new sub-graphs. + """ + + @abstractmethod + def replacement(self, fqn: str, orig_submodule: torch.nn.Module) -> torch.nn.Module: + r"""Implement this method to return a new :class:`nn.Module` instance to replace the ``orig_submodule`` + argument in the model. + + This helps if ``orig_submodule`` is not traceable or should not be traced. + + Args: + fqn (str): fully quantified name of the submodule. + orig_submodule (class:`nn.Module`): original submodule instance to replace. + + Returns: + A new :class:`nn.Module` instance to replace the original one. + + """ + pass + + @abstractmethod + def transform( + self, + gm: fx.GraphModule, + flat_state: List[torch.Tensor], + ) -> fx.GraphModule: + r""" + Given a DTensor-expanded graph and sharding schema for every node, + conduct additional transformation for the sub-graph from the :class:`nn.Module` + returned by :meth:`torch.distributed._spmd.Override.replacement` if + necessary. + + Args: + gm (:class:`fx.Graph`): a DTensor-expanded graph. + flat_state (List[str, :class:`Tensor`]): a reference to the list of + flattened state. The elements in ``flat_state`` map to the first + ``len(flat_state)`` placeholders in the graph. The transformation + can add state to or remove state from ``flat_state`` as long as + it keeps ``flat_state`` and the placeholders consistent. + + Returns: + The :class:`fx.Graph` after transformation. + + """ + pass + + +class _PyTreeCodeGenOutputsOnly(_PyTreeCodeGen): + # pyre-ignore[3] + def process_inputs(self, *args: Any) -> Any: + return args + + # pyre-ignore[2, 3] + def gen_fn_def(self, free_vars, maybe_return_annotation): + return CodeGen.gen_fn_def(self, free_vars, maybe_return_annotation) + + +def _to_caller_flattened_graph_module(gm: torch.fx.GraphModule) -> torch.fx.GraphModule: + """Move the responsibility of flattening the input arguments from the graph module to the caller. 
+ + Example: + + output = gm(my_struct) + + gm = gm(to_caller_flattened_graph_module) + + output = gm(*pytree.flatten(my_struct)[0]) + + """ + # pyre-ignore[16] + gm._graph._codegen = _PyTreeCodeGenOutputsOnly( + pytree_info=_PyTreeInfo( + # pyre-ignore[6] + orig_args=None, # type: ignore[arg-type] + # pyre-ignore[6] + in_spec=None, # type: ignore[arg-type] + # pyre-ignore[16] + out_spec=gm._graph._codegen.pytree_info.out_spec, + ) + ) + gm.recompile() + return gm + + +# Use a dtensor expand mode for now to preserve the old behavior +# and avoid breaking existing code +dtensor_expand_mode = DTensorExpandMode() + + +def _override_placements(t: torch.Tensor, placements: List[Placement]): + global dtensor_expand_mode + dtensor_expand_mode._placements_override[id(t)] = placements + + +@contextmanager +def _rematerialize_optimizer( + opt: torch.optim.Optimizer, + named_states: Dict[str, Any], + params: Dict[str, nn.Parameter], +): + assert opt is not None + + # update opt.state with proxy tensors + orig_states = copy(opt.state) + for n in named_states: + # opt.state's key type is string, but optimizer uses Parameter as keys + opt.state[params[n]] = named_states[n] # type: ignore[index] + + # FIXME: support multiple parameter groups + param_group = opt.param_groups[0] + orig_params = param_group["params"] + param_group["params"] = params.values() + + try: + yield + finally: + param_group["params"] = orig_params + opt.state = orig_states + + +aten = torch.ops.aten # pyre-ignore + + +@contextmanager +def _enable_compile(): + # The return value of torch._utils.is_compiling changes optimizer behavior. + # We need that function to return True to include optimizer in the graph. + # See: https://github.com/pytorch/pytorch/blob/a524123c91ab399c9dd6882c1189596dd77e7734/torch/optim/optimizer.py#L41 + def f_true(): + return True + + orig_is_compiling_code = torch._utils.is_compiling.__code__ + torch._utils.is_compiling.__code__ = f_true.__code__ + try: + yield + finally: + torch._utils.is_compiling.__code__ = orig_is_compiling_code + + +def _foreach_add_decomp(self, other, alpha=1): + self_updated = aten._foreach_add.List(self, other, alpha=alpha) + for s, s_u in zip(self, self_updated): + s.copy_(s_u) + + +def _foreach_unaop_decomp(op, self): + self_updated = op(self) + for s, s_u in zip(self, self_updated): + s.copy_(s_u) + + +def _foreach_binop_list_decomp(op, self, other): + self_updated = op(self, other) + for s, s_u in zip(self, self_updated): + s.copy_(s_u) + + +def _foreach_binop_scalar_decomp(op, self, scalar=1): + self_updated = op(self, scalar) + for s, s_u in zip(self, self_updated): + s.copy_(s_u) + + +def _foreach_addcop_scalar_decomp(op, self, tensor1, tensor2, scalar=1): + self_updated = op(self, tensor1, tensor2, scalar) + for s, s_u in zip(self, self_updated): + s.copy_(s_u) + + +def _fused_adam_decomp( + self, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + *, + lr=1, + beta1=1, + beta2=1, + weight_decay=1, + eps=1, + amsgrad=True, + maximize=True, + grad_scale=None, + found_inf=None, +): + orig_tuple = (self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs) + updated_tuple = aten._fused_adam.default( + self, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + lr=lr, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + eps=eps, + amsgrad=amsgrad, + maximize=maximize, + grad_scale=grad_scale, + found_inf=found_inf, + ) + + for idx, (orig, updated) in enumerate(zip(orig_tuple, updated_tuple)): + if idx == 1: + # skip gradient copying as we 
don't need to copy gradients back + continue + for o, u in zip(orig, updated): + o.copy_(u) + + +SPMD_DECOMP_TABLE = { + aten._foreach_add_.List: _foreach_add_decomp, + aten._foreach_add_.Scalar: partial( + _foreach_binop_scalar_decomp, aten._foreach_add.Scalar + ), + aten._foreach_addcdiv_.Scalar: partial( + _foreach_addcop_scalar_decomp, aten._foreach_addcdiv.Scalar + ), + aten._foreach_addcmul_.Scalar: partial( + _foreach_addcop_scalar_decomp, aten._foreach_addcmul.Scalar + ), + aten._foreach_div_.List: partial( + _foreach_binop_list_decomp, aten._foreach_div.List + ), + aten._foreach_mul_.Scalar: partial( + _foreach_binop_scalar_decomp, aten._foreach_mul.Scalar + ), + aten._foreach_div_.Scalar: partial( + _foreach_binop_scalar_decomp, aten._foreach_div.Scalar + ), + aten._foreach_neg_.default: partial( + _foreach_unaop_decomp, aten._foreach_neg.default + ), + aten._foreach_reciprocal_.default: partial( + _foreach_unaop_decomp, aten._foreach_reciprocal.default + ), + aten._foreach_sqrt_.default: partial( + _foreach_unaop_decomp, aten._foreach_sqrt.default + ), + aten._foreach_sub_.Scalar: partial( + _foreach_binop_scalar_decomp, aten._foreach_sub.Scalar + ), + aten._fused_adam_.default: _fused_adam_decomp, + aten.native_layer_norm_backward.default: native_layer_norm_backward, +} + + +DEDUP_TARGETS: Set[torch._ops.OpOverload] = { + torch.ops.c10d_functional.all_reduce.default, + torch.ops.c10d_functional.wait_tensor.default, +} + + +def _dedup_collectives(gm: fx.GraphModule) -> fx.GraphModule: + args_to_node: Dict[Tuple[Any, ...], fx.Node] = {} + + for node in gm.graph.nodes: + # replace all args with the results from the first unique comm op + args = pytree.arg_tree_leaves(*node.args) + + if node.target in DEDUP_TARGETS: + args_key = (node.target, *args) + unique_node = args_to_node.get(args_key, None) + if unique_node is None: + # first time seeing this combination, remember it + args_to_node[args_key] = node + else: + # the current node is a duplicate, replace it + node.replace_all_uses_with(unique_node) + gm.graph.erase_node(node) + + gm.recompile() + + return gm + + +@dataclass +class _CompiledResult: + gm: fx.GraphModule + mod: nn.Module + opt: Optional[torch.optim.Optimizer] + flat_state: List[torch.Tensor] + + +def _compile( + func: Callable, + module_override: Optional[List[Override]], + parallel_mode: ParallelMode, + *args: Any, + **kwargs: Any, +) -> _CompiledResult: + # 1. Extract nn.Module and Optimizer from args and kwargs + # FIXME(@mrshenli): support multiple nn.Module instances + # FIXME(@mrshenli): support multiple Optiimzer instances + # FIXME(@mrshenli): need to broadcast model to sync parameters + mod, opt = None, None + for arg in pytree.arg_tree_leaves(*args, **kwargs): + if isinstance(arg, nn.Module): + assert mod is None, "Only support single nn.Module for now" + mod = arg + if isinstance(arg, torch.optim.Optimizer): + assert opt is None, "Only support single Optimizer for now" + opt = arg + + assert mod is not None, "Couldn't find nn.Module instances from the arguments." + + # 2. Override target submodules (e.g., MoE) with dummy replacements + if module_override: + accessor = NamedMemberAccessor(mod) + + def swap(fqn_prefix: str, module: torch.nn.Module) -> None: + for override in module_override: # type: ignore[union-attr] + for name, child in module.named_children(): + if len(name) == 0: + continue + fqn = fqn_prefix + "." 
+ name if fqn_prefix != "" else name + new_child = override.replacement(fqn, child) + if id(new_child) == id(child): + swap(fqn, new_child) + else: + accessor.swap_submodule(fqn, new_child) + + swap("", mod) + + # 3. Trace statelss version of the train_step + params = dict(mod.named_parameters(remove_duplicate=False)) + buffers = dict(mod.named_buffers(remove_duplicate=False)) + + named_states = {} + if opt is not None: + # Pass named_states instead of opt.state to stateless_func, because + # the later uses nn.Parameter as key. During tracing, we need to + # make sure optimizers can find the states using proxy tensors. + for n, p in params.items(): + if p in opt.state: + # opt.state's key type is string, but optimizer uses + # Parameter as keys + named_states[n] = opt.state[p] # type: ignore[index] + + is_data_parallel_mode = isinstance(parallel_mode, DataParallel) + + # Lift states and parameters as function arguments so that make_fx + # can trace operations applied to them. + def stateless_func(func, params, buffers, named_states, args, kwargs): + with stateless._reparametrize_module( + mod, {**params, **buffers} + ), _rematerialize_optimizer( + opt, named_states, params + ) if opt else nullcontext(): + # For DataParallel mode, install hooks first to tag the gradients + with gradients_tagging(params) if is_data_parallel_mode else nullcontext(): + ret = func(*args, **kwargs) + + # make sure updated parameters are returned + return ret, list(mod.parameters()), list(named_states.values()) # type: ignore[union-attr] + + # FIXME: Using symbolic tracing to work around in DTensor expand mode. + # Otherwise it hits shape mismatch error, as we use local inputs to + # trace local graph and use DTensor to expand operators, where + # DTensor's shape is the global shape. + tracing_mode = "fake" if is_data_parallel_mode else "symbolic" + + if is_data_parallel_mode: + fake_mode = FakeTensorMode() + data_parallel_mode = cast(DataParallel, parallel_mode) + + def _get_full_batch_arg(arg: torch.Tensor) -> torch.Tensor: + # since compilation happens in the first iteration and we + # receives mini-batch input, convert them to full batch + # fake tensor input first for data parallel sharding + # propagations + fake_arg = fake_mode.from_tensor(arg) + arg_dims = [1] * arg.ndim + # expand the tensor to full batch size on its batch dim + arg_dims[data_parallel_mode.input_batch_dim] *= dist.get_world_size() + return fake_arg.repeat(arg_dims) + + args = pytree.tree_map_only( + torch.Tensor, + _get_full_batch_arg, + args, + ) + kwargs = pytree.tree_map_only( + torch.Tensor, + _get_full_batch_arg, + kwargs, + ) + + with _enable_compile(), torch.autograd.detect_anomaly(check_nan=False): + # FIXME(@mrshenli): functionalization does not work for our use + # case yet. Use explicit decompositions for foreach ops. + # Remove this when the following issue is addressed. + # Issue: https://github.com/pytorch/pytorch/issues/97852 + gm = make_fx( + partial(stateless_func, func), + tracing_mode=tracing_mode, + decomposition_table=SPMD_DECOMP_TABLE, + _allow_non_fake_inputs=False, + )(params, buffers, named_states, args, kwargs) + + params_and_buffers: Dict[str, Union[torch.Tensor, nn.Parameter]] = { + **params, + **buffers, + } + + # 4. parallel mode to expand a single device graph to a distributed graph + gm = parallel_mode.partition( + gm, + mod, + opt, + params_and_buffers, + named_states, + args, + kwargs, + ) + + # 5. Move the responsibility of flattening the input arguments from the + # graph module to the caller. 
This serves two purposes: + # - Transformations that add/remove state need to manipulate a state + # container that maintains the state tensors in the same order as they + # appear in graph placeholders. + # - Reduced runtime cost. The state container is only flattened once upfront. + flat_state = pytree.tree_leaves([params_and_buffers, named_states]) + gm = _to_caller_flattened_graph_module(gm) + + # 6. dedup comm operators. + # The duplication could come from DTensor args and kwargs redistribution. + # Suppose one operator produces a Partial gradient tensor and model + # parameters are replicated. In this case, every optimizer operation using + # that Partial gradient tensor would trigger an allreduce. This is becuase + # DTensor only has local information on individual tensor/operator, which is + # not sufficient to detect duplications in the graph. This situation can + # also happen when inserting FSDP allgather if a parameter is used multiple + # times in the forward method. + # TODO(@mrshenli): @yifuwang has a suggestion of conducting expansion and + # dedup at tracer-level to avoid multiple graph passes. + gm = _dedup_collectives(gm) + + # 7. Replace previously inserted dummy ones with real graphs. + if module_override: + for override in module_override: + gm = override.transform(gm, flat_state) + + return _CompiledResult(gm, mod, opt, flat_state) + + +# Note that the Python convention of __dict__ requires the key to be str. +# TODO: ensure the key is unique. +COMPILED_OBJECT_KEY = "_compiled_obj" + + +def compile( + module_override: Optional[List[Override]] = None, + gm_transformation: Optional[Callable[[fx.GraphModule], fx.GraphModule]] = None, + parallel_mode: Optional[ParallelMode] = None, +): + r"""Compile and optimize a callable, which can be a train step within a training loop. + + This method will extract :class:`nn.Module` and :class:`torch.optim.Optimizer` + instances from the input arguments and trace operations applied to their + parameters and states. + + Args: + module_override (Optional[List[Override]]): a list of Override instances + that will be applied to the module in order. The :class:`Override` + objects provide :class:`nn.Module` replacements during tracing and a + graph transformation function after tracing. (Default: ``None``) + gm_transformation (Optional[Callable[fx.GraphModule, fx.GraphModule]]): + a callback that will be called after the original callable is + compiled and distributed (usually after the first iteration) to + transform the compiled GraphModule into a new optimized one. + parallel_mode (Optional[ParallelMode]): a :class:`ParallelMode` object + that specifies how to parallelize the callable. Each ParallelMode + would have its own strategy to partition the model and the captured + graph (Default: ``None``) + + """ + + def inner(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + last_train_step = kwargs.pop("last_train_step", False) if kwargs else False + first_iter = False + # Put the COMPILED_OBJECT_KEY in ``wrapper`` instead of ``func`` as + # ``wrapper`` is the one that users will get. 
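A hedged usage sketch of the `compile` decorator documented above; `SimpleModel`, `model`, `opt`, and `batch` are placeholder names, the single nn.Module / single optimizer restriction from the FIXMEs applies, and a process group must already be initialized. The caching logic that stores the compiled object on `wrapper` continues directly below.

import torch
import torch.nn as nn
from torch.distributed._spmd.api import compile

class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(8, 8)

    def forward(self, x):
        return self.lin(x)

@compile()  # no parallel_mode given -> falls back to the DTensor expand mode
def train_step(model, opt, batch):
    # Backward and the optimizer step are traced into the graph on the first
    # call; later calls execute the cached, distributed GraphModule.
    model(batch).sum().backward()
    opt.step()

# model = SimpleModel()
# opt = torch.optim.Adam(model.parameters(), foreach=True)
# train_step(model, opt, torch.randn(4, 8))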
+ compiled_obj = wrapper.__dict__.get(COMPILED_OBJECT_KEY, None) + if compiled_obj is None: + first_iter = True + global dtensor_expand_mode + mode: ParallelMode = ( + dtensor_expand_mode if parallel_mode is None else parallel_mode + ) + + compiled_obj = _compile(func, module_override, mode, *args, **kwargs) + wrapper.__dict__[COMPILED_OBJECT_KEY] = compiled_obj + + flat_inps = compiled_obj.flat_state + pytree.arg_tree_leaves( + *args, **kwargs + ) + + with torch.no_grad(): + # N.B.: we don't need autograd as backward has already been + # captured in the graph. + if first_iter and gm_transformation: + # TODO: SPMD should provid a default and configurable + # transformation. + compiled_obj.gm = gm_transformation(compiled_obj.gm) + if not last_train_step: + output = compiled_obj.gm(*flat_inps)[0] + else: + # This is the last train step. Call IterGraphModule.forward() + # with the `last_iter` argument and catch the exception in + # case the compiled_obj is not wrapped with IterGraphModule. + try: + output = compiled_obj.gm(*flat_inps, last_iter=last_train_step)[ + 0 + ] + except TypeError as e: + if "last_iter" not in str(e): + raise e + output = compiled_obj.gm(*flat_inps)[0] + + return output + + return wrapper + + return inner diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/batch_dim_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/batch_dim_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..afb9dd2e7d3b4d1dc1349081aac9627e92a38dee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/batch_dim_utils.py @@ -0,0 +1,179 @@ +from typing import Callable, Dict, List, Set + +import torch + +import torch.fx as fx + +import torch.utils._pytree as pytree + +from torch import Tensor + +from torch.distributed._tensor import DeviceMesh, Replicate, Shard +from torch.distributed._tensor.ops.view_ops import ( + DimSpec, + InputDim, + ops as view_op_rules, +) +from torch.distributed._tensor.placement_types import _Partial, DTensorSpec + +aten = torch.ops.aten + + +class BatchDimAnalyzer: + """This class is used to analyze the batch dimension of each tensor/node in the graph. + + We need to know the batch dimension of each tensor/node so that we know + exactly the sharding layout of intermediate tensors. + + We possibly should evaluate using symbolic shapes to track the batch dimension. + We can experiment it later with dynamo integration (as dynamo have mark_dynamic + API which allows marking batch dimension only) or try to use FakeTensorMode to + mark the batch dimension. For now, let's just use the batch dimension of the first + input tensor as the hint to track the batch dimension of all tensors/nodes in + the graph. 
+ """ + + def __init__(self, batch_dim: int = 0) -> None: + self.batch_dim = batch_dim + + self.batch_dim_map: Dict[fx.Node, int] = {} + # batch dim size is used to track the batch dim size of the input tensor + self.batch_dim_size = -1 + + self.dim_rule_map: Dict[torch._ops.OpOverload, Callable[..., torch.Tensor]] = { + aten.squeeze.default: torch.squeeze, + aten.squeeze.dim: torch.squeeze, + aten.view.default: Tensor.view, + aten.reshape.default: torch.reshape, + aten._unsafe_view.default: Tensor.view, + aten.unsqueeze.default: torch.unsqueeze, + aten.expand.default: Tensor.expand, + aten.permute.default: torch.permute, + aten.repeat.default: Tensor.repeat, + aten.transpose.int: torch.transpose, + } + + def init_batch_dim_size(self, batch_dim_size: int) -> None: + """Initialize batch dim size base on the first input batch size.""" + if self.batch_dim_size != -1 and self.batch_dim_size != batch_dim_size: + raise RuntimeError( + f"batch dim size is already initialized! " + f"Found new batch size: {batch_dim_size} not " + f"matching existing batch dim size: {self.batch_dim_size}!" + ) + self.batch_dim_size = batch_dim_size + + def set_batch_dim(self, node: fx.Node, batch_dim: int) -> None: + self.batch_dim_map[node] = batch_dim + + def get_batch_dim(self, node: fx.Node) -> int: + if node not in self.batch_dim_map: + raise RuntimeError(f"batch dim analysis failed on node: {node}!") + return self.batch_dim_map[node] + + def compute_batch_dim(self, node: fx.Node, full_reduction=False) -> int: + """Compute the batch dimension for the `node`.""" + assert self.batch_dim_size != -1, "batch dim size is not initialized!" + + if node in self.batch_dim_map: + # if batch dim already computed, simply return it + return self.batch_dim_map[node] + + if node.target in self.dim_rule_map: + view_op_rule = view_op_rules[self.dim_rule_map[node.target]] # type: ignore[index] + args_val = pytree.tree_map_only(fx.Node, lambda n: n.meta["val"], node.args) + kwargs_val = pytree.tree_map_only( + fx.Node, lambda n: n.meta["val"], node.kwargs + ) + output_dim_rules = view_op_rule.dim_map(*args_val, **kwargs_val) + + def collect_input_dim(cmd: DimSpec, input_dims: Set[int]): + if isinstance(cmd, InputDim): + input_dims.add(cmd.input_dim) + for inp in cmd.inputs(): + collect_input_dim(inp, input_dims) + + output_dim_to_input_dims: List[Set[int]] = [] + for inp in output_dim_rules: + input_dims: Set[int] = set() + collect_input_dim(inp, input_dims=input_dims) + output_dim_to_input_dims.append(input_dims) + + operand = node.all_input_nodes[0] + operand_batch_dim = self.get_batch_dim(operand) + for output_dim, input_dims in enumerate(output_dim_to_input_dims): + if operand_batch_dim in input_dims: + self.set_batch_dim(node, output_dim) + # update batch dim size before return + # this is because batch dim size might change during the middle + self.batch_dim_size = node.meta["val"].shape[output_dim] + return output_dim + + # if there's no hints from the output_dim_rules, we infer from output + # shape to see if there's batch dim, and shard correspondingly + node_val = node.meta["val"] + if isinstance(node_val, (list, tuple)): + shapes = [val.shape for val in node_val] + else: + shapes = [node_val.shape] + + # for reduction op that reduces over the sharded batch dim + # we don't generate partial, but rather, we generate shard + # This is because the intention of data parallel is to never + # do full reduction across batch dimension, it would still + # keep the reduction activation as sharded. 
+ full_reduction = False + # loop through the dim size to find the output batch dim + for shape in shapes: + if len(shape) == 0: + full_reduction = True + + for i, dim_size in enumerate(shape): + if dim_size == self.batch_dim_size: + self.set_batch_dim(node, i) + return i + + operands = node.all_input_nodes + if not operands: + # if there's no operands, it must be factory ops and it's a tensor + # generated for computation and should be marked as replicated + self.set_batch_dim(node, -1) + # -1 means replicated + return -1 + else: + # if there's operand we see the operand have batch dim, if operand + # have batch dim but output does not, it's either a full reduction, + # where we should stay sharded, or it's a reduction on batch dim only + # where we should produce partial + operand_batch_dim = -1 + for operand in operands: + if operand in self.batch_dim_map: + operand_batch_dim = self.get_batch_dim(operand) + # self.get_batch_dim(operands[0]) + if operand_batch_dim < 0: + # if operand does not have batch dim, we also don't have batch dim + self.set_batch_dim(node, operand_batch_dim) + return operand_batch_dim + elif full_reduction: + self.set_batch_dim(node, operand_batch_dim) + return operand_batch_dim + else: + # if operand have batch dim but output does not, it should + # produce partial, we use -2 to indicate partial + self.set_batch_dim(node, -2) + return -2 + + def compute_act_spec(self, node: fx.Node, mesh: DeviceMesh) -> DTensorSpec: + """Compute the batch dimension for the current node, then generate the sharding spec that shards on the batch dimension.""" + node_batch_dim = self.compute_batch_dim(node) + if node_batch_dim == -1: + # indicate this activation is replicated + act_spec = DTensorSpec(mesh=mesh, placements=(Replicate(),)) + elif node_batch_dim == -2: + # indicate this activation is partial + act_spec = DTensorSpec(mesh=mesh, placements=(_Partial(),)) + else: + # indicate this activation is Shard + act_spec = DTensorSpec(mesh=mesh, placements=(Shard(node_batch_dim),)) + + return act_spec diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/comm_tensor.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/comm_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..292f5b2508612fdebda008e42e5260b97f1e2c07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/comm_tensor.py @@ -0,0 +1,247 @@ +from dataclasses import dataclass +from functools import partial +from typing import Any, List, Optional, Tuple + +import torch +from torch._C import _disabled_torch_function_impl +from torch.fx.experimental.proxy_tensor import ( + _ProxyTensor, + fetch_object_proxy, + get_innermost_proxy_mode, + get_proxy_slot, + set_proxy_slot, + track_tensor_tree, +) +from torch.utils import _pytree as pytree +from torch.utils._mode_utils import no_dispatch +from torch.utils._pytree import tree_flatten, tree_map, tree_map_only + + +@dataclass +class _CommResult: + # a custom type wrapping both inplace output tensor and work handle + _tensor: torch.Tensor + _work: torch.distributed._Work + + +def _wait_comm(comm_result: _CommResult): + # This function is only used by tracing mode as a call_function node right + # before consuming a collective result tensor. 
+ comm_result._work.wait() + return comm_result._tensor + + +def _wrap_comm_result(result: Tuple[Any, Any]) -> Tuple[Any, Any]: + def wrap(work, e): + assert isinstance(e, torch.Tensor), ( + "Excepting collection of tensors as the first element in the " + "return value of communication operations." + ) + + return _CommResult(e, work) + + # E.g., + # allreduce_ returns ([tensor], work) + # allgather_ returns ([[tensor1, tensor2]], work) + work = result[1] + return (tree_map(partial(wrap, work), result[0]), work) + + +def _get_tracer() -> Optional[torch.fx.Tracer]: + mode = get_innermost_proxy_mode() + if mode is None: + return None + return mode.tracer + + +class CommTensor(torch.Tensor): + r""" + A Tensor subclass to wrap input tensors for collective communications. + + This Tensor subclass works for both eager and tracing mode. + In eager mode, it will record whether the inplace collective communication + has been launched using this Tensor and remember the corresponding work + handle. If yes, it will explicitly call wait() in the ``__torch_dispatch__`` + function before subsequent operations consuming the value of the Tensor. + + In tracing mode, ``CommTensor`` inserts two node into the graph using the + ``__torch_dispatch__`` function. + 1. The first node is inserted right after the + communication, wrapping both the inplace output tensor and the returned + work handle into a custom ``_CommResult`` type. We have to do this because + ``ProxyTorchDispatchMode`` only handles ``torch.Tensor``, ``_ProxyTensor``, + and ``torch.nn.Parameter`` objects and will treat the work handle + as a constant and embed that into the graph. As a result, during execution, + it will use the work handle created during tracing and will lead to wrong + result. The solution in this test is to manually create a proxy on the + return value of ``allreduce_`` which is ``([tensor], work)``, and wrap that + to ``[(_CommResult(tensor, work)), work]``. In this way, subsequent nodes can + directly consume ``_CommResult``. + 2. The second node is inserted right before any subsequent node reads from + ``_CommResult``. It will call ``wait()`` on the stashed work handle to ensure + that computation waits for communication. + """ + + _supported_comms: List[str] = [ + "_allgather_base_", + "_reduce_scatter_base_", + "allreduce_", + "allgather_", + "alltoall_", + "broadcast_", + "reduce_scatter_", + "scatter_", + ] + + _tensor: torch.Tensor + _work: Optional[torch.distributed._Work] + + @staticmethod + def __new__(cls, tensor: torch.Tensor): + t = tensor._tensor if isinstance(tensor, CommTensor) else tensor + if get_innermost_proxy_mode() is None: + # noop for eager mode + return tensor + + # Use non-CommTensor to avoid nested CommTensor Wrapping + r = torch.Tensor._make_subclass(cls, t, require_grad=t.requires_grad) + # The tensor object wrapped by this CommTensor + # NB: THIS CAN BE A CommTensor; see test_nested_comm_tensor_wrapping + r._tensor = tensor # type: ignore[attr-defined] + # Record the LAST `work` object returned by collective communication + # operations. 
If this is None, it means no collectives have called + # since last time a tensor is wrapped by CommTensor + r._work = None # type: ignore[attr-defined] + return r + + def __repr__(self): + return f"CommTensor({self._tensor}, work={self._work})" + + # disable __torch_function__ so that CommTensor can recursively dispatch + # with ProxyTorchDispatchMode in make_fx + __torch_function__ = _disabled_torch_function_impl + + @classmethod + def _is_supported(cls, op_name): + return any(comm in op_name for comm in cls._supported_comms) + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + # shared states when unwrapping args + tracer: Optional[torch.fx.Tracer] = None + work: Optional[torch.distributed._Work] = None + + # wrapped ._tensor if this is a CommTensor, and insert/call wait() + # if communication has been launched on this tensor. + def unwrap(e: Any): + if isinstance(e, CommTensor): + nonlocal tracer, work + + work = e._work + # TODO(ezyang): I don't really understand what's going on + # here, but it seems that tracer doesn't reflect whether or + # not there is ambient tracing going on, but rather, whether + # or not we will trace THIS particular invocation. If we + # have a nested CommTensor, the outer layer doesn't actually + # trace and we only trace the inner layer + if not isinstance(e._tensor, CommTensor): + tracer = _get_tracer() + + if work is not None: + if tracer is not None: + # insert a node to the traced graph. + proxy_res = tracer.create_proxy( # type: ignore[union-attr] + "call_function", + _wait_comm, + (get_proxy_slot(e._tensor, tracer).proxy,), + {}, + name="wait_comm", + ) + # HACK: update the proxy for the inplace output + set_proxy_slot(e._tensor, tracer, proxy_res) + # For eager mode, simply wait. + # During tracing, still need to wait here, to make sure the + # execution during tracing is correct. + work.wait() + + # communication has been waited, stop propagating CommTensor + return e._tensor + else: + return e + + def wrap(e: Any): + return CommTensor(e) if isinstance(e, torch.Tensor) else e + + def set_work(work: torch.distributed._Work, e: Any): + if isinstance(e, CommTensor): + e._work = work # type: ignore[attr-defined] + elif isinstance(e, torch.Tensor): + raise RuntimeError( + "Type of output tensors from collective communication during " + "tracing should always be CommTensor instead of torch.Tensor" + ) + return e + + unwrapped_args = tree_map(unwrap, args) + unwrapped_kwargs = tree_map(unwrap, kwargs) + + if cls._is_supported(func.__name__): + if tracer is not None: + # in tracing mode, get proxies for args + proxy_args, proxy_kwargs = tree_map_only( + _ProxyTensor, + lambda e: e.proxy, + tree_map_only( + torch.Tensor, + fetch_object_proxy(tracer), + (unwrapped_args, unwrapped_kwargs), + ), + ) + + # get proxy for output tuple + proxy_res = func(*proxy_args, **proxy_kwargs) + assert isinstance(proxy_res, torch.fx.Proxy) + # insert a node that wraps the output tuple into + # _CommResult(tensor, work) + comm_result_proxy = tracer.create_proxy( # type: ignore[union-attr] + "call_function", + _wrap_comm_result, + (proxy_res,), + {}, + name="comm_result", + ) + + with no_dispatch(): + # disable dispatch to avoid trigger ProxyTorchDispatchMode logic + out = func(*unwrapped_args, **unwrapped_kwargs) + + # wrap output with the proxy of _CommResult, so that subsequent + # ops and link to it. 
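A small sketch (not part of the file) of the eager-mode half of the contract described in the CommTensor docstring: when no proxy tracing mode is active, constructing a CommTensor is a no-op that returns the original tensor, so callers may wrap unconditionally before launching a collective. The tracing-mode dispatch resumes below, where the output is linked to the _CommResult proxy.

import torch
from torch.distributed._spmd.comm_tensor import CommTensor

t = torch.ones(4)
# Outside make_fx / ProxyTorchDispatchMode, __new__ simply hands the tensor back.
assert CommTensor(t) is t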
+ track_tensor_tree(out, comm_result_proxy, constant=None, tracer=tracer) + + # N.B.: we still need to remember the work handle here, and wait + # for it later to make sure the execution during tracing is + # correct. Also, remember comm is already launched + # args[0] is always the collection of output tensors + pytree.tree_map_(partial(set_work, out[1]), args[0]) + + # HACK: update the proxy on the input argument as this is an + # inplace collective communication. + flat_args, args_spec = tree_flatten(unwrapped_args[0]) + flat_out, out_spec = tree_flatten(out[0]) + for a, o in zip(flat_args, flat_out): + set_proxy_slot(a, tracer, get_proxy_slot(o, tracer)) + + return out + else: + # in eager mode, simply remember work handle as an attribute + out = func(*unwrapped_args, **unwrapped_kwargs) + pytree.tree_map_(partial(set_work, out[1]), args[0]) + return out + else: + if work is not None: + return func(*unwrapped_args, **unwrapped_kwargs) + else: + # we need to propagate CommTensor wrapping until the first + # subsequent operation has waited for it. + return tree_map(wrap, func(*unwrapped_args, **unwrapped_kwargs)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/config.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/config.py new file mode 100644 index 0000000000000000000000000000000000000000..54f0cc4dc5c8be65ecc7ca3f5f5a78dfce4b5a14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/config.py @@ -0,0 +1,27 @@ +import logging +import sys +from types import ModuleType +from typing import Set + +# log level (levels print what it says + all levels listed below it) +# DEBUG print full traces <-- lowest level + print tracing of every instruction +# INFO print compiler functions + distributed graphs +# WARN print warnings +# ERROR print exceptions +log_level: int = logging.DEBUG +# Verbose will print full stack traces on warnings and errors +verbose = False + +# the name of a file to write the logs to +log_file_name: None = None + + +class _AccessLimitingConfig(ModuleType): + def __setattr__(self, name, value) -> None: + if name not in _allowed_config_names: + raise AttributeError(f"{__name__}.{name} does not exist") + return object.__setattr__(self, name, value) + + +_allowed_config_names: Set[str] = {*globals().keys()} +sys.modules[__name__].__class__ = _AccessLimitingConfig diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/data_parallel.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..80ad107b794f034ecea5b1def84abe30133e169f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/data_parallel.py @@ -0,0 +1,824 @@ +import operator +from contextlib import contextmanager +from enum import Enum + +from typing import Any, cast, Dict, List, Optional, Tuple + +import torch + +import torch.distributed.distributed_c10d as c10d +import torch.fx as fx +import torch.library +import torch.nn as nn + +import torch.utils._pytree as pytree + +from torch.distributed._spmd.batch_dim_utils import BatchDimAnalyzer +from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard + +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.op_schema import ( + OpStrategy, + PlacementStrategy, + StrategyType, + TupleStrategy, +) +from torch.distributed._tensor.placement_types import _Partial, DTensorSpec, Placement +from 
torch.distributed._tensor.redistribute import redistribute_local_tensor +from torch.fx import GraphModule +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from torch.nn.utils._named_member_accessor import NamedMemberAccessor + +aten = torch.ops.aten + +# Dummy op used by data parallel to tag gradients. +_spmd_lib_def = torch.library.Library("_spmd", "DEF") +_spmd_lib_def.define("tag_grad(Tensor self) -> Tensor") + +_spmd_lib_impl = torch.library.Library("_spmd", "IMPL") +_spmd_lib_impl.impl("tag_grad", lambda x: x, "CompositeExplicitAutograd") + + +class DataParallelStyle(Enum): + """This enum represents the style of the data-parallel operation. + + We have three types of Data Parallel style: + 1. DEFAULT: the default data parallel style, which is to represent a mixed + replicate and fully shard behavior. For each parameter that is able + to be sharded evenly, we shard it, otherwise we would replicate the + parameter. This style avoids potential padding if the parameters + cannot be sharded evenly, but it would generate a mixed of all_reduce + and reduce_scatter. + 2. REPLICATE: the data parallel style that replicates all model parameters. + This is similar to the behavior of DistributedDataParallel. + 3. FULLY_SHARD: the data parallel style that shards all model parameters. This + is similar to the behavior of FullyShardedDataParallel, the + difference is that FullyShardedDataParallel (ZERO-3), which + shards the model using FlatParameter based sharding, + while this style shards each parameter into DTensor. + """ + + DEFAULT = 0 + REPLICATE = 1 + FULLY_SHARD = 2 + + +class NodeType(Enum): + """NodeType is an enum that records the type of the tensors in the graph. + + This is used to determine the data parallel strategy. + """ + + PARAM = 0 + ACT = 1 + GRAD = 2 + STATE = 3 + NON_TENSOR = 4 # NON_TENSOR is to tag non tensor node (i.e. graph output) + + +class DataParallelStrategy(OpStrategy): + """DataParallelStrategy is a special case of OpStrategy that only records the "data parallel style" placement + strategy for each fx Node. + + It takes a list of PlacementStrategy, where each PlacementStrategy describes + one way to distribute the tensor and computation. In the DataParallel case, + there're two possible ways to distribute the parameters: + 1. replicate the parameter over a set of devices (DDP like behavior) + 2. shard the parameter on its tensor dimension 0 over a set of devices + (FSDP like behavior). + + In addition to the strategy list, we also need to: + 1. `node_type`: record the type of each node in the graph, so that we can + determine how to propagate in a data parallel fashion. + 2. `reduce_over_batch` is specifically tied to data parallel as the loss + calculation usually results in scalar tensor where it comes from a + reduction over the batch dimension. We need to know this information + so that we could keep the output as sharded. + """ + + def __init__( + self, + node_type: NodeType, + strategy_list: List[PlacementStrategy], + reduction_over_batch: bool = False, + ): + super().__init__(strategy_list) + self.node_type = node_type + self.reduction_over_batch = reduction_over_batch + + def __str__(self) -> str: + return f"type: {self.node_type}, {super().__str__()}" + + +@contextmanager +def gradients_tagging(params: Dict[str, torch.Tensor]): + """Tag the gradient of the parameters with a special tag, so that we can identify them during SPMD expansion. 
+ + It's safe to trace those hooks and we would remove those nodes later. + """ + tagging_hooks = [] + try: + for p in params.values(): + h = p.register_hook(torch.ops._spmd.tag_grad) + tagging_hooks.append(h) + yield + finally: + # remove those hooks after tracing + for h in tagging_hooks: + h.remove() + + +def _gen_shard_strategy( + mesh: DeviceMesh, shard_dim: int, input_specs: Optional[List[DTensorSpec]] = None +) -> PlacementStrategy: + """Util function to generate a shard strategy on shard_dim.""" + return PlacementStrategy( + output_specs=DTensorSpec(mesh=mesh, placements=(Shard(shard_dim),)), + input_specs=input_specs, + ) + + +def _gen_replicate_strategy( + mesh: DeviceMesh, input_specs: Optional[List[DTensorSpec]] = None +) -> PlacementStrategy: + """Util function to generate a replicate strategy.""" + return PlacementStrategy( + output_specs=DTensorSpec(mesh=mesh, placements=(Replicate(),)), + input_specs=input_specs, + ) + + +def _gen_partial_strategy(mesh: DeviceMesh) -> PlacementStrategy: + """Util function to generate a partial strategy.""" + # NOTE: we use AVG by default, avg reduction is needed depending on + # the loss function, for most loss function it should do + # gradient averaging. There might be certain cases it should + # not do gradient averaging (i.e. sum) but it's pretty rare. + # TODO: Only NCCL supports AVG so using backend like Gloo would + # crash, we should figure out a way to support avg reduction + # for non-NCCL backend + reduce_op = c10d.ReduceOp.AVG # type: ignore[attr-defined] + return PlacementStrategy( + output_specs=DTensorSpec(mesh=mesh, placements=(_Partial(reduce_op),)), + ) + + +def build_data_parallel_strategies( + train_step_graph: GraphModule, + num_params: int, + num_states: int, + mesh: DeviceMesh, + batch_dim: int = 0, +) -> Dict[fx.Node, StrategyType]: + """Loop through the train step graph and build the data parallel strategy for each fx Node.""" + activation_idx = num_params + num_states + non_compute_ops = [ + aten.clone.default, + aten.detach.default, + aten.ones_like.default, + aten.reshape.default, + aten.t.default, + aten.view.default, + torch.ops._spmd.tag_grad.default, + operator.getitem, + ] + + tuple_strategy_ops = [aten._fused_adam.default] + + dp_strategy_map: Dict[fx.Node, StrategyType] = {} + batch_dim_analyzer = BatchDimAnalyzer(batch_dim) + placeholder_idx = 0 + num_param_grad = 0 + + # first we backward propagate to mark the param gradients sharding + # with tag_grad node helps and then delete the tag_grad nodes + for node in reversed(list(train_step_graph.graph.nodes)): + # find a param_grad node via the tagging + if node.target == torch.ops._spmd.tag_grad.default: + cur_node = node + while cur_node.target in non_compute_ops: + cur_node = cur_node.args[0] + partial_strategy = _gen_partial_strategy(mesh) + dp_strategy_map[cur_node] = DataParallelStrategy( + NodeType.GRAD, [partial_strategy] + ) + num_param_grad += 1 + # remove the tag_grad node from graph + node.replace_all_uses_with(node.args[0]) + train_step_graph.graph.erase_node(node) + + if num_param_grad == num_params: + # early break if we have already processed all param_grads + break + + # next we forward propagate to mark all the sharding + for node in train_step_graph.graph.nodes: + if node.op == "placeholder": + if "val" not in node.meta: + # NOTE: There're certain cases where the placeholder nodes do + # not have real tensor values: + # 1. optimizer states can be None sometimes, i.e. 
SGD with + # no momentum, optimizer states populate `momentum` state + # as None, the full graph we get from `compile` would have + # None as the placeholder value + # 2. function args might not only contain params or activations, + # but also contain other non-tensor inputs, i.e. the model + # and optimizer instances baked in as a placeholder, there might + # also be some scalar argument which is not a tensor + # + # For the above cases, we create a NON_TENSOR stratgy so that we + # know it's not a tensor and we don't need to shard it + dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, []) + + elif placeholder_idx < num_params: + # during compilation there's an assumption that the first num_params + # placeholders should be parameters + shard_strategy = _gen_shard_strategy(mesh, 0) + replica_strategy = _gen_replicate_strategy(mesh) + dp_strategy_map[node] = DataParallelStrategy( + NodeType.PARAM, [replica_strategy, shard_strategy] + ) + + elif placeholder_idx < activation_idx: + # optimizer states follow the same strategy as + # the corresponding parameters + replica_strategy = _gen_replicate_strategy(mesh) + shard_strategy = _gen_shard_strategy(mesh, 0) + + dp_strategy_map[node] = DataParallelStrategy( + NodeType.STATE, [replica_strategy, shard_strategy] + ) + else: + activation_batch_dim_size = node.meta["val"].shape[batch_dim] + # find the first activation node and use its batch dim size + if batch_dim_analyzer.batch_dim_size == -1: + batch_dim_analyzer.init_batch_dim_size(activation_batch_dim_size) + + batch_dim_analyzer.set_batch_dim(node, batch_dim) + shard_strategy = _gen_shard_strategy(mesh, batch_dim) + dp_strategy_map[node] = DataParallelStrategy( + NodeType.ACT, [shard_strategy] + ) + placeholder_idx += 1 + elif node.op == "call_function": + # Annotate node types for the computation graph + # Data Parallel node propagation logic: + # param (non-compute) -> out: param + # grad (non-compute before/after) -> out: grad + # state -> output: state + # + # param + activation (param must be replicate, act be sharded) -> out: activation + # param/state + grad (param/state/grad be the same spec) -> out: param/state + # param + state -> out: param + + if node.target in non_compute_ops: + # At this point, we should have removed all the `tag_grad` nodes in the graph + assert node.target != torch.ops._spmd.tag_grad.default + + input_nodes = node.all_input_nodes + assert ( + len(input_nodes) == 1 + ), f"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}" + arg_strategy = dp_strategy_map[input_nodes[0]] + + if node.target == operator.getitem: + # for getitem call, just forward the strategy from the input + getitem_idx = node.args[1] + if isinstance(arg_strategy, TupleStrategy): + # for tuple strategy, we need to get the child strategy from the tuple + dp_strategy_map[node] = arg_strategy.childs[getitem_idx] + else: + # if it's not a tuple strategy, we just forward the arg strategy + dp_strategy_map[node] = arg_strategy + else: + assert isinstance(arg_strategy, DataParallelStrategy) + arg_node_type = arg_strategy.node_type + if arg_node_type == NodeType.PARAM: + replica_strategy = _gen_replicate_strategy(mesh) + dp_strategy_map[node] = DataParallelStrategy( + NodeType.PARAM, [replica_strategy] + ) + elif arg_node_type == NodeType.GRAD: + partial_sig = _gen_partial_strategy(mesh) + dp_strategy_map[node] = DataParallelStrategy( + NodeType.GRAD, [partial_sig] + ) + elif arg_node_type == NodeType.ACT: + arg_node_spec = 
batch_dim_analyzer.compute_act_spec( + input_nodes[0], mesh + ) + + output_spec = batch_dim_analyzer.compute_act_spec(node, mesh) + + shard_strategy = PlacementStrategy( + output_specs=output_spec, input_specs=[arg_node_spec] + ) + dp_strategy_map[node] = DataParallelStrategy( + NodeType.ACT, [shard_strategy] + ) + else: + raise RuntimeError( + f"non compute op not supporting {arg_node_type}! " + ) + + # finished processing this non-compute node + continue + + # for computatation nodes, we need to check all the inputs + input_args = node.all_input_nodes + input_specs = [] + if node in dp_strategy_map: + # found a param_grad node that already have output pre-filled spec + # fill in the expected input specs for the pre-filled strategy + node_strategy = dp_strategy_map[node] + assert isinstance(node_strategy, DataParallelStrategy) + node_type = node_strategy.node_type + assert node_type == NodeType.GRAD + produce_param_grad_strat = node_strategy.strategies + has_activation = False + for arg in input_args: + arg_strategy = dp_strategy_map[arg] + assert isinstance(arg_strategy, DataParallelStrategy) + arg_node_type = arg_strategy.node_type + if arg_node_type == NodeType.ACT: + # activation sharded + has_activation = True + act_spec = batch_dim_analyzer.compute_act_spec(arg, mesh) + + input_specs.append(act_spec) + + if has_activation: + assert len(produce_param_grad_strat) == 1 + produce_param_grad_strat[0].input_specs = input_specs + elif node.target in tuple_strategy_ops: + # ops that need to build tuple strategy instead of normal strategy + # This should happen rarely and only needed when we need to generate + # different node strategy for multiple outputs (i.e. fused_adam op) + # TODO: Currently this specializes to fused optimizer ops, but we need + # to see how to generalize this strategy building logic + output_strategy_len = len(node.args) - 1 + tuple_strategies = [] + for i in range(output_strategy_len): + if not isinstance(node.args[i], list): + raise RuntimeError( + f"Expecting list as arg to build Tuple Strategy, but found type {type(node.args[i])}!" + ) + # for list/tuple arg, use the first one to find out the node type + if len(node.args[i]) > 0: + arg_strategy = dp_strategy_map[node.args[i][0]] + assert isinstance(arg_strategy, DataParallelStrategy) + assert arg_strategy.node_type in [ + NodeType.PARAM, + NodeType.GRAD, + NodeType.STATE, + ], "Expecting param/grad/state as arg to build Tuple Strategy!" 
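+                        # each non-empty param/grad/state list gets the same two
+                        # options as a plain parameter: replicate, or shard on dim 0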
+ replica_strategy = _gen_replicate_strategy(mesh) + shard_strategy = _gen_shard_strategy(mesh, shard_dim=0) + out_node_strategy: StrategyType = DataParallelStrategy( + arg_strategy.node_type, [replica_strategy, shard_strategy] + ) + + tuple_strategies.append(out_node_strategy) + + output_tuple_strategy = TupleStrategy(tuple(tuple_strategies)) + dp_strategy_map[node] = output_tuple_strategy + else: + # NOTE: This is the common region for all regular computation ops + + input_node_types = [ + cast(DataParallelStrategy, dp_strategy_map[arg]).node_type + for arg in input_args + if isinstance(dp_strategy_map[arg], DataParallelStrategy) + ] + if NodeType.GRAD in input_node_types: + # param/state + grad, build up acceptable strategy + # the strategy should be the same for all the inputs/outputs + # TODO: optimizer parts should follow the dtensor prop logic + # to support more general cases that allows optimizer states + # to have different shardings compare to the params + replica_strategy = _gen_replicate_strategy(mesh) + shard_strategy = _gen_shard_strategy(mesh, shard_dim=0) + output_node_type = NodeType.PARAM + + non_grad_types = [t for t in input_node_types if t != NodeType.GRAD] + + output_node_type = non_grad_types[0] + for non_grad_type in non_grad_types: + assert ( + non_grad_type == output_node_type + ), f"Found more than one non grad types! Expect {output_node_type} but found {non_grad_type}!" + assert output_node_type in [ + NodeType.PARAM, + NodeType.STATE, + ], f"Expecting output node type to be either state or param, but found {output_node_type}!" + + dp_strategy_map[node] = DataParallelStrategy( + output_node_type, [replica_strategy, shard_strategy] + ) + elif NodeType.STATE in input_node_types: + # either param + state or state + state + replica_strategy = _gen_replicate_strategy(mesh) + shard_strategy = _gen_shard_strategy(mesh, shard_dim=0) + output_node_type = ( + NodeType.PARAM + if NodeType.PARAM in input_node_types + else NodeType.STATE + ) + + dp_strategy_map[node] = DataParallelStrategy( + output_node_type, [replica_strategy, shard_strategy] + ) + elif NodeType.PARAM in input_node_types: + if NodeType.ACT in input_node_types: + # param + activation, build up acceptable strategy + # param must be replicated, activation must be sharded + for arg in input_args: + arg_strategy = dp_strategy_map[arg] + assert isinstance(arg_strategy, DataParallelStrategy) + node_type = arg_strategy.node_type + if node_type == NodeType.ACT: + # compute activation spec + act_spec = batch_dim_analyzer.compute_act_spec( + arg, mesh + ) + + input_specs.append(act_spec) + elif node_type == NodeType.PARAM: + # param must be replicated + input_specs.append( + DTensorSpec(mesh=mesh, placements=(Replicate(),)) + ) + else: + raise RuntimeError( + f"Expecting node with parameter and activation, but found {input_node_types}! " + ) + # produce activation type sharding for output + output_spec = batch_dim_analyzer.compute_act_spec(node, mesh) + + act_strategy = PlacementStrategy( + output_specs=output_spec, input_specs=input_specs + ) + + dp_strategy_map[node] = DataParallelStrategy( + NodeType.ACT, [act_strategy] + ) + else: + # If inputs only have parameters, the + # strategy of this node should follow input + dp_strategy_map[node] = dp_strategy_map[input_args[0]] + else: + # If input nodes does not have PARAM/GRAD/STATE, then + # it should be a pure activation computation, it should + # produce activation output. 
+ # Activations are usually sharded unless model creates + # new tensors during computation, which depend on whether + # the new tensor associate with a batch dim or not, it could + # be shard/replicate/partial, batch dim analyzer should tell + # us the correct sharding. + for arg in input_args: + arg_strategy = dp_strategy_map[arg] + assert isinstance(arg_strategy, DataParallelStrategy) + input_spec = batch_dim_analyzer.compute_act_spec(arg, mesh) + + input_specs.append(input_spec) + + act_spec = batch_dim_analyzer.compute_act_spec(node, mesh) + op_strategy = PlacementStrategy( + output_specs=act_spec, input_specs=input_specs + ) + dp_strategy_map[node] = DataParallelStrategy( + NodeType.ACT, [op_strategy] + ) + + elif node.op == "output": + dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, []) + else: + raise RuntimeError(f"op code {node.op} not supported") + + return dp_strategy_map # type: ignore[return-value] + + +def mark_data_parallel_shardings( + train_step_graph: GraphModule, + num_parameters: int, + num_states: int, + dp_strategy_map: Dict[fx.Node, StrategyType], + parallel_mode: DataParallelStyle = DataParallelStyle.FULLY_SHARD, +) -> None: + """Mark the sharding for the nodes in the train_step_graph.""" + activation_idx = num_parameters + num_states + placeholder_idx = 0 + for node in train_step_graph.graph.nodes: + node_strategy = dp_strategy_map[node] + if node.op == "placeholder": + assert isinstance(node_strategy, DataParallelStrategy) + node_type = node_strategy.node_type + node_strategies = node_strategy.strategies + if node_type == NodeType.NON_TENSOR: + # set node sharding to None + node_sharding = None + elif placeholder_idx < activation_idx: + assert len(node_strategies) > 0, "node_strategies should not be empty" + if parallel_mode == DataParallelStyle.REPLICATE: + # set to replicate for replicate style + node_sharding = node_strategies[0] + elif parallel_mode == DataParallelStyle.FULLY_SHARD: + # set to shard for fully shard style + if len(node_strategies) == 1: + # only one strategy, use that instead + # i.e. 
optimizer state steps can only be replicate + node_sharding = node_strategies[0] + else: + # use the full sharding strategy + node_sharding = node_strategies[1] + elif parallel_mode == DataParallelStyle.DEFAULT: + # TODO: add support for default mode + # default mode would generate either replicate or shard + raise NotImplementedError("default mode not implemented") + else: + assert len(node_strategies) > 0, "node_strategies should not be empty" + # mark activation as sharded on batch dim + node_sharding = node_strategies[0] + + node.meta["sharding"] = node_sharding # type: ignore[possibly-undefined] + + placeholder_idx += 1 + elif node.op == "call_function": + if isinstance(node_strategy, TupleStrategy): + # For tuple strategy in the data parallel mode, it should have the same strategy + # for all tuple elements, assert that then use the first element's strategy as sharding + first_strategy = cast(DataParallelStrategy, node_strategy.childs[0]) + for child_strategy in node_strategy.childs: + assert isinstance(child_strategy, DataParallelStrategy) + assert child_strategy.strategies == first_strategy.strategies + + node_strategies = first_strategy.strategies + else: + assert isinstance(node_strategy, DataParallelStrategy) + node_strategies = node_strategy.strategies + + assert ( + len(node_strategies) <= 2 + ), "data parallel should have at most 2 strategies" + if len(node_strategies) == 1: + node.meta["sharding"] = node_strategies[0] + elif len(node_strategies) == 2: + if parallel_mode == DataParallelStyle.REPLICATE: + # set to replicate for replicate style + node.meta["sharding"] = node_strategies[0] + elif parallel_mode == DataParallelStyle.FULLY_SHARD: + # set to shard for fully shard style + node.meta["sharding"] = node_strategies[1] + else: + raise RuntimeError("default mode not supported yet!") + else: + raise RuntimeError( + f"node {node} strategy length {len(node_strategies)} is not expected!" + ) + elif node.op == "output": + assert ( + isinstance(node_strategy, DataParallelStrategy) + and node_strategy.node_type == NodeType.NON_TENSOR + ), "output node should not be tensor" + node.meta["sharding"] = None + else: + raise RuntimeError(f"op code {node.op} not supported") + + +def _partition_val(val: Any, spec: DTensorSpec) -> Any: + """Util function to convert a full tensor val to its local component.""" + if isinstance(val, torch.Tensor): + local_shard = val + if val.ndim == 0: + # If it's already a scalar tensor, it is already local, we don't + # need to do anything + return local_shard + + for idx, placement in enumerate(spec.placements): + if placement.is_shard(): + placement = cast(Shard, placement) + num_chunks = spec.mesh.size(mesh_dim=idx) + my_coord = spec.mesh.get_coordinate() + assert my_coord is not None, "current rank not in mesh!" 
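+                # split without padding along the sharded tensor dim and keep the
+                # chunk that belongs to this rank's coordinate on mesh dim idx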
+ my_coord_on_mesh_dim = my_coord[idx] + local_shard = placement._split_tensor( + local_shard, num_chunks, with_padding=False, contiguous=False + )[0][my_coord_on_mesh_dim] + return local_shard + elif isinstance(val, (tuple, list)): + return val.__class__(_partition_val(v, spec) for v in val) + else: + raise RuntimeError(f"val type {type(val)} not supported") + + +def partitioner(graph: GraphModule) -> GraphModule: + """Graph partitioner that partitions the single device graph to distributed graph.""" + shape_adjustment_ops = { + aten._unsafe_view.default: 1, + aten.expand.default: 1, + aten.new_zeros.default: 1, + aten.ones.default: 0, + aten.reshape.default: 1, + aten.view.default: 1, + aten.zeros.default: 0, + } + # partition the graph to distributed + for node in graph.graph.nodes: + node_sharding = node.meta["sharding"] + # None sharding means this node don't need sharding + if node_sharding is None: + continue + + if node.op == "placeholder": + out_spec = node_sharding.output_spec + if not hasattr(out_spec, "from_local"): + local_val = _partition_val(node.meta["val"], out_spec) + # update node value + node.meta["val"] = local_val + elif node.op == "call_function": + out_spec = node_sharding.output_spec + + # check if there's misaligned sharding, insert reshard if there is + expected_input_specs = node_sharding.input_specs + for idx, input_arg in enumerate(node.all_input_nodes): + input_arg_sharding = input_arg.meta["sharding"] + + input_arg_spec = input_arg_sharding.output_spec + desired_spec = ( + out_spec + if expected_input_specs is None + else expected_input_specs[idx] + ) + if input_arg_spec != desired_spec: + input_arg_spec.tensor_meta = input_arg.meta["tensor_meta"] + desired_spec.tensor_meta = input_arg.meta["tensor_meta"] + input_arg_tensor = input_arg.meta["val"] + + # insert reshard operation + def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor: + return redistribute_local_tensor( + local_tensor, + input_arg_spec, + desired_spec, + ) + + reshard_gm = make_fx(reshard_fn)(input_arg_tensor) + reshard_gm_nodes = list(reshard_gm.graph.nodes) + input_node = reshard_gm_nodes[0] + with graph.graph.inserting_before(node): + output_node = graph.graph.graph_copy( + reshard_gm.graph, + val_map={ + input_node: input_arg, + }, + ) + node.replace_input_with(input_arg, output_node) + + output_val = node.meta["val"] + + if node.target == torch.ops.aten.repeat.default: + # for repeat op, we need to infer the repeat sizes + assert isinstance(output_val, torch.Tensor) + local_shape = compute_local_shape( + output_val.shape, out_spec.mesh, out_spec.placements + ) + input_shape = node.args[0].meta["val"].shape + + def infer_repeat_sizes(repeated_shape, input_shape): + repeated_size = [1] * len(repeated_shape) + padded_length = len(repeated_shape) - len(input_shape) + for i in range(len(repeated_shape)): + if i < padded_length: + repeated_size[i] = repeated_shape[i] + else: + repeated_size[i] = ( + repeated_shape[i] // input_shape[i - padded_length] + ) + + return repeated_size + + node.update_arg(1, infer_repeat_sizes(local_shape, input_shape)) + + elif node.target in shape_adjustment_ops: + # for view related op that needs shape, adjust shape to local shape if needed + assert isinstance(output_val, torch.Tensor) + local_shape = compute_local_shape( + output_val.shape, out_spec.mesh, out_spec.placements + ) + shape_arg_num = shape_adjustment_ops[node.target] + node.update_arg(shape_arg_num, local_shape) + + # convert output val to its local component + node.meta["val"] = 
_partition_val(output_val, out_spec) + + elif node.op == "output": + break + else: + raise RuntimeError(f"op code {node} not supported") + + # clean up the graph by removing sharding and partitioning related metadata + for node in graph.graph.nodes: + if "sharding" in node.meta: + del node.meta["sharding"] + if "val" in node.meta and isinstance(node.meta["val"], torch.Tensor): + local_tensor_meta = _extract_tensor_metadata(node.meta["val"]) + node.meta["tensor_meta"] = local_tensor_meta + + graph.graph.lint() + graph.recompile() + return graph + + +def partition_data_parallel( + graph: GraphModule, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer], + params_buffers: Dict[str, torch.Tensor], + named_states: Dict[str, Any], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + mesh: DeviceMesh, + parallel_style: DataParallelStyle, + input_batch_dim: int, +) -> GraphModule: + """Partition the graph to into a data parallel graph. + + This function also shards/replicates the model parameters and optimizer states to DTensors. + """ + num_params_buffers = len(params_buffers) + flattened_states = pytree.tree_leaves(named_states) + num_states = len(flattened_states) + + changed = graph.graph.eliminate_dead_code() + if changed: + graph.recompile() + + # 1. First build up data parallel strategies for the whole graph + strategy_map = build_data_parallel_strategies( + graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim + ) + + # 2. Next we mark the data parallel strategy for each node base on + # the parallel_style + mark_data_parallel_shardings( + graph, + num_parameters=num_params_buffers, + num_states=num_states, + dp_strategy_map=strategy_map, + parallel_mode=parallel_style, + ) + + # 3. Partition the single machine graph to the distribute graph + partitioned_graph = partitioner(graph) + + # preserve node types for the expanded graph + for node in partitioned_graph.graph.nodes: + if node in strategy_map: + node_strategy = strategy_map[node] + if isinstance(node_strategy, DataParallelStrategy): + node.meta["node_type"] = node_strategy.node_type + elif isinstance(node_strategy, TupleStrategy): + node.meta["node_type"] = NodeType.NON_TENSOR + else: + raise RuntimeError(f"Unknown node strategy {node_strategy}") + else: + # if the nodes are expanded nodes (collectives), we mark them + # the same type as the input node. + input_node = node.all_input_nodes[0] + node.meta["node_type"] = input_node.meta["node_type"] + + # 4. 
Last, inplace partition the weights and optim states to + # DTensors base on the parallel style + accessor = NamedMemberAccessor(model) + for param_key, param in params_buffers.items(): + placement: Placement = Replicate() + if parallel_style == DataParallelStyle.FULLY_SHARD: + placement = Shard(0) + elif parallel_style != DataParallelStyle.REPLICATE: + raise RuntimeError(f"parallel style {parallel_style} not supported yet") + + dtensor_param = distribute_tensor(param, mesh, [placement]) + # update re-parameterized module param dict and optim states dict to DTensor + params_buffers[param_key] = dtensor_param.to_local() + # update module parameters to DTensor + accessor.set_tensor(param_key, dtensor_param) + + # update the optimizer state key and values to DTensor + if optimizer is not None and param in optimizer.state: + param_states = named_states[param_key] + param_dtensor_states = {} + for state_key, state_val in param_states.items(): + if isinstance(state_val, torch.Tensor) and state_val.ndim > 0: + # shard/replicate non-scalar tensors, for scalar tensor, we + # don't do anything + dtensor_state = distribute_tensor(state_val, mesh, [placement]) + param_dtensor_states[state_key] = dtensor_state + param_states[state_key] = dtensor_state.to_local() + else: + param_dtensor_states[state_key] = state_val + + optimizer.state.pop(param) # type: ignore[call-overload] + optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index] + + return partitioned_graph diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/distribute.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/distribute.py new file mode 100644 index 0000000000000000000000000000000000000000..771b064b57b9e6f6deebe926deeafbbe535aac24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/distribute.py @@ -0,0 +1,783 @@ +import logging +import operator +from dataclasses import dataclass +from enum import auto, Enum +from functools import partial +from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.distributed._spmd.experimental_ops +import torch.fx as fx + +from torch.distributed._spmd.comm_tensor import _get_tracer +from torch.distributed._spmd.graph_utils import OP +from torch.distributed._spmd.log_utils import get_logger + +from torch.distributed._tensor import DeviceMesh, DTensor +from torch.distributed._tensor.op_schema import OpSchema +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, + TensorMeta, +) +from torch.distributed._tensor.redistribute import redistribute_local_tensor +from torch.fx.experimental.proxy_tensor import make_fx, proxy_slot +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_flatten, tree_map, tree_map_only, tree_unflatten + + +logger: Optional[logging.Logger] = None + +aten = torch.ops.aten + + +class TrainingPhase(Enum): + FORWARD = auto() + BACKWARD = auto() + + +@dataclass +class Schema: + mesh: DeviceMesh + placements: List[Placement] + + +@dataclass +class DSymInt: + """DSymInt represents a value retrieved by a SymInt op from a DTensor. + + DSymInt helps View and Factory ops to determine the placement and shape of the + output tensor, as those operators either do not have an input DTensor or + the input DTensor is insufficient to determine the output tensor's placement. 
+ """ + + global_value: int # value that the SymInt evaluates to + local_value: int # vaue that this SymInt evaluates to on the local shard + mesh: DeviceMesh # device mesh of the DTensor where this SymInt is retrieved from + + def is_shard(self) -> bool: + return self.local_value != self.global_value + + @classmethod + def from_node(cls, node: fx.Node, dtensor: DTensor) -> "DSymInt": + dim: int = 0 + if node.target == aten.sym_size: + dim = cast(int, node.args[1]) + return cls( + global_value=dtensor.size(dim), + local_value=dtensor.to_local().size(dim), + mesh=dtensor.device_mesh, + ) + elif node.target == aten.sym_numel: + return cls( + global_value=dtensor.numel(), + local_value=dtensor.to_local().numel(), + mesh=dtensor.device_mesh, + ) + elif node.target == aten.sym_stride: + dim = cast(int, node.args[1]) + return cls( + global_value=dtensor.stride(dim), + local_value=dtensor.to_local().stride(dim), + mesh=dtensor.device_mesh, + ) + else: + raise NotImplementedError(f"DSymInt does not support {node.target}") + + +def _is_partial_dtensor(obj: Any) -> bool: + """Check if object is 1) DTensor and 2) with any placement of _Partial.""" + if not isinstance(obj, DTensor): + return False + + is_partial = False + for placement in obj.placements: + if isinstance(placement, _Partial): + is_partial = True + break + + return is_partial + + +def _dispatch_with_local_tensors( + op: torch._ops.OpOverload, + local_args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + specs: Optional[ + Dict[ + torch.Tensor, + Tuple[torch.Size, DeviceMesh, Sequence[Placement], Sequence[Placement]], + ] + ] = None, +) -> Any: + if kwargs is None: + kwargs = {} + if specs is None: + specs = {} + + def redistribute(arg: Any) -> Any: + tensor_shape, mesh, current_placement, target_placement = specs[arg] + tensor_meta = TensorMeta( + tensor_shape, + stride=arg.stride(), + dtype=arg.dtype, + ) + current_spec = DTensorSpec( + mesh, tuple(current_placement), tensor_meta=tensor_meta + ) + target_spec = DTensorSpec( + mesh, tuple(target_placement), tensor_meta=tensor_meta + ) + + return ( + redistribute_local_tensor(arg, current_spec, target_spec) # type: ignore[index] + if isinstance(arg, torch.Tensor) and arg in specs # type: ignore[operator] + else arg + ) + + # TODO: this is broken because it won't redistributed potential tensors on the kwargs + return op(*tree_map(redistribute, local_args), **kwargs) + + +# Figure out how to specify a type spec for the return specs value +# without the entire structure. +# pyre-fixme +def _update_specs_for_redistribute(args, target_schema, redistribute): + # Code adapted from pack_args_kwargs_with_local_tensor + flatten_args, args_tree_spec = tree_flatten(args) + flatten_args_schema = pytree.tree_leaves(target_schema.args_schema) + + specs: Dict[ + torch.Tensor, + Tuple[ + torch.Size, + DeviceMesh, + Sequence[Placement], + Sequence[Placement], + ], + ] = {} + for i, arg in enumerate(flatten_args): + if isinstance(arg, DTensor): + if redistribute: + specs[arg._local_tensor] = ( + arg.size(), + flatten_args_schema[i].mesh, + arg.placements, + flatten_args_schema[i].placements, + ) + flatten_args_schema[i] = arg._local_tensor + + unflattened_args = tree_unflatten(flatten_args_schema, args_tree_spec) + return specs, unflattened_args + + +# When no tensor redistribution is required, we only need to update non-tensor args +# of the node according to op_schema and avoid building a GraphModule just for the +# node. 
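+# For example, a view node whose size argument contains sym_size nodes has those
+# entries overwritten with the concrete ints recorded in op_schema.args_schema.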
+def _update_node_from_op_schema(node: torch.fx.Node, op_schema: OpSchema) -> None: + flat_args, args_tree_spec = tree_flatten(node.args) + flat_args_schema = pytree.tree_leaves(op_schema.args_schema) + + def is_sym_int_or_int(arg: Union[int, torch.fx.Node]) -> bool: + if isinstance(arg, torch.fx.Node): + return arg.target in [ + aten.sym_size, + aten.sym_numel, + aten.sym_stride, + ] + return isinstance(arg, int) + + assert len(flat_args) == len(flat_args_schema) + for i, (arg, arg_schema) in enumerate(zip(flat_args, flat_args_schema)): + if is_sym_int_or_int(arg) and isinstance(arg_schema, int): + flat_args[i] = arg_schema + + args = tree_unflatten(flat_args, args_tree_spec) + for idx, arg in enumerate(args): + node.update_arg(idx, arg) + return None + + +def _remap_arg(node_to_obj: Dict[fx.Node, Any], arg: Any) -> Any: + if isinstance(arg, torch.fx.Node): + obj = node_to_obj[arg] + if _get_tracer(): + # This is a shared arg, already has a tracer from previous + # tracing. Delete the tracer. + del cast(Dict[Any, Any], obj.__dict__)[proxy_slot] + return obj + else: + return arg + + +def unpack_sizes_and_dims( + sizes: List[Union[DSymInt, int]], mesh: DeviceMesh +) -> Tuple[List[int], List[Placement]]: + local_sizes: List[int] = [ + s.local_value if isinstance(s, DSymInt) else s for s in sizes + ] + placements: List[Placement] = [ + Shard(i) + for i, a in enumerate(sizes) + if (isinstance(a, DSymInt) and a.is_shard()) + ] or [Replicate()] + + assert len(placements) == mesh.ndim, ( + f"The number of sharded dimensions ({len(placements)}) must " + f"match number of dimensions in device mesh ({mesh.ndim})." + ) + + return local_sizes, placements + + +def binop_sym_int_consumer_rule(node: fx.Node, args: Tuple[Any, ...]) -> DTensor: + assert len(args) == 2, f"Expect two args but got op {node.target} with args {args}" + assert isinstance( + args[0], DTensor + ), f"Expect 1st argument to be DTensor but got {args[0]}" + assert isinstance(args[1], list), f"Expect 2nd argument as list but got {args[1]}" + + # extract sharded dimensions in the size list, the output DTensor should + # follow these placements. + local_sizes, placements = unpack_sizes_and_dims(args[1], args[0].device_mesh) + + # set node args to real int sizes. + node.args = (node.args[0], local_sizes) + op = cast(torch._ops.OpOverload, node.target) + return DTensor.from_local( + local_tensor=op(args[0]._local_tensor, local_sizes), + device_mesh=args[0].device_mesh, + placements=placements, + run_check=False, + ) + + +def slice_backwad_sym_int_consumer_rule( + node: fx.Node, args: Tuple[Any, ...] +) -> DTensor: + grad_output, input_sizes, dim, start, end, step = args + + local_sizes: List[int] = [ + s.local_value if isinstance(s, DSymInt) else s for s in input_sizes + ] + + input_tensor = torch.zeros( + local_sizes, device=grad_output.device, dtype=grad_output.dtype + ) + return DTensor.from_local( + local_tensor=torch.slice_scatter( + input_tensor, grad_output.to_local(), dim, start, end, step + ), + device_mesh=grad_output.device_mesh, + placements=grad_output.placements, + run_check=False, + ) + + +def factory_with_sizes_rule( + node: fx.Node, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + default_mesh: DeviceMesh, +) -> DTensor: + flat_args = pytree.arg_tree_leaves(*args) + assert not any(isinstance(a, DTensor) for a in flat_args), ( + f"Not expect DTensor argument for factory op, but got {node.target} " + f"with arguments {args}." 
+ ) + assert isinstance(args[0], list), f"Expect 2nd argument as list but got {args[1]}" + + local_sizes, placements = unpack_sizes_and_dims(args[0], default_mesh) + node.args = (local_sizes, *args[1:]) + op = cast(torch._ops.OpOverload, node.target) + return DTensor.from_local( + local_tensor=op(*node.args, **kwargs), + device_mesh=default_mesh, + placements=placements, + run_check=False, + ) + + +def factory_arange_rule( + node: fx.Node, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + default_mesh: DeviceMesh, +) -> DTensor: + node.args = tree_map(lambda a: a.local_value if isinstance(a, DSymInt) else a, args) + op = cast(torch._ops.OpOverload, node.target) + return DTensor.from_local( + local_tensor=op(*node.args, **kwargs), + device_mesh=default_mesh, + placements=[Replicate()], + run_check=False, + ) + + +def default_factory_op_rule( + node: fx.Node, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + default_mesh: DeviceMesh, +) -> DTensor: + node.args, node.kwargs = args, kwargs + op = cast(torch._ops.OpOverload, node.target) + return DTensor.from_local( + local_tensor=op(*node.args, **node.kwargs), + device_mesh=default_mesh, + placements=[Replicate()], + run_check=False, + ) + + +# Dispatch override for view and factory ops that consume SymInt arguments, +# where the output spec should follow dimension placement where the SymInt comes +# from. +VIEW_SYM_INT_CONSUMERS: Dict[torch._ops.OpOverload, Callable] = { + aten._unsafe_view.default: binop_sym_int_consumer_rule, + aten.expand.default: binop_sym_int_consumer_rule, + aten.slice_backward.default: slice_backwad_sym_int_consumer_rule, + aten.view.default: binop_sym_int_consumer_rule, +} + +FACTORY_SYM_INT_CONSUMERS: Dict[torch._ops.OpOverload, Callable] = { + aten.full.default: factory_with_sizes_rule, + aten.arange.default: factory_arange_rule, + aten.arange.start: factory_arange_rule, +} + + +# Dispatch override for factory ops, as DTensor cannot propogate sharding spec +# without DTensor inputs. +FACTORY_OPS: Dict[torch._ops.OpOverload, Callable] = { + aten.scalar_tensor.default: default_factory_op_rule, + aten.arange.start: default_factory_op_rule, + aten.zeros.default: default_factory_op_rule, +} + + +def _get_dtensor_dispatch_graph( + node: fx.Node, + node_to_obj: Dict[fx.Node, Any], + *, + force_make_fx: bool = False, + default_mesh: Optional[DeviceMesh] = None, +) -> Optional[fx.GraphModule]: + with torch.no_grad(): + # Args should be a list of objects post remapping. + args = tree_map(partial(_remap_arg, node_to_obj), node.args) + kwargs = tree_map(partial(_remap_arg, node_to_obj), node.kwargs) + + op_overload = cast(torch._ops.OpOverload, node.target) + + if any( + a.is_shard() + for a in pytree.arg_tree_leaves(*args) + if isinstance(a, DSymInt) + ): + if op_overload in VIEW_SYM_INT_CONSUMERS: + assert len(kwargs) == 0, f"Expect empty kwargs, but got {kwargs}" + node_to_obj[node] = VIEW_SYM_INT_CONSUMERS[op_overload](node, args) + return None + elif op_overload in FACTORY_SYM_INT_CONSUMERS: + assert default_mesh is not None, "Requires default mesh for factory ops" + node_to_obj[node] = FACTORY_SYM_INT_CONSUMERS[op_overload]( + node, args, kwargs, default_mesh + ) + return None + else: + assert isinstance(logger, logging.Logger) + logger.warning( + "Assuming using local_value from SymInt for %s" + "is mathematically correct. 
Full args are %s.", + op_overload, + args, + ) + + if node.target == aten.view.default: + # HACK: this is a hack to get around with the fact that some + # view operations on a "global" tensor is invalid usage + # but somehow the view operation on the batch input might hit it + # so we convert the view op to reshape before calling DTensor + op_overload = aten.reshape.default + + # DSymInt args are not sharded on any dimension, local value and global + # value should be the same + args = tree_map(lambda a: a.local_value if isinstance(a, DSymInt) else a, args) + kwargs = tree_map( + lambda a: a.local_value if isinstance(a, DSymInt) else a, kwargs + ) + + if op_overload in FACTORY_OPS: + # Don't pass factory ops to DTensor dispatch, as DTensor cannot + # propagate sharding spec without DTensor inputs. + node_to_obj[node] = FACTORY_OPS[op_overload]( + node, args, kwargs, default_mesh + ) + return None + + dispatch = partial( + _dispatch_with_local_tensors, + op_overload, + kwargs=kwargs, + specs=args, + ) + + gm = make_fx(dispatch, _allow_non_fake_inputs=False)(args) + # FIXME(@wanchaol, @mrshenli): the above seems to accidentally captured + # DeviceMesh tensor ops when handling inplace operators? The ``_to_copy`` is + # not connected to graph output. So, using DCE to get rid of it, but this + # doesn't look correct. + # + # The following operators appear in the captured graph, where the dtype is + # torch.int64. + # + # get_attr _tensor_constant0 _tensor_constant0 () + # call_function transpose aten.transpose.int (_tensor_constant0, -1, 0) + # call_function view aten.view.default (transpose, [-1, 2]) + # call_function view_1 aten.view.default (view, [2]) + # call_function _to_copy aten._to_copy.default (view_1,) + gm.graph.eliminate_dead_code() + + return gm + + +def _build_dummy_add_graph( + dt: DTensor, node_to_obj: Dict[fx.Node, Any] +) -> Tuple[fx.GraphModule, Any]: + """Create a graph for a dummy add function from a partial DTensor. + + This dummy add is used for triggering all_reduce on a Partial DTensor + during the DTensor expansion of the traced graph. + Also returns the actual DTensor after resharding. 
+ """ + + def dummy_add(grad: torch.Tensor, zero: torch.Tensor) -> torch.Tensor: + return grad + zero + + grad: torch.Tensor = dt._local_tensor + zero: torch.Tensor = torch.zeros_like(dt._local_tensor) + + traced_add = make_fx(dummy_add)(grad, zero) + + placeholders = [n for n in traced_add.graph.nodes if n.op == OP.PLACEHOLDER] + call_functions = [n for n in traced_add.graph.nodes if n.op == OP.CALL_FUNCTION] + assert len(placeholders) == 2 + assert len(call_functions) == 1 + node_to_obj[placeholders[0]] = dt + node_to_obj[placeholders[1]] = DTensor.from_local( + zero, dt.device_mesh, [Replicate()], run_check=False + ) + + traced_dispatch = _get_dtensor_dispatch_graph( + call_functions[0], node_to_obj, force_make_fx=True + ) + assert traced_dispatch is not None + + # TODO(anj): This depends on the call function node -> actual DTensor output + # mapping that we want to avoid for SPMD expansion + return traced_dispatch, node_to_obj[call_functions[0]] + + +def _convert_output( + gm: fx.GraphModule, + node: fx.Node, + node_to_obj: Dict[fx.Node, Any], +) -> fx.Node: + new_args = [] + has_partial = False + for argument in node.args[0]: # type: ignore[union-attr] + if not isinstance(argument, fx.Node): + new_args.append(argument) + continue + + obj = node_to_obj[argument] + + if not _is_partial_dtensor(obj): + new_args.append(argument) + continue + + has_partial = True + + # we know it's a dtensor from is partial DT check... + dt = cast(DTensor, obj) + + traced_dispatch, result_obj = _build_dummy_add_graph(dt, node_to_obj) + + wait = [ + n + for n in traced_dispatch.graph.nodes + if n.name == "wait_comm" or n.name == "wait_tensor" + ] + add = [n for n in traced_dispatch.graph.nodes if n.name == "add"] + assert len(wait) == 1 and len(add) == 1 + + # remove add node and replace it with wait node + add[0].replace_all_uses_with(wait[0]) + traced_dispatch.graph.eliminate_dead_code() + # also update the actual DTensor corresponding to the node + # TODO(anj): We require mapping of the final DTensor output to the wait + # comm node. + node_to_obj[wait[0]] = result_obj + + value_remap: Dict[fx.Node, fx.Node] = {} + for dtn in traced_dispatch.graph.nodes: + if dtn.op == OP.PLACEHOLDER: + # do nothing, ignore placeholders, as it has + # already been prepared in value_remap + value_remap[dtn] = argument + elif dtn.op == OP.OUTPUT: + assert ( + len(dtn.args) == 1 and len(dtn.args[0]) == 1 + ), f"Expecting single output, but got {dtn.args} {len(dtn.args)}" + new_args.append(value_remap[dtn.args[0][0]]) + # the concrete DTensor value of output was added when creating the + # inner graph (in _build_dummy_add_graph). Just add it to the final + # output node so that we can report the final output specs correctly. + # TODO(anj): We are depending on the concrete DTensor output of the dummy add. 
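+                # re-key the concrete DTensor under the node copied into gm, so
+                # later lookups on the outer graph's wait node resolve to it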
+ node_to_obj[value_remap[dtn.args[0][0]]] = node_to_obj[dtn.args[0][0]] + + else: + if dtn.op == OP.GET_ATTR: + setattr( + gm, + dtn.target, + getattr(traced_dispatch, dtn.target), + ) + with gm.graph.inserting_before(node): + value_remap[dtn] = gm.graph.node_copy(dtn, lambda n: value_remap[n]) + if has_partial: + gm.graph.erase_node(node) + return gm.graph.output(new_args) + else: + return node + + +def _rebuild_graph( + gm: fx.GraphModule, + node_replacements: Dict[torch.fx.Node, torch.fx.GraphModule], +) -> None: + # replace nodes in local traced graph with DTensor's dispatch graph + for node in gm.graph.nodes: + if node not in node_replacements: + continue + + traced_dispatch = node_replacements[node] + # Map DT's dispatch graph input placeholder nodes to the ones in + # local traced graph. It uses index-based accessing, which is + # brittle, just for testing purpose. + flatten_args = pytree.arg_tree_leaves(*node.args) + i, value_remap = 0, {} + for dtn in traced_dispatch.graph.nodes: + if dtn.op == OP.PLACEHOLDER: + value_remap[dtn] = flatten_args[i] + i += 1 + + # insert DT's dispatch graph to traced local graph. + with gm.graph.inserting_before(node): + for dtn in traced_dispatch.graph.nodes: + if dtn.op == OP.PLACEHOLDER: + # do nothing, ignore placeholders, as it has already + # been prepared in value_remap + pass + elif dtn.op == OP.OUTPUT: + assert ( + len(dtn.args) == 1 + ), f"Expecting single output, but got {dtn.args} {len(dtn.args[0])}" + outputs = dtn.args[0] + # we currently support two very specific types of output + # 1. single output + # 2. multiple outputs resulting from getitem of all elements of tuple + if len(outputs) == 1: + # for single output, we replace the node with the single node + output = outputs[0] + else: + # for multiple outputs, we check that these outputs correspond + # to all elements of a tuple. In that case, we replace + # uses of the output directly with the original tuple + source = None + for i, out in enumerate(outputs): + # we allow None outputs for certain items in the tuple + if out is None: + continue + assert out.op == "call_function" + assert out.target.__module__ == "_operator" + assert out.target.__name__ == "getitem" + assert source is None or source == out.args[0] + source = out.args[0] + assert out.args[1] == i + assert source is not None + output = source + + new_node = value_remap[output] + node.replace_all_uses_with(new_node) + else: + value_remap[dtn] = gm.graph.node_copy(dtn, lambda n: value_remap[n]) + if all( + isinstance(n.target, torch._ops.OpOverload) + and n.target._schema.name.startswith( + ("aten::_foreach", "aten::_fused_adam") + ) + for n in [dtn, node] + ): + # FIXME(@mrshenli): This is a temporary solution enable + # foreach ops. The problem is that foreach ops returns + # List[Tensor], but make_fx will flatten that before + # passing those tensors to output node, which will + # introduce additional getitem nodes. These redundant + # getitem nodes breaks graph correctness as we cannot do + # getitem(getitem(foreach_out, 0), 0). This temporary + # solution skips getitem nodes in DTensor expanded + # subgraphs. + node.replace_all_uses_with(value_remap[dtn]) + break + # explicitly erase node instead of relying on DCE, as DCE does not + # remove inplace copy_ correctly. 
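+            # every use of the original node has been redirected to the expanded
+            # subgraph's output above, so the node itself can be erased here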
+ gm.graph.erase_node(node) + + gm.graph.eliminate_dead_code() + gm.recompile() + + +def _get_last_consumer_to_nodes( + graph: fx.Graph, +) -> Dict[fx.Node, List[fx.Node]]: + # Run through reverse nodes and record the first instance of a use + # of a given node. This represents the *last* use of the node in the + # execution order of the program, which we will use to free unused + # values + node_to_last_consumer: Dict[fx.Node, fx.Node] = {} + last_consumer_to_nodes: Dict[fx.Node, List[fx.Node]] = {} + + def _register_final_consumer(arg_node: fx.Node, consumer: fx.Node) -> None: + if arg_node not in node_to_last_consumer: + node_to_last_consumer[arg_node] = consumer + last_consumer_to_nodes.setdefault(consumer, []).append(arg_node) + + for node in reversed(graph.nodes): + fx.node.map_arg( + node.args, lambda arg_node: _register_final_consumer(arg_node, node) + ) + fx.node.map_arg( + node.kwargs, + lambda kwarg_node: _register_final_consumer(kwarg_node, node), + ) + + return last_consumer_to_nodes + + +def _convert_to_distributed( + gm: fx.GraphModule, + inps: List[torch.Tensor], + schemas: List[Schema], + default_mesh: Optional[DeviceMesh] = None, + _allow_partial: bool = False, +) -> Tuple[fx.GraphModule, Dict[str, Schema]]: + """Transform a graph module to a distributed graph module. + + Returns: + - transformed graph module + - map from output name to DTensorSpec + + """ + global logger + logger = get_logger("spmd_exp") + operators = {getattr(operator, name) for name in operator.__all__} + node_to_obj: Dict[fx.Node, Any] = {} + # map local op node in traced_f to its corresponding subgraph of + # DTensor ops. + node_replacements: Dict[torch.fx.Node, torch.fx.GraphModule] = {} + + last_consumer_to_nodes = _get_last_consumer_to_nodes(gm.graph) + + output_schemas: Dict[str, Schema] = {} + for i, node in enumerate(gm.graph.nodes): + assert logger is not None + logger.info("node%s: op=%s target=%s", i, node.op, node.target) + if node.op == OP.PLACEHOLDER: + assert i < len( + inps + ), f"got more placeholder nodes ({i + 1}) than inputs ({len(inps)})" + + # our example inputs are local shards. Create DTensors from them. + node_to_obj[node] = DTensor.from_local( + inps[i].clone(), # use clone to avoid modifications from inplace ops + schemas[i].mesh, + schemas[i].placements, + # prevent running this collective in backwards pass + run_check=False, + ) + elif isinstance(node.target, torch._ops.OpOverloadPacket): + dtensor = cast(DTensor, node_to_obj[node.args[0]]) + node_to_obj[node] = DSymInt.from_node(node, dtensor) + elif isinstance(node.target, torch._ops.OpOverload): + replacement = _get_dtensor_dispatch_graph( + node, node_to_obj, default_mesh=default_mesh + ) + if replacement is not None: + node_replacements[node] = replacement + elif node.op == OP.OUTPUT: + if not _allow_partial: + # Returns an expanded dummy add node that ensures + # that the partial output tensor has been converted + # to a replicated tensor. + node = _convert_output(gm, node, node_to_obj) + + # Save output sharding for the inputs to backward pass. + # TODO(anj): Pipe the output schema for the BW pass + # instead of requiring the full output DTensor to be + # materialized. 
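+            # record a Schema (mesh + placements) per named output so the caller
+            # can seed the backward pass with the forward output shardings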
+ for inp_arg in node.args[0]: + if isinstance(inp_arg, fx.Node): + obj = node_to_obj[inp_arg] + if isinstance(obj, DTensor): + output_schemas[inp_arg.name] = Schema( + obj.device_mesh, obj.placements # type: ignore[arg-type] + ) + elif node.op == OP.CALL_FUNCTION: + args = tree_map(partial(_remap_arg, node_to_obj), node.args) + kwargs = tree_map(partial(_remap_arg, node_to_obj), node.kwargs) + + dsymints = list( + filter(lambda a: isinstance(a, DSymInt), args + tuple(kwargs.values())) + ) + + if node.target in operators and len(dsymints) > 0: + assert all( + dsymints[0].mesh == d.mesh for d in dsymints + ), "all DSymInts must have the same mesh. " + + local_args = tree_map_only(DSymInt, lambda a: a.local_value, args) + local_kwargs = tree_map_only(DSymInt, lambda a: a.local_value, kwargs) + + global_args = tree_map_only(DSymInt, lambda a: a.global_value, args) + global_kwargs = tree_map_only(DSymInt, lambda a: a.global_value, kwargs) + + node.args = local_args + node.kwargs = local_kwargs + + node_to_obj[node] = DSymInt( + local_value=node.target(*local_args, **local_kwargs), + global_value=node.target(*global_args, **global_kwargs), + mesh=dsymints[0].mesh, + ) + else: + assert len(dsymints) == 0, ( + "SPMD expansion does not support SymInt in non-operator " + f"nodes, got {node.target}." + ) + node_to_obj[node] = node.target(*args, **kwargs) + else: + raise ValueError(f"Unrecognized node.op type {node.op}") + + if node in last_consumer_to_nodes: + # Save memory by deleting objs that wont be used anymore. + for arg_node in last_consumer_to_nodes[node]: + del node_to_obj[arg_node] + + _rebuild_graph(gm, node_replacements) + + return gm, output_schemas diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/experimental_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/experimental_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..40188cf10d3cc2cb4a790c5e0ec352947d06a215 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/experimental_ops.py @@ -0,0 +1,455 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import cast, List, Optional, Sequence, Tuple + +import torch +from torch.distributed._tensor.op_schema import OpSchema, OutputSharding +from torch.distributed._tensor.ops.common_rules import pointwise_rule +from torch.distributed._tensor.ops.utils import register_prop_rule + +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, + TensorMeta, +) + +aten = torch.ops.aten # pyre-ignore + + +@register_prop_rule( # pyre-ignore + [ + aten._foreach_neg.default, + aten._foreach_reciprocal.default, + aten._foreach_sqrt.default, + ] +) +def _prop__foreach_unaop(op_schema: OpSchema) -> OutputSharding: + self = op_schema.args_schema[0] + assert isinstance(self, list) and all(isinstance(s, DTensorSpec) for s in self) + # FIXME(@mrshenli): for sqrt, this is only mathematically correct for + # Replicate and Shard tensor. 
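+    # elementwise unary foreach ops keep each input's sharding, so the output
+    # specs simply mirror the input DTensorSpec list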
+ return OutputSharding(output_spec=self) + + +@register_prop_rule( # pyre-ignore + [ + aten._foreach_add.List, + aten._foreach_div.List, + aten._foreach_mul.List, + ] +) +def _prop__foreach_binop_list(op_schema: OpSchema) -> OutputSharding: + self, other = op_schema.args_schema[:2] + scalar = None if len(op_schema.args_schema) < 3 else op_schema.args_schema[2] + assert isinstance(self, list) and all( + isinstance(s, DTensorSpec) for s in self + ), f"Expect a List[DTensorSpec] but got {self}" + assert isinstance(other, list) and all( + isinstance(o, DTensorSpec) for o in other + ), f"Expect a List[DTensorSpec] but got {other}" + assert len(self) == len(other), ( + "Two tensor lists must match in length, " + f"but got {len(self)} and {len(other)}" + ) + + if any(s != o for s, o in zip(self, other)): + # If DTensorSpec for the two operand do not match, suggest using + # self's DTensorSpec. This will trigger allreduce if other is partial + # and self is replicated. + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=(self, self, scalar) if scalar else (self, self), + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + else: + return OutputSharding(output_spec=self) + + +@register_prop_rule( # pyre-ignore + [ + aten._foreach_add.Scalar, + aten._foreach_div.Scalar, + aten._foreach_mul.Scalar, + aten._foreach_sub.Scalar, + ] +) +def _prop__foreach_binop_scalar(op_schema: OpSchema) -> OutputSharding: + self, scalar = op_schema.args_schema + assert isinstance(self, list) and all(isinstance(s, DTensorSpec) for s in self) + assert not isinstance(scalar, list) + return OutputSharding(output_spec=self) + + +@register_prop_rule( # pyre-ignore + [ + aten._foreach_addcdiv.Scalar, + aten._foreach_addcmul.Scalar, + ] +) +def _prop__foreach_addcop_scalar(op_schema: OpSchema): + self, tensor1, tensor2 = op_schema.args_schema[:3] + scalar = None if len(op_schema.args_schema) < 4 else op_schema.args_schema[3] + assert isinstance(self, list) and all(isinstance(s, DTensorSpec) for s in self) + assert isinstance(tensor1, list) and all(isinstance(s, DTensorSpec) for s in self) + assert isinstance(tensor2, list) and all(isinstance(s, DTensorSpec) for s in self) + if any(s != t1 or s != t2 for s, t1, t2 in zip(self, tensor1, tensor2)): + # If DTensorSpec for the two operand do not match, suggest using + # self's DTensorSpec. This will trigger allreduce if other is partial + # and self is replicated. 
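+        # suggest self's spec for tensor1/tensor2 as well; DTensor redistributes
+        # the mismatched operands before re-running the op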
+ return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=(self, self, self, scalar) + if scalar + else (self, self, self), + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + else: + return OutputSharding(output_spec=self) + + +@register_prop_rule([aten._foreach_pow.ScalarAndTensor]) # pyre-ignore +def _prop__foreach_pow_scalar_and_tensor(op_schema: OpSchema): + scala, exponent = op_schema.args_schema + assert isinstance(exponent, list) and all( + isinstance(s, DTensorSpec) for s in exponent + ) + return OutputSharding(output_spec=exponent) + + +@register_prop_rule([aten._fused_adam.default]) # pyre-ignore +def _prop__fused_adam(op_schema: OpSchema): + NT = 5 + tesnor_list_args: Tuple[List[DTensorSpec]] = op_schema.args_schema[:NT] # type: ignore[assignment] + + assert all(isinstance(schema, list) for schema in tesnor_list_args) + assert all( + isinstance(s, DTensorSpec) for schema in tesnor_list_args for s in schema + ) + + tensor_schemas: Tuple[List[DTensorSpec]] = [ # type: ignore[assignment] + schema for schema in tesnor_list_args if len(schema) + ] + + assert all(len(s) == len(tensor_schemas[0]) for s in tensor_schemas), ( + "expect the same number of gradients and states, but got " + f"{[len(s) for s in tensor_schemas]}." + ) + + if any(any(t != ts[0] for t in ts) for ts in zip(*tensor_schemas)): + new_schemas: Tuple[List[DTensorSpec]] = tuple( # type: ignore[assignment] + op_schema.args_schema[0] if len(s) else s for s in tesnor_list_args + ) + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=new_schemas + op_schema.args_schema[NT:], + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + else: + return OutputSharding(output_spec=(op_schema.args_schema[0],) * NT) # type: ignore[arg-type] + + +@register_prop_rule(aten.nll_loss_forward.default) # pyre-ignore +def _prop_nll_loss_forward(op_schema: OpSchema) -> OutputSharding: + self, target = op_schema.args_schema[:2] + assert isinstance(self, DTensorSpec) + assert isinstance(target, DTensorSpec) + if self.placements != target.placements: + # Self and target must match in placements, which should be shard along + # batch dimension in data parallell use cases. Force redistribute. + + # need to create a new self instead return (target, target) as target + # and self might not match in shape. + new_self = DTensorSpec( + mesh=self.mesh, + placements=target.placements, + tensor_meta=self.tensor_meta, + ) + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=(new_self, target) + op_schema.args_schema[2:], + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + else: + return OutputSharding( + output_spec=( + # by default, nll_loss_forward conducts a reduction and returns + # a scalar tensor, and hence the _Partial placements. 
+ DTensorSpec(mesh=self.mesh, placements=(_Partial(),)), + # the 2nd output total_weight is always a scalar tensor + DTensorSpec(mesh=self.mesh, placements=(Replicate(),)), + ) + ) + + +@register_prop_rule(aten.nll_loss_backward.default) # pyre-ignore +def _prop_nll_loss_backward(op_schema: OpSchema) -> OutputSharding: + grad_output, self = op_schema.args_schema[:2] + assert isinstance(grad_output, DTensorSpec) + assert isinstance(self, DTensorSpec) + return OutputSharding(output_spec=self) + + +@register_prop_rule(aten.stack.default) +def _prop_stack(op_schema: OpSchema) -> OutputSharding: + tensors = op_schema.args_schema[0] + dim = 0 if len(op_schema.args_schema) == 1 else cast(int, op_schema.args_schema[1]) + assert ( + isinstance(tensors, list) and len(tensors) > 0 + ), "expect at least one tensor to stack" + assert all( + isinstance(t, DTensorSpec) for t in tensors + ), f"expect a list of DTensorSpecs, but got {tensors}" + assert all( + t.shape == tensors[0].shape for t in tensors + ), f"expect all tensors to have the same shape, but got {tensors}." + # TODO: provide schema_suggestions when placements do not match + assert all( + t.placements == tensors[0].placements for t in tensors + ), f"expect all tensors to have the same placements, but got {tensors}." + assert all( + not p.is_shard(dim) for p in tensors[0].placements + ), "DTensor does not support stack on sharded dimension." + + return OutputSharding( + output_spec=DTensorSpec(mesh=tensors[0].mesh, placements=tensors[0].placements) + ) + + +@register_prop_rule(aten.select.int) +def _prop_select(op_schema: OpSchema) -> OutputSharding: + tensor, dim = op_schema.args_schema[:2] + assert isinstance(tensor, DTensorSpec) + assert isinstance(dim, int) + placements: Sequence[Placement] = tensor.placements + assert all( + not p.is_shard(dim) for p in placements + ), "DTensor does not support select on sharded dimension." + + # select will remove one dimension, decrement dim of Shard placements by 1 + # if they are larger than dim. + new_placements: List[Placement] = [] + for p in placements: + # Using isinstance instead of is_shard so that mypy won't complain + # about accessing dim attribute. 
+ if isinstance(p, Shard) and p.dim > dim: + new_placements.append(Shard(p.dim - 1)) + else: + new_placements.append(p) + + return OutputSharding( + output_spec=DTensorSpec(mesh=tensor.mesh, placements=tuple(new_placements)) + ) + + +@register_prop_rule(aten.native_layer_norm.default) # pyre-ignore +def _prop_native_layer_norm(op_schema: OpSchema) -> OutputSharding: + input, normalized_shape, weight, bias, eps = op_schema.args_schema + assert isinstance(input, DTensorSpec) + assert isinstance(normalized_shape, (tuple, list)) + if weight is not None: + assert isinstance(weight, DTensorSpec) + assert all(isinstance(p, Replicate) for p in weight.placements) + if bias is not None: + assert isinstance(bias, DTensorSpec) + assert all(isinstance(p, Replicate) for p in bias.placements) + # only the left-most (non-normalized) dimensions of the input can be sharded + batch_ndim = len(input.shape) - len(normalized_shape) + assert all( + isinstance(p, Replicate) or (isinstance(p, Shard) and p.dim < batch_ndim,) + for p in input.placements + ) + stats_spec = DTensorSpec( + mesh=input.mesh, + placements=input.placements, + ) + return OutputSharding(output_spec=(input, stats_spec, stats_spec)) + + +@register_prop_rule(aten.native_layer_norm_backward.default) # pyre-ignore +def _prop_native_layer_norm_backward(op_schema: OpSchema) -> OutputSharding: + ( + grad, + input, + normalized_shape, + result1, + result2, + weight, + bias, + grad_input_mask, + ) = op_schema.args_schema + assert isinstance(grad, DTensorSpec) + assert isinstance(grad_input_mask, (list, tuple)) + if weight is not None: + assert isinstance(weight, DTensorSpec) + assert all(isinstance(s, Replicate) for s in weight.placements) + if bias is not None: + assert isinstance(bias, DTensorSpec) + assert all(isinstance(s, Replicate) for s in bias.placements) + # ensure sharding on dim 0, which will trigger the "Partial" output on + # weight and bias grads + assert any( + isinstance(s, Shard) and s.dim == 0 for s in grad.placements + ), f"Got {grad.placements}" + weight_grad = ( + DTensorSpec( + mesh=weight.mesh, + placements=tuple([_Partial()] * weight.mesh.ndim), + ) + if weight + else None + ) + bias_grad = ( + DTensorSpec( + mesh=bias.mesh, + placements=tuple([_Partial()] * bias.mesh.ndim), + ) + if bias + else None + ) + return OutputSharding( + # NOTE: type errors below are legit. This is because DTensor currently + # doesn't support Optional return values. Need to be fixed in DTensor repo. + output_spec=( + grad if grad_input_mask[0] else None, + weight_grad if grad_input_mask[1] else None, + bias_grad if grad_input_mask[2] else None, + ), + ) + + +def _refine_sharding( + op_schema: OpSchema, active_dim: Optional[int] +) -> Sequence[Placement]: + """Considers 2 first inputs of op_schema as having same shape, and returns suggested placement for a pointwise operation.""" + # consider the operating dimension as a singleton to prevent sharding on it + # however, if active_dim is None, this means the input and output shapes are equal and + # we'll apply exactly the pointwise rule. 
+ + args_schema = [] + for s in op_schema.args_schema[:2]: + assert isinstance(s, DTensorSpec) and s.tensor_meta is not None + args_schema.append( + DTensorSpec( + mesh=s.mesh, # type: ignore[attr-defined] + placements=s.placements, # type: ignore[attr-defined] + tensor_meta=TensorMeta( + shape=torch.Size( + s.shape[0:active_dim] + (1,) + s.shape[active_dim + 1 :] + ) + if active_dim is not None + else s.shape, + stride=s.tensor_meta.stride, + dtype=s.tensor_meta.dtype, + ), + ) + ) + + op_schema = OpSchema( + op=op_schema.op, + args_schema=args_schema, # type: ignore[arg-type] + kwargs_schema={}, + ) + output_sharding = pointwise_rule(op_schema, linearity=False) + if output_sharding.output_spec: + assert isinstance(output_sharding.output_spec, DTensorSpec) + return output_sharding.output_spec.placements + else: + assert output_sharding.schema_suggestions is not None + out_schema = output_sharding.schema_suggestions[0].args_schema[0] + assert isinstance(out_schema, DTensorSpec) + return tuple(out_schema.placements) + + +@register_prop_rule(aten.slice_scatter.default) # pyre-ignore +def prop_slice_scatter(op_schema: OpSchema) -> OutputSharding: + # 1. number of dimensions in input and src need to match. + # 2. number of elements on all non-dim need to match between input and src. + # 3. numer of elements in src in dim need to match the slice size. + # Given the above: + # - We suggest for src to follow the sharding of input, except on the scatter dimension, + # where our best bet for now is to make them replicated as a fall-back. + # TODO: Ideally we'd like to make sure the output is re-sharded afterwards to keep input sharding. + + defaults = (None, None, 0, None, None, 1) + input, src, dim, start, end, step = ( + op_schema.args_schema + defaults[len(op_schema.args_schema) :] + ) + assert isinstance(input, DTensorSpec) + assert isinstance(src, DTensorSpec) + assert isinstance(dim, int) + + if dim < 0: + dim += input.ndim + + # if the input shape and the output shape are the same on the operating dimension, + # this is effectively a no-op, so we just propagate sharding as we would do for + # pointwise, no exceptions. + if input.shape[dim] == src.shape[dim]: + assert start == 0 + assert end >= src.shape[dim] # type: ignore[operator] + dim = None + + # apply sharding refinement as implemented in pointwise_rule + input_suggestion = list(_refine_sharding(op_schema, dim)) + # apply the exception -- disallow sharding on the operating dimension. + for i, p in enumerate(input_suggestion): + if isinstance(p, Shard) and p.dim == dim: + input_suggestion[i] = Replicate() + input_suggestion = tuple(input_suggestion) # type: ignore[assignment] + + if input_suggestion == tuple(input.placements) and src.placements == tuple( + input.placements + ): + # if our sharding is correct, the output sharding will be the same as the input. + return OutputSharding( + output_spec=DTensorSpec( + mesh=input.mesh, + placements=input.placements, + ) + ) + else: + # otherwise, return the suggestion. 
+ return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + DTensorSpec( + mesh=input.mesh, + placements=input_suggestion, + tensor_meta=input.tensor_meta, + ), + DTensorSpec( + mesh=src.mesh, + placements=input_suggestion, + tensor_meta=src.tensor_meta, + ), + ) + + op_schema.args_schema[2:], + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/gm_transformation.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/gm_transformation.py new file mode 100644 index 0000000000000000000000000000000000000000..ea2be4bb36ce53347fea94cb3afc10594adbf8f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/gm_transformation.py @@ -0,0 +1,51 @@ +from typing import Callable + +from torch import fx +from torch.distributed._spmd.graph_optimization import ( + comm_fusion_with_concat, + enable_graph_optimization_dump, + remove_copy_from_optimizer, + schedule_comm_wait, +) +from torch.distributed._spmd.graph_utils import dump_graphs_to_files +from torch.distributed._spmd.iter_graph_module import IterGraphModule + + +class GraphModuleTransformation: + def __init__( + self, + *, + enable_graph_optimization: bool = False, + enable_inductor: bool = False, + dump_graphs: bool = False, + ) -> None: + self.enable_graph_optimization = enable_graph_optimization + self.enable_inductor = enable_inductor + self.dump_graphs = dump_graphs + + def __call__(self, gm: fx.GraphModule) -> Callable: + if self.dump_graphs: + graph_folder = dump_graphs_to_files( + {"before_transformation_gm": gm.print_readable(False)} + ) + enable_graph_optimization_dump(graph_folder) + + iter_gm = IterGraphModule(gm, enable_inductor=self.enable_inductor) + if self.enable_graph_optimization: + comm_fusion_with_concat(iter_gm, 100) + schedule_comm_wait(iter_gm) + remove_copy_from_optimizer(iter_gm) + # Must be called after we are not going to move the graphs + iter_gm.finalize_setup() + + if self.dump_graphs: + dump_graphs_to_files( + { + "iter_graph_setup_gm": iter_gm.setup_gm.print_readable(False), + "iter_graph_main_gm": iter_gm.main_gm.print_readable(False), + "iter_graph_cleanup_gm": iter_gm.cleanup_gm.print_readable(False), + }, + graph_folder, # type: ignore[possibly-undefined] + ) + + return iter_gm diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_optimization.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..10423fb55cd4c4a253c01104e87f6209e2f9f83c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_optimization.py @@ -0,0 +1,986 @@ +# Owner(s): ["oncall: distributed"] +import collections +import itertools +import logging +import operator +import tempfile +import time +from dataclasses import dataclass, field +from functools import wraps +from typing import ( + Any, + Callable, + cast, + DefaultDict, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) + +import torch +import torch.fx as fx +from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode +from torch.distributed._spmd.graph_utils import ( + CommType, + dump_graphs_to_files, + find_node, + get_output, + OP, +) +from torch.distributed._spmd.iter_graph_module import IterGraphModule +from torch.fx.passes.shape_prop import TensorMetadata +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_flatten, 
tree_unflatten + +logger: logging.Logger = logging.getLogger("graph_optimization") +aten = torch.ops.aten +fake_tensor_mode = FakeTensorMode() + +_optimized_func: Set[str] = set() +# The key is the target pass and the value is the prerequisites of the pass. +_prerequisite_sets: DefaultDict[str, Set[str]] = collections.defaultdict(set) +# The key is the target pass and the value is the passes that must be applied before +# the key. +_apply_before_sets: DefaultDict[str, Set[str]] = collections.defaultdict(set) +_dump_graph_folder: str = "" + + +def enable_graph_optimization_dump(folder: str = ""): + global _dump_graph_folder + if not folder: + folder = tempfile.mkdtemp() + _dump_graph_folder = folder + + +# TODO(@fegin): Support multiple runs of graph optimization +# TODO(@fegin): With this design, circular imports will happen when a pass +# developer accidentally creates a pass dependency cycle. As a result, we need to +# break this file into a finer granularity to avoid incorrect circular imports. +def graph_optimization_pass( + prerequisites: Iterable[Callable], + apply_after: Iterable[Callable], +) -> Callable: + """Define the contract of a graph optimization pass. + + All the passes should be wrapped with this decorator. + `prerequisites` is used to annotate the prerequisite passes of this pass. + `apply_after` means that this wrapped pass must be applied after the passes + in `apply_after`. The difference between `prerequisites` and `apply_after` + is that all the passes in `prerequisites` must be applied to the graph and + must be applied before the wrapped pass, while the passes in `apply_after` + are optional. But if a pass in `apply_after` is applied to the graph, it has + to be done before the wrapped pass. + Optimizer pass developers are required to add these fields accordingly and + users need to follow the restrictions to avoid the asserts. + + The current design has one limitation: users can only apply the optimizations + once. In some cases, we may need to run the same optimization multiple + times, e.g., optimization passes -> profiling the result -> apply + optimization passes with the profiling result again. This limitation will be + addressed in the future. + + Args: + prerequisites (Iterable[Callable]): the passes that are the prerequisites + of this pass. + apply_after (Iterable[Callable]): the passes that cannot be applied after + the wrapped pass. + """ + + def inner(func: Callable) -> Callable: + def make_key(func: Callable) -> str: + return f"{func.__module__}.{func.__name__}" + + func_key = make_key(func) + _prerequisite_sets[func_key] = {make_key(f) for f in prerequisites} + for apply_after_pass in apply_after: + _apply_before_sets[make_key(apply_after_pass)].add(func_key) + + @wraps(func) + def pass_wrapper( + gm: Union[fx.GraphModule, IterGraphModule], *args: Any, **kwargs: Any + ) -> None: + begin = time.time() + assert isinstance(gm, (fx.GraphModule, IterGraphModule)), ( + "The first argument of the pass must be either " + "fx.GraphModule or IterGraphModule." + ) + assert func_key not in _optimized_func, f"Cannot apply {func_key} twice." + invalid_passes = _apply_before_sets[func_key].intersection(_optimized_func) + assert ( + not invalid_passes + ), f"{invalid_passes} must be applied after {func_key}." + assert _prerequisite_sets[func_key].issubset(_optimized_func), ( + f"{_prerequisite_sets[func_key] - _optimized_func} are the " + f"prerequisites of {func_key} but are not applied. 
" + f"Applied passes are {_optimized_func}." + ) + + func(gm, *args, **kwargs) + gm.graph.lint() + gm.graph.eliminate_dead_code() + gm.recompile() + _optimized_func.add(func_key) + + prefix = f"after_{func.__name__}" + if _dump_graph_folder: + if isinstance(gm, IterGraphModule): + dump_graphs_to_files( + { + f"{prefix}_setup_gm": gm.setup_gm, + f"{prefix}_main_gm": gm.main_gm, + f"{prefix}_cleanup_gm": gm.cleanup_gm, + }, + _dump_graph_folder, + ) + else: + dump_graphs_to_files({prefix: gm}, _dump_graph_folder) + + logger.info("Spent %f seconds applying %s", time.time() - begin, func_key) + + return pass_wrapper + + return inner + + +@dataclass(unsafe_hash=True) +class CommBlock: + shape: Optional[torch.Size] + node_list: List[fx.Node] + inputs: List[fx.Node] + wait_nodes: List[fx.Node] + comm_node: fx.Node + outputs: Set[fx.Node] + + +def get_comm_block(comm_node: fx.Node) -> CommBlock: + """Find out all the nodes belong to this communcation given a collective node (e.g., allreduce). + + Args: + comm_node(fx.Node): The target communication/collective node. + + Returns: + The CommBlock that encapsulates the related nodes (e.g., wait_node) of + the given comm_node. + """ + # We choose 5 to prevent some accidents that cause infinite loop. But + # with functional collective, the distance is 1. + MAX_WAIT_DISTANCE = 5 + node_list = [] + wait_nodes = [] + inputs = pytree.arg_tree_leaves(*comm_node.args, **comm_node.kwargs) + input_nodes = [inp for inp in inputs if isinstance(inp, fx.Node)] + distance = 0 + wait_prefixes = ("wait_comm", "wait_tensor") + non_end_users_nodes = ("split", "reshape", "getitem", "detach", "alias") + + nodes = collections.deque([comm_node, None]) + while nodes and distance < 5: + node = nodes.popleft() + if node is None: + distance += 1 + if nodes: + nodes.append(None) + continue + node_list.append(node) + if node.name.startswith(wait_prefixes): + wait_nodes.append(node) + else: + for child in node.users: + if isinstance(child, fx.Node): + nodes.append(child) + + if not wait_nodes: + raise RuntimeError( + "The wait nodes are too far away from the comm node {comm_node}." + ) + + # Identify all the outputs of this collective block. + outputs: Set[fx.Node] = set() + nodes = collections.deque(wait_nodes) + while nodes: + node = nodes.popleft() + assert node is not None + for user in node.users: + if isinstance(user, fx.Node) and user.name.startswith(non_end_users_nodes): + nodes.append(user) + node_list.append(user) + else: + outputs.add(node) + break + + # TODO: populate all the tensor metadata and remove the default. 
+ tensor_meta = input_nodes[0].meta.get("tensor_meta", None) + return CommBlock( + # TODO: support symbolic shapes + shape=torch.Size(int(s) for s in tensor_meta.shape) if tensor_meta else None, + node_list=node_list, + wait_nodes=wait_nodes, + comm_node=comm_node, + inputs=input_nodes, + outputs=outputs, + ) + + +def get_all_comm_blocks( + gm: IterGraphModule, comm_ops: Union[Tuple[str, ...], str] +) -> List[CommBlock]: + return [ + get_comm_block(node) + for node in gm.graph.nodes + if node.name.startswith(comm_ops) + ] + + +def _create_meta_val( + fake_tensor_mode: FakeTensorMode, + val: FakeTensor, +) -> FakeTensor: + # TODO: fix the memory_format + return FakeTensor( + fake_tensor_mode, + torch.empty( + val.shape, + dtype=val.dtype, + device="meta", + requires_grad=val.requires_grad, + ), + val.device, + ) + + +def _create_meta_tensor_meta( + fake_tensor_mode: FakeTensorMode, + val: FakeTensor, +) -> TensorMetadata: + return TensorMetadata( + shape=val.shape, + dtype=val.dtype, + requires_grad=val.requires_grad, + stride=val.stride, # type: ignore[arg-type] + # TODO: fix these value + memory_format=None, + is_quantized=False, + qparams={}, + ) + + +def _call_function( + gm: IterGraphModule, + fake_tensor_mode: FakeTensorMode, + meta_val: Optional[FakeTensor], + function: Any, + *args: Any, + **kwargs: Any, +) -> fx.Node: + node = gm.graph.call_function(function, args, kwargs) + + if meta_val is None: + flat_args, spec = tree_flatten((args, kwargs)) + new_flat_args = [] + memory_format = None + for arg in flat_args: + if not isinstance(arg, fx.Node): + new_flat_args.append(arg) + continue + val = arg.meta["val"] + new_flat_args.append(_create_meta_val(fake_tensor_mode, val)) + + fake_args, fake_kwargs = tree_unflatten(new_flat_args, spec) + new_meta_val = function(*fake_args, **fake_kwargs) + else: + new_meta_val = meta_val + node.meta["val"] = new_meta_val + node.meta["tensor_meta"] = _create_meta_tensor_meta(fake_tensor_mode, new_meta_val) + return node + + +def _scatter_wait_result( + gm: IterGraphModule, + fused_comm_block: CommBlock, + comm_blocks: List[CommBlock], + node_indices: Dict[fx.Node, int], +) -> None: + """Scatter the result of the fused communication node to the original users -- splitting the output and reshape each subitem.""" + last_wait_node_idx = 0 + for node in gm.graph.nodes: + if node == fused_comm_block.comm_node: + break + last_wait_node_idx = max( + node_indices.get(node, last_wait_node_idx), last_wait_node_idx + ) + + fused_comm_node = fused_comm_block.comm_node + fused_wait_node = fused_comm_block.wait_nodes[0] + + with gm.graph.inserting_after(fused_wait_node): + split_node = gm.graph.call_function( + aten.split, + ( + fused_wait_node, + # TODO(@fegin): support symbolic shapes + [int(cast(torch.Size, cb.shape).numel()) for cb in comm_blocks], + ), + ) + + # Scatter the split result. + need_sort_nodes = [] + last_split_reshape_node = split_node + with gm.graph.inserting_after(split_node): + for idx, comm_block in enumerate(comm_blocks): + # Some users of the original allreduce and wait are scheduled + # before the fused allreduce. We must move these users to a + # correct topological sort order -- right after the last fused + # allreduce result, the `last_split_reshape_node` variable. 
+ orig_wait = comm_block.wait_nodes[0] + nodes = collections.deque(list(orig_wait.users)) + while nodes: + user_node = nodes.popleft() + if not isinstance(user_node, fx.Node): + continue + if node_indices[user_node] < last_wait_node_idx: + need_sort_nodes.append(user_node) + nodes.extend(list(user_node.users)) + + split_idx_node = gm.graph.call_function(operator.getitem, (split_node, idx)) + with gm.graph.inserting_after(split_idx_node): + wait_output_node = gm.graph.call_function( + aten.reshape, (split_idx_node, comm_block.shape) + ) + gm.graph.node_replace_all_uses_with(orig_wait, wait_output_node) + + if last_split_reshape_node == split_node: + last_split_reshape_node = wait_output_node # type: ignore[possibly-undefined] + + need_sort_nodes = sorted(need_sort_nodes, key=lambda node: node_indices[node]) + gm.graph.move_after(need_sort_nodes, last_split_reshape_node) + + gm.graph.eliminate_dead_code() + + +def _fuse_with_cat( + gm: IterGraphModule, + comm_blocks: List[CommBlock], + node_indices: Dict[fx.Node, int], +) -> CommBlock: + """Fuse the CommBlocks using concat given a list of CommBlock (only allreduce).""" + # Find the last input node. + last_input_node = comm_blocks[0].inputs[0] + last_input_index = -1 + all_input_nodes = [] + for comm_block in comm_blocks: + input_node = comm_block.inputs[0] + # If the input node is a clone, this is CommTensor based implementation. + if input_node.name.startswith("clone"): + input_node = cast(fx.Node, input_node.args[0]) + all_input_nodes.append(input_node) + index = node_indices[input_node] + if index >= last_input_index: + assert index != last_input_index + last_input_node = input_node + last_input_index = index + + # Flatten all the inputs right after the last input is ready. + with gm.graph.inserting_after(last_input_node): + cat_inputs = [] + for input_node in all_input_nodes: + cat_inputs.append( + _call_function( + gm, fake_tensor_mode, None, aten.flatten.using_ints, input_node + ) + ) + + with gm.graph.inserting_after(cat_inputs[0]): + cat_node = _call_function(gm, fake_tensor_mode, None, aten.cat, cat_inputs) + + # Create a new Comm node. + last_comm = comm_blocks[-1] + last_comm_node = last_comm.comm_node + last_wait_node = last_comm.wait_nodes[0] + with gm.graph.inserting_after(cat_node): + flatten_args, spec = tree_flatten((last_comm_node.args, last_comm_node.kwargs)) + flatten_args[0] = cat_node + args, kwargs = tree_unflatten(flatten_args, spec) + fused_comm_node = _call_function( + gm, + fake_tensor_mode, + cat_node.meta["val"], + last_comm_node.target, + *args, + **kwargs, + ) + + # Create a new Wait node. 
+ with gm.graph.inserting_after(fused_comm_node): + flatten_args, spec = tree_flatten((last_wait_node.args, last_wait_node.kwargs)) + flatten_args[0] = fused_comm_node + args, kwargs = tree_unflatten(flatten_args, spec) + fused_wait_node = _call_function( + gm, + fake_tensor_mode, + cat_node.meta["val"], + last_wait_node.target, + *args, + **kwargs, + ) + + # Move the fused_comm_node and its args to right after the source node + nodes_to_move = cat_inputs + [cat_node, fused_comm_node, fused_wait_node] + gm.graph.move_after(nodes_to_move, last_input_node) + + tensor_meta = cat_node.meta.get("tensor_meta") + fused_comm_block = CommBlock( + shape=tensor_meta.shape, # type: ignore[union-attr] + node_list=[fused_comm_node, fused_wait_node], + wait_nodes=[fused_wait_node], + comm_node=fused_comm_node, + inputs=[cat_node], + outputs={fused_wait_node}, + ) + + _scatter_wait_result(gm, fused_comm_block, comm_blocks, node_indices) + + return fused_comm_block + + +def _expedite_comm_ops(gm: IterGraphModule, comm_blocks: List[CommBlock]) -> None: + node_indices = {node: i for i, node in enumerate(gm.graph.nodes)} + for comm_block in comm_blocks: + last_input = comm_block.comm_node + last_input_idx = -1 + for input in comm_block.inputs: + input_idx = node_indices[input] + if input_idx > last_input_idx: + last_input = input + last_input_idx = input_idx + gm.graph.node_append(last_input, comm_block.comm_node) + + +@graph_optimization_pass( + prerequisites=[], + apply_after=[], +) +def comm_fusion_with_concat( + gm: IterGraphModule, + bucket_size_mb: int, +) -> None: + """Run fuse communication with concat. + + This implementation uses concat to concat the bucketed gradients. + """ + comm_blocks = get_all_comm_blocks(gm, (CommType.ALLREDUCE, "all_reduce")) + # First ensure the allreduce are scheduled immediately right after the gradients. + _expedite_comm_ops(gm, comm_blocks) + # Get the comm_blocks based on the new order. + comm_blocks = get_all_comm_blocks(gm, (CommType.ALLREDUCE, "all_reduce")) + node_indices = {node: i for i, node in enumerate(gm.graph.nodes)} + + bucket_size = 1 * 1024**2 + bucket_cap_size = bucket_size_mb * 1024**2 + begin = end = curr_size = 0 + while end < len(comm_blocks): + # TODO: determine the dtype + curr_size += cast(torch.Size, comm_blocks[end].shape).numel() * 4 + end += 1 + if curr_size < bucket_size: + continue + _fuse_with_cat(gm, comm_blocks[begin:end], node_indices) + bucket_size = bucket_cap_size + begin = end + curr_size = 0 + else: + if begin < len(comm_blocks): + _fuse_with_cat(gm, comm_blocks[begin:end], node_indices) + + +@graph_optimization_pass( + prerequisites=[comm_fusion_with_concat], + apply_after=[], +) +def schedule_comm_wait(gm: IterGraphModule) -> None: + """Delay the execution of wait tensors of allreduce until its first user.""" + comm_blocks = get_all_comm_blocks(gm, (CommType.ALLREDUCE, "all_reduce")) + + # Find all the end users. + allreduce_users: Set[fx.Node] = set() + for allreduce in comm_blocks: + for output in allreduce.outputs: + allreduce_users.update(output.users) + + node_indices = {node: i for i, node in enumerate(gm.graph.nodes)} + for allreduce in comm_blocks: + # Find the earliest users. + assert ( + len(allreduce.outputs) >= 1 + ), f"Found a allreduce that has zero outputs/users -- {allreduce}." + # Initialize the target_node to be the first user of the first output. 
+ target_node = next(iter(next(iter(allreduce.outputs)).users)) + target_node_index = 2**31 + for user in (user for output in allreduce.outputs for user in output.users): + index = node_indices[user] + if index < target_node_index: + target_node = user + target_node_index = index + + # Move wait nodes and all the subsequent output nodes before the + # earliest user. + wait_idx = -1 + for wait_idx, node in enumerate(allreduce.node_list): + if node == allreduce.wait_nodes[0]: + break + assert wait_idx >= 0 + gm.graph.move_before(allreduce.node_list[wait_idx:], target_node) + + +@graph_optimization_pass( + prerequisites=[], + apply_after=[], +) +def remove_copy_from_optimizer(gm: IterGraphModule) -> None: + """Erase the orphan copy_ nodes that are generated when tracing the optimizer. + + There are two reasons why we cannot simply use the DCE of fx.Graph. + 1. fx.Graph treats copy_ as a side-effect node and does not erase it. + 2. Users may want to preserve some orphan `copy_` that is not from the + optimizer. + If the second reason does not hold, this pass can be rewritten to use the + DCE from fx.Graph (with an overwrite of the side-effect node list). + """ + MAX_COPY_DISTANCE = 5 + remove_candidates: Set[fx.Node] = set() + for node in reversed(gm.graph.nodes): + if node.users: + continue + if node.op != OP.CALL_FUNCTION or node.target != aten.copy_.default: + continue + + copy_ancestors: Set[fx.Node] = set() + nodes = collections.deque([node, None]) + distance = 0 + should_remove = False + while nodes and distance < MAX_COPY_DISTANCE: + visiting = nodes.popleft() + if visiting is None: + distance += 1 + if nodes: + nodes.append(None) + continue + copy_ancestors.add(visiting) + if visiting.op == OP.CALL_FUNCTION and str(visiting.target).startswith( + ("aten._foreach_", "aten._fused_") + ): + should_remove = True + parents = pytree.arg_tree_leaves(*visiting.args, **visiting.kwargs) + for parent in parents: + if isinstance(parent, fx.Node): + nodes.append(parent) + if should_remove: + # We add all ancestors to the list and it is okay as not all of + # them will be erased -- only those nodes with zero users will be + # erased. + remove_candidates.update(copy_ancestors) + + for node in reversed(gm.graph.nodes): + if node.users: + continue + if node not in remove_candidates: + continue + gm.graph.erase_node(node) + + +# The args list of the fused_adam function. We don't care about kwargs. +AdamArgs = collections.namedtuple( + "AdamArgs", + ["params", "grads", "exp_avgs", "exp_avg_sqs", "max_exp_avg_sqs", "state_steps"], +) + + +# TODO(fegin): Have a template class for all Block classes. +@dataclass(unsafe_hash=True) +class FusedAdamBlock: + optim_node: fx.Node + generate_output: bool + # The output list of the copy nodes. The order follows the argument order. + param_outputs: List[fx.Node] = field(default_factory=list) + grad_outputs: List[fx.Node] = field(default_factory=list) + exp_avgs_outputs: List[fx.Node] = field(default_factory=list) + exp_avg_sqs_outputs: List[fx.Node] = field(default_factory=list) + # TODO(fegin): populate/generate the max_exp_avg_sqs if it exists + max_exp_avg_sqs: List[fx.Node] = field(default_factory=list) + + def generate_outputs(self): + # Iterate over all the args and generate the corresponding output lists. + # Assuming the corresponding output nodes are not created yet. 
+ def _generate_outputs(arg_idx, output_list): + graph = self.optim_node.graph + with graph.inserting_after(self.optim_node): + optim_getitem = graph.call_function( + operator.getitem, (self.optim_node, arg_idx) + ) + for i, arg in enumerate(self.optim_node.args[arg_idx]): + with graph.inserting_after(optim_getitem): + updated_arg = graph.call_function( + operator.getitem, (optim_getitem, i) + ) + with graph.inserting_after(updated_arg): + output_copy = graph.call_function(aten.copy_, (arg, updated_arg)) + output_list.append(output_copy) + + _generate_outputs(0, self.param_outputs) + # Do not generate gradient out list as it is not used. + _generate_outputs(2, self.exp_avgs_outputs) + _generate_outputs(3, self.exp_avg_sqs_outputs) + + def populate_outputs(self): + # Populate the existing output lists from the graph. + def _populate_outputs(args_idx, output_list): + optim_getitem = self.optim_node + for user in self.optim_node.users: + assert ( + user.target == operator.getitem + ), f"The user of {self.optim_node} is not getitem." + if user.args[1] == args_idx: + optim_getitem = user + break + assert ( + optim_getitem != self.optim_node + ), f"Cannot find the getitem node for {self.optim_node}" + output_list.extend( + [self.optim_node] * len(cast(List[fx.Node], self.optim_node.args[0])) + ) + for updated_arg in optim_getitem.users: + assert ( + updated_arg.target == operator.getitem + ), f"Unexpected node target {updated_arg.target}." + idx = updated_arg.args[1] + output_copy = next(iter(updated_arg.users)) + assert str(output_copy.target).startswith( + "aten.copy_" + ), f"Unexpected node target {output_copy.target}." + output_list[idx] = output_copy + for i, output in enumerate(output_list): + assert output != self.optim_node, f"{i}th output is not replaced." + + assert output_list, f"The output for {self.optim_node} is empty." + + _populate_outputs(0, self.param_outputs) + _populate_outputs(2, self.exp_avgs_outputs) + _populate_outputs(3, self.exp_avg_sqs_outputs) + + def __post_init__(self): + if self.param_outputs: + return + if self.generate_output: + self.generate_outputs() + else: + self.populate_outputs() + + +@dataclass(unsafe_hash=True) +class ForeachAddBlock: + add_node: fx.Node + generate_output: bool + # The output list of the copy nodes. The order follows the argument order. + outputs: List[fx.Node] = field(default_factory=list) + + def generate_outputs(self): + # Iterate all the args and generate the corresponding output lists + # Assuming the corrsesponding output nodes are not created yet. + graph = self.add_node.graph + for i, arg in enumerate(cast(Tuple[Any, ...], self.add_node.args[0])): + with graph.inserting_after(self.add_node): + updated_arg = graph.call_function(operator.getitem, (self.add_node, i)) + with graph.inserting_after(updated_arg): + output_copy = graph.call_function(aten.copy_, (arg, updated_arg)) + self.outputs.append(output_copy) + assert self.outputs, f"The output for {self.add_node} is empty." + + def populate_outputs(self): + # Populate the existing output lists from the graph. 
+ self.outputs = [ + self.add_node for _ in cast(Tuple[Any, ...], self.add_node.args[0]) + ] + for updated_arg in self.add_node.users: + assert ( + updated_arg.target == operator.getitem + ), f"Unexpected node target {updated_arg.target}" + idx = cast(int, updated_arg.args[1]) + output_copy = next(iter(updated_arg.users)) + assert str(output_copy.target).startswith( + "aten.copy_" + ), f"The execpted output node is different, {str(output_copy.target)}" + self.outputs[idx] = output_copy + for i, output in enumerate(self.outputs): + assert output != self.add_node, f"{i}th output is not replaced." + + def __post_init__(self): + if self.outputs: + return + + if self.generate_output: + self.generate_outputs() + else: + self.populate_outputs() + + +@dataclass(unsafe_hash=True) +class FusedOptimizerBlock: + step: ForeachAddBlock + optim: FusedAdamBlock + + +def get_fused_optimizer_block(optim_node: fx.Node) -> FusedOptimizerBlock: + """Given a fused optimizer node and return the FusedOptimizerBlock.""" + MAX_STEP_DISTANCE = 5 + # Find the step (foreach_add) + nodes = collections.deque([optim_node, None]) + step_node = optim_node + distance = 0 + while nodes and distance < MAX_STEP_DISTANCE: + node = nodes.popleft() + if node is None: + distance += 1 + if nodes: + nodes.append(None) + continue + elif node.op == OP.CALL_FUNCTION and str(node.target).startswith( + "aten._foreach_add" + ): + step_node = node + break + else: + nodes.extend( + a + for a in pytree.arg_tree_leaves(*node.args, **node.kwargs) + if isinstance(a, fx.Node) + ) + if step_node == optim_node: + raise RuntimeError( + "Cannot find step node (foreach_add) for the optimizer node " + f"{optim_node} with {MAX_STEP_DISTANCE} BFS distance. " + "The API design does not match the tracing graph." + ) + + step = ForeachAddBlock(step_node, generate_output=False) + optim = FusedAdamBlock(optim_node, generate_output=False) + return FusedOptimizerBlock(step, optim) + + +def get_all_fused_optimizer_blocks( + gm: IterGraphModule, optim_ops: Union[Tuple[str, ...], str] +) -> List[FusedOptimizerBlock]: + """Find all the FusedOptimizerBlock that the optimizer operators are in `optim_ops`.""" + return [ + get_fused_optimizer_block(node) + for node in gm.graph.nodes + if node.name.startswith(optim_ops) + ] + + +def _split_fused_adam( + gm: IterGraphModule, + orig_optim_block: FusedOptimizerBlock, + split_gradients: Set[fx.Node], +) -> Tuple[FusedOptimizerBlock, FusedOptimizerBlock]: + """Split the `orig_optim_block` into two FusedOptimizerBlock. + + The first one will be the optimizer that optimize `split_gradients`. The second one is + used to optimize the remaining gradients. + An assert will be raised if one of the optimizer optimize zero gradients. + """ + orig_optim_args = AdamArgs(*orig_optim_block.optim.optim_node.args) + optim_args = (AdamArgs([], [], [], [], [], []), AdamArgs([], [], [], [], [], [])) + # The only hint we can use to split the optimizer is the order/indices. + orig_optim_indices: Tuple[List[int], List[int]] = ([], []) + orig_step_indices: Tuple[List[int], List[int]] = ([], []) + + for idx, gradient in enumerate(orig_optim_args.grads): + group_idx = 0 if gradient in split_gradients else 1 + orig_optim_indices[group_idx].append(idx) + # Get the argument for idx-th gradient from orig_optim_args + for orig_arg, optim_arg in zip(orig_optim_args, optim_args[group_idx]): + # Only add the argument to the list if the original argument list + # is not empty. 
If the original argument list is empty, the new + # one must be an empty list as well. + if orig_arg: + optim_arg.append(orig_arg[idx]) + + # If argument order of step is the same as optimizer, nothing has to be + # done. However, it is risky to rely on this assumption so we populate + # the orig_step_indices. + orig_step_output = optim_args[group_idx].state_steps[-1] + assert str(orig_step_output.target).startswith( + "aten.copy_" + ), f"The copy output is {orig_step_output.target}, expect aten.copy_" + orig_step_getitem = orig_step_output.args[1] + assert "getitem" in str( + orig_step_getitem.target + ), f"The copy getitem is {orig_step_getitem.target}, expect operator.getitem" + orig_step_idx = orig_step_getitem.args[1] + orig_step_indices[group_idx].append(orig_step_idx) + + if not all(l for l in (orig_step_indices + orig_optim_indices)): + raise ValueError("At least one split optimizer does not have input.") + + output = get_output(gm.graph) + results: List[FusedOptimizerBlock] = [] + flatten_output_args, spec = tree_flatten((output.args, output.kwargs)) + flatten_output_args_indices: DefaultDict[ + fx.Node, Set[int] + ] = collections.defaultdict(set) + for idx, output_arg in enumerate(flatten_output_args): + if isinstance(output_arg, fx.Node): + flatten_output_args_indices[output_arg].add(idx) + + def replace_flatten_output_args(orig_node: fx.Node, new_node: fx.Node): + for idx in flatten_output_args_indices[orig_node]: + flatten_output_args[idx] = new_node + + # Create the new step and optim nodes and blocks. + for group_idx in range(2): + step_args: List[fx.Node] = [] + orig_step_outputs: List[fx.Node] = [] + # We have to create the new step node and block first because it is used + # for the new optim node as the input. + with gm.graph.inserting_after(orig_optim_block.optim.optim_node): + for idx in orig_step_indices[group_idx]: + step_args.append( + cast(Tuple[fx.Node, ...], orig_optim_block.step.add_node.args[0])[ + idx + ] + ) + orig_step_outputs.append(orig_optim_block.step.outputs[idx]) + step = gm.graph.call_function( + aten._foreach_add.Scalar, + (step_args, 1), + ) + step_block = ForeachAddBlock(step, generate_output=True) + for i, step_output in enumerate(step_block.outputs): + # Replace the original step output in the graph output node with + # the new one. + orig_step_output = orig_step_outputs[i] + replace_flatten_output_args(orig_step_output, step_output) + # Also need to replace the step output used for the new optimizer. + assert optim_args[group_idx].state_steps[i] == orig_step_output, ( + f"The expected step output node mismatched, {orig_step_output} " + f"{optim_args[group_idx].state_steps[i]}" + ) + optim_args[group_idx].state_steps[i] = step_output + + # Insert the optimizer node after the first step output because its + # topo sort order is the last. + with gm.graph.inserting_after(step_block.outputs[0]): + optim = gm.graph.call_function( + aten._fused_adam.default, + optim_args[group_idx], + orig_optim_block.optim.optim_node.kwargs, + ) + optim_block = FusedAdamBlock(optim, generate_output=True) + for curr_idx, orig_idx in enumerate(orig_optim_indices[group_idx]): + list_names = ("param_outputs", "exp_avgs_outputs", "exp_avg_sqs_outputs") + for name in list_names: + orig_list = getattr(orig_optim_block.optim, name) + curr_list = getattr(optim_block, name) + replace_flatten_output_args(orig_list[orig_idx], curr_list[curr_idx]) + + results.append(FusedOptimizerBlock(step_block, optim_block)) + + # Optimizer is used as the output of the train_step. 
Therefore, we have to + # update the output node of the graph. + output_args, output_kwargs = tree_unflatten(flatten_output_args, spec) + gm.graph.node_set_args(output, output_args) + gm.graph.node_set_kwargs(output, output_kwargs) + # Remove the original copy_ nodes as they won't be DCE. + for copy_output in itertools.chain( + orig_optim_block.optim.param_outputs, + orig_optim_block.optim.exp_avgs_outputs, + orig_optim_block.optim.exp_avg_sqs_outputs, + ): + gm.graph.erase_node(copy_output) + # Call DCE once to get rid of the old optimizer. By doing so, we will be + # able to erase the copy_ nodes of step later. + gm.graph.eliminate_dead_code() + for copy_output in orig_optim_block.step.outputs: + gm.graph.erase_node(copy_output) + # This is not required but calling this for consistency. + gm.graph.eliminate_dead_code() + + return results[0], results[1] + + +def split_fused_optimizer( + gm: IterGraphModule, + optim_block: FusedOptimizerBlock, + split_gradients: Set[fx.Node], +) -> Tuple[FusedOptimizerBlock, FusedOptimizerBlock]: + if not split_gradients: + raise ValueError("The given split_gradients is empty.") + if str(optim_block.optim.optim_node.target).startswith("aten._fused_adam"): + return _split_fused_adam(gm, optim_block, split_gradients) + else: + raise NotImplementedError("Only fused_adam is supported now") + + +# TODO(fegin): The API only support fused adam now. Should extend it to support +# foreach as well. +@graph_optimization_pass( + prerequisites=[remove_copy_from_optimizer], + apply_after=[schedule_comm_wait], +) +def iter_move_grads_and_optimizers( + gm: IterGraphModule, + target_comm_node: str, + target_dest_node: str, +) -> None: + """Extract a comm block and split out a new optimizer and step for it. + + This subgraph is then moved to the forward graph. + """ + for comm_block in get_all_comm_blocks(gm, "all_reduce"): + if comm_block.comm_node.name == target_comm_node: + break + else: + raise ValueError(f"Cannot find {target_comm_node}") + + optim_blocks = get_all_fused_optimizer_blocks(gm, "_fused_adam") + for optim_block in optim_blocks: + optim_args = AdamArgs(*optim_block.optim.optim_node.args) + one_output = next(iter(comm_block.outputs)) + if one_output in optim_args.grads: + break + else: + raise ValueError(f"{target_comm_node} is not used by any fused optimizer.") + + move_optim, _ = split_fused_optimizer(gm, optim_block, comm_block.outputs) + + move_nodes = find_all_descendants( + gm, [comm_block.comm_node, move_optim.step.add_node] + ) + + stop_node = find_node(gm.graph, lambda n: n.name == target_dest_node)[0] + + gm.graph.move_to_next_iter_before(move_nodes, stop_node) + + +def find_all_descendants( + gm: IterGraphModule, + parent_nodes: List[fx.Node], +) -> List[fx.Node]: + """Identify the list of nodes to move during FX graph transformation.""" + assert len(parent_nodes) > 0, "No parent nodes are given." 
+ + output = get_output(gm.graph) + dq_parent_nodes = collections.deque(parent_nodes) + move_node_set = set() + while dq_parent_nodes: + node = dq_parent_nodes.popleft() + move_node_set.add(node) + dq_parent_nodes += [ + u for u in node.users if isinstance(u, fx.Node) and u != output + ] + move_nodes = [node for node in gm.graph.nodes if node in move_node_set] + + return move_nodes diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d48e1c0e762a7ca89bd3e83a1fbd4d23d6c72ffe --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/graph_utils.py @@ -0,0 +1,145 @@ +import logging +import os +import tempfile +from enum import Enum +from typing import Callable, cast, Dict, Iterable, List, Set + +import torch.fx as fx +from torch.fx.passes.shape_prop import TensorMetadata +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_flatten, tree_unflatten + + +logger: logging.Logger = logging.getLogger("graph_utils") + + +class OP(str, Enum): + CALL_FUNCTION = "call_function" + CALL_MODULE = "call_module" + CALL_METHOD = "call_method" + GET_ATTR = "get_attr" + OUTPUT = "output" + PLACEHOLDER = "placeholder" + + +class CommType(str, Enum): + ALLREDUCE = "allreduce_" + ALLGATHER = "allgather_" + BROADCAST = "broadcast_" + REDUCESCATTER = "reduce_scatter_" + SCATTER = "scatter_" + + +def get_node_tensor_metadata(node: fx.Node, is_required: bool = True) -> TensorMetadata: + metadata = node.meta.get("tensor_meta", None) + if is_required and metadata is None: + raise RuntimeError( + f"Callsite expects that ``tensor_meta`` exists in ``{node.name}``, " + f"but got None instead. Node: {node.op} {node.name} {node.target}" + ) + return metadata + + +def get_output(graph: fx.Graph) -> fx.Node: + """Take a graphmodule and return the graph output node. + + We traverse in reverse to expedite it, with the idea that last node should be output + """ + for node in reversed(graph.nodes): + if node.op == OP.OUTPUT: + return node + raise RuntimeError(f"Cannot find the output node in {graph}") + + +def find_node( + graph: fx.Graph, predicate: Callable, reverse_order: bool = False +) -> List[fx.Node]: + """Take a predicate and return all the nodes in the `graph` where the predicate holds.""" + nodes = cast(Iterable[fx.Node], graph.nodes) + if reverse_order: + nodes = cast(Iterable[fx.Node], iter(reversed(nodes))) # type: ignore[call-overload] + return [node for node in nodes if predicate(node)] + + +def is_leaf_subgraph(graph: fx.Graph, subgraph: List[fx.Node]) -> bool: + """Ensure nodes in ``subgraph`` satisfy one of the following rules. + + 1. The user of the node is in ``subgraph``. + 2. The user of the node is output. + 3. There are no users -- the node is a side-effect node. + """ + all_nodes: Set[fx.Node] = set(subgraph) + output = get_output(graph) + for node in subgraph: + for user in node.users: + if not isinstance(user, fx.Node): + continue + if user not in all_nodes and user != output: + return False + return True + + +def clone_subgraph( + graph: fx.Graph, subgraph: List[fx.Node], target: fx.Node +) -> List[fx.Node]: + """Clone the given subgraph and insert it before ``target``. + + This API currently does not support inserting after ``target``. 
+ """ + all_nodes = set(subgraph) + mapping: Dict[fx.Node, fx.Node] = dict() + cloned_subgraph = [] + with graph.inserting_before(target): + for node in subgraph: + cloned_node = graph.call_function( + node.target, node.args, node.kwargs, node.type + ) + # TODO: there are many flatten/unflatten in IterGraph that + # can be simplified with tree_map. Will simplify this in + # a follow-up PR. + original_input = pytree.arg_tree_leaves(*node.args, **node.kwargs) + cloned_input, spec = tree_flatten((cloned_node.args, cloned_node.kwargs)) + mapped_cloned_input = [] + for original_input_node, cloned_input_node in zip( + original_input, cloned_input + ): + if ( + isinstance(original_input_node, fx.Node) + and original_input_node in all_nodes + ): + assert original_input_node in mapping + mapped_cloned_input.append(mapping[original_input_node]) + else: + mapped_cloned_input.append(cloned_input_node) + cloned_node.args, cloned_node.kwargs = tree_unflatten( + mapped_cloned_input, spec + ) + mapping[node] = cloned_node + cloned_subgraph.append(cloned_node) + + return cloned_subgraph + + +def rebuild_graph(gm: fx.GraphModule, remove_dead_code: bool = True) -> None: + """Run the required steps to ensure production-ready graph. + + Note - per the fx docs, elimination of dead code is not very precise. + Hence, the flag to make this step optional. + """ + gm.graph.lint() + if remove_dead_code: + gm.graph.eliminate_dead_code() + gm.recompile() + + +def dump_graphs_to_files(graphs: Dict[str, fx.GraphModule], folder: str = "") -> str: + if not folder: + folder = tempfile.mkdtemp() + + for prefix, gm in graphs.items(): + with open(os.path.join(folder, f"{prefix}.graph"), "w") as fp: + fp.write(str(gm)) + + logger.warning("Dump graphs to %s", folder) + + return folder diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/iter_graph_module.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/iter_graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..f1e8e960f361bc18d6268e05e54a261cf23d9c59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/iter_graph_module.py @@ -0,0 +1,762 @@ +import copy +import inspect +import logging +from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type + +import torch.nn as nn +from torch import fx +from torch.distributed._spmd.graph_utils import ( + clone_subgraph, + get_output, + is_leaf_subgraph, +) +from torch.distributed._spmd.partial_lower import partial_lower +from torch.fx.graph import _PyTreeCodeGen, PythonCode +from torch.fx.node import Argument +from torch.profiler import record_function +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_flatten, tree_map, tree_map_only, tree_unflatten + + +logger: logging.Logger = logging.getLogger("IterGraphModule") + + +class IterGraph(fx.Graph): + """``IterGraph`` is used to perform cross-iteration optimization. + + ``IterGraph`` keeps track of the 3 graphs, self (the original graph), setup graph, and + cleanup graph. The 3 graphs should be identical copies of a ``fx.Graph``. + + IterGraph subclass fx.Graph to override the necessary APIs that will be used + when constructing a optimization, e.g., communication fusion. IterGraph also + provides APIs that originally belong to fx.Node and all these APIs will have + ``node_`` prefix. For example, ``IterGraph.node_prepend`` is the equivalence + of ``fx.Node.prepend``. Note that all the optimizations must be constructed + using these APIs. 
+ """ + + def __init__( + self, + orig_graph: fx.Graph, + setup_graph: fx.Graph, + cleanup_graph: fx.Graph, + owning_module: Optional[fx.GraphModule] = None, + tracer_cls: Optional[Type["fx.Tracer"]] = None, + tracer_extras: Optional[Dict[str, Any]] = None, + ): + super().__init__(owning_module, tracer_cls, tracer_extras) + + output_vals = self.graph_copy(orig_graph, {}, return_output_node=True) + # TODO: if we do ``deepcopy(_codegen)`` and the input argument contains + # a dictionary with the form of Dict[torch.Tensor, Any], the + # torch.fx._pytree.treen_flatten_spec will not be able to flatten the + # dict -- the torch.Tensor will be duplicated because the _input_spec + # will save the ``keys`` of a dictionary (the values are not saved). + self._codegen = copy.deepcopy(orig_graph._codegen) + assert isinstance(output_vals, tuple) + output_val, old_output_val = output_vals + super().output(output_val, type_expr=getattr(old_output_val, "type", None)) + + self.setup_graph = setup_graph + self.cleanup_graph = cleanup_graph + self._all_graphs: Tuple[fx.Graph, ...] = ( + self.setup_graph, + self.cleanup_graph, + cast(fx.Graph, super()), + ) + + self._setup_mapping: Dict[fx.Node, fx.Node] = {} + self._cleanup_mapping: Dict[fx.Node, fx.Node] = {} + self._freeze_cross_iter_movement = False + self._cross_iter_block_count = 0 + + for node, setup_node, cleanup_node in zip( + self.nodes, self.setup_graph.nodes, self.cleanup_graph.nodes + ): + self._setup_mapping[node] = setup_node + self._cleanup_mapping[node] = cleanup_node + + self.num_extra_output = 0 + + def _lookup_node(self, node: fx.Node, graph: fx.Graph) -> Optional[fx.Node]: + if graph == self.setup_graph: + return self._setup_mapping.get(node, None) + elif graph == self.cleanup_graph: + return self._cleanup_mapping.get(node, None) + return node + + def _fx_graph_call( + self, graph: fx.Graph, func: str, *args: Any, **kwargs: Any + ) -> Any: + fx_graph: fx.Graph = graph if graph != self else cast(fx.Graph, super()) + return getattr(fx_graph, func)(*args, **kwargs) + + def _insert_context(self, func: str, node: fx.Node): + class _InsertPoint: + def __init__(self, insert_points: List[Any]): + self.insert_points = insert_points + + def __enter__(self): + pass + + def __exit__(self, type, value, tb): + for insert_point in self.insert_points: + insert_point.__exit__(type, value, tb) + + insert_points = [] + for graph in self._all_graphs: + if node: + actual_node = self._lookup_node(node, graph) + assert actual_node is not None, "Cannot handle None case now." + else: + actual_node = node + insert_points.append(getattr(graph, func)(actual_node)) + + return _InsertPoint(insert_points) + + def inserting_after(self, node): + if self._freeze_cross_iter_movement: + return super().inserting_after(node) + return self._insert_context("inserting_after", node) + + def inserting_before(self, node): + if self._freeze_cross_iter_movement: + return super().inserting_before(node) + return self._insert_context("inserting_before", node) + + def _forward_subgraph_inputs( + self, subgraph: List[fx.Node], graph: fx.Graph, erase_node: bool + ) -> int: + """Turn the inputs of a subgraph into the extra output of the entire graph. + + If ``erase_node`` is True, the subgraph will be erased from the graph -- essentially forward the inputs + of the subgraph to the output of the graph. 
+ """ + output = get_output(graph) + inputs = [] + all_nodes: Set[fx.Node] = set(subgraph) + + for node in subgraph: + node_inputs = pytree.arg_tree_leaves(*node.args, **node.kwargs) + for _input in node_inputs: + if not isinstance(_input, fx.Node): + continue + if _input in all_nodes: + continue + inputs.append(_input) + + if erase_node: + # We have to remove the node in the reversed order to ensure the + # node has zero users. + erased = set() + for node in reversed(subgraph): + if len(node.users) == 1: + key = next(iter(node.users.keys())) + if key == output: + flatten_args, spec = tree_flatten((output.args, output.kwargs)) + if node not in flatten_args: + # This optimizer node from the legacy _SPMD tracing. + node.users.clear() + elif str(node.target).startswith("aten.copy_"): + # This is the case where the optimizer is + # functionalized with copy_. + for i in range(len(flatten_args)): + if flatten_args[i] == node: + flatten_args[i] = node.args[0] + else: + # We have not figured out semantics of forwarding + # all diff ops. + raise RuntimeError( + f"IterGraph does not how to forward the output of {node}" + ) + output.args, output.kwargs = tree_unflatten(flatten_args, spec) + + # This is the step case where there is a virtual data dependency + # (in-place update) between step and optimizer. And + # functionalize_optim add this dependency + for user in list(node.users.keys()): + if user in erased: + node.users.pop(user) + if node.users: + raise RuntimeError( + "IterGraph has not supported moving the nodes that " + "produce users output result. " + f"Error node: {node}." + ) + self._fx_graph_call(graph, "erase_node", node) + erased.add(node) + + # Add all the extra output nodes into a list and append the list to + # the original output.args[0]. + if self.num_extra_output: + # If the extra-output list already exist, just use it. + cast(List[fx.Node], output.args[0][-1]).extend(inputs) # type: ignore[index] + new_output = output.args[0] + else: + # When adding the extra-output list, out_spec of _PyTreeCodeGen + # must be updated accordingly. + if isinstance(graph._codegen, _PyTreeCodeGen): + codegen = graph._codegen + new_output = list(output.args[0]) # type: ignore[arg-type] + new_output.append(inputs) + assert codegen.pytree_info.out_spec is not None + original_tree_out = tree_unflatten( + cast(List[Any], output.args[0]), codegen.pytree_info.out_spec + ) + # Use None as a placeholder. If we use the extra-output list + # the list will be flatten as well and put into out_spec. + _, out_spec = tree_flatten((original_tree_out, None)) + codegen.pytree_info = codegen.pytree_info._replace(out_spec=out_spec) + else: + new_output = (output.args[0], inputs) + self._fx_graph_call(graph, "erase_node", output) + self._fx_graph_call(graph, "output", new_output) + + logger.info("Extended outputs from the subgraph inputs: %s", str(inputs)) + return len(inputs) + + def _forward_inputs_to_subgraph( + self, subgraph: List[fx.Node], graph: fx.Graph, extra_input: int + ) -> None: + """Create extra input nodes and forward the input nodes to the ``subgraph``. + + The external input nodes of ``subgraph`` (nodes that are not in ``subgraph``) will replaced by the newly + created input nodes. + """ + placeholders = [node for node in graph.nodes if str(node.op) == "placeholder"] + assert placeholders, "No placeholders are found" + # Append the extra input nodes to the current input nodes. 
+ with self._fx_graph_call(graph, "inserting_after", placeholders[-1]): + new_input_nodes = list( + reversed( + [ + self._fx_graph_call( + graph, + "placeholder", + f"cross_iter_input_{self._cross_iter_block_count}_{i}", + ) + for i in reversed(range(extra_input)) + ] + ) + ) + + # Update the inputs of subgraph to use the newly created input nodes. + all_nodes = set(subgraph) + new_input_index = 0 + for node in subgraph: + node_inputs, spec = tree_flatten((node.args, node.kwargs)) + new_node_inputs = [] + for input_node in node_inputs: + if not isinstance(input_node, fx.Node) or input_node in all_nodes: + new_node_inputs.append(input_node) + else: + new_node_inputs.append(new_input_nodes[new_input_index]) + new_input_index += 1 + node.args, node.kwargs = tree_unflatten(new_node_inputs, spec) + assert new_input_index == len( + new_input_nodes + ), f"More inputs than needed {len(new_input_nodes)} > {new_input_index}" + + # Update the in_spec of _PyTreeCodeGen if in_spec is not None (the new + # SPMD makes in_spec as None). + if ( + isinstance(graph._codegen, _PyTreeCodeGen) + and graph._codegen.pytree_info.in_spec is not None + ): + codegen = graph._codegen + original_tree_in = tree_unflatten(placeholders, codegen.pytree_info.in_spec) + _, in_spec = tree_flatten(tuple(list(original_tree_in) + new_input_nodes)) + codegen.pytree_info = codegen.pytree_info._replace(in_spec=in_spec) + for new_input in new_input_nodes: + codegen.pytree_info.orig_args.append(new_input.name) + codegen.pytree_info = codegen.pytree_info._replace(in_spec=in_spec) + + def move_to_next_iter_before( + self, subgraph: List[fx.Node], target_node: fx.Node + ) -> None: + """Move the ``subgraph`` to the next iteration before ``target_node``. + + The ``subgraph`` is a list of fx.Node and must satisfy the following + restrictions: + 1. The order of the nodes in ``subgraph`` must obey the topological + sort order. + 2. The users of the node in ``subgraph`` must be one of the following: + a.) the user is also a node in ``subgraph``. + b.) the user is the output of the full graph. + c.) the node has users (side effect node). + """ + if self._freeze_cross_iter_movement: + raise RuntimeError( + "The cross-iteration movement has been frozen for the given " + "IterGraph." + ) + + if not is_leaf_subgraph(self, subgraph): + raise ValueError( + "The target nodes for ``move_to_next_iter_before`` must " + "satisfy one of the following conditions: 1) the user of the " + "node is in the target nodes, 2) the user is the output of the " + "graph, 3) there are no users -- the node is a side-effect node. " + ) + + self._cross_iter_block_count += 1 + # The main graph must be the last one to be modified. Otherwise, the + # mapping may change and hence introduce incorrect mapping for setup + # and cleanup graphs. + + # For the setup graph, no additional input is needed but additional + # outputs will be created. The additional output represents the input of + # the action to be moved to the next iteration -- main graph. + setup_subgraph: List[fx.Node] = [] + for node in subgraph: + mapped_node = self._lookup_node(node, self.setup_graph) + assert mapped_node is not None + setup_subgraph.append(mapped_node) + setup_extra_input = self._forward_subgraph_inputs( + subgraph=setup_subgraph, + graph=self.setup_graph, + erase_node=True, + ) + + # For the cleanup graph, additional input is required to get the output + # from the last iteration -- main graph. Additional nodes are also + # needed to perform the action moved from the last iteration. 
+ target_cleanup_node = self._lookup_node(target_node, self.cleanup_graph) + assert target_cleanup_node is not None, "The target_cleanup_node is None." + cleanup_subgraph: List[fx.Node] = [] + for node in subgraph: + mapped_node = self._lookup_node(node, self.cleanup_graph) + assert mapped_node is not None + cleanup_subgraph.append(mapped_node) + cloned_subgraph = clone_subgraph( + self.cleanup_graph, + cleanup_subgraph, + target=target_cleanup_node, + ) + self._forward_inputs_to_subgraph( + cloned_subgraph, self.cleanup_graph, setup_extra_input + ) + + # For the main graph, additional input will be created to represent + # the output from the last iteration -- main graph or setup graph. + # Additional output will also be generated to represent the input for + # the next iteration -- the main graph or the cleanup graph. + main_extra_input = self._forward_subgraph_inputs( + subgraph=subgraph, graph=self, erase_node=False + ) + assert main_extra_input == setup_extra_input + for node in subgraph: + target_node.prepend(node) + self._forward_inputs_to_subgraph(subgraph, self, main_extra_input) + + # TODO: This is a temporary solution. We are going to remove DCE usage + # or have something to replace fx DCE. + for node in self.cleanup_graph.nodes: + if len(node.users) == 0: + node.users["__hold__"] = None # type: ignore[index] + for node in self.nodes: + if len(node.users) == 0: + node.users["__hold__"] = None # type: ignore[index] + self.num_extra_output += main_extra_input + + def move_before(self, nodes: List[fx.Node], target_node: fx.Node) -> None: + for graph in self._all_graphs: + actual_nodes = [self._lookup_node(node, graph) for node in nodes] + actual_target_node = self._lookup_node(target_node, graph) + assert actual_target_node is not None + for actual_node in actual_nodes: + actual_target_node.prepend(actual_node) + + def move_after(self, nodes: List[fx.Node], target_node: fx.Node) -> None: + for graph in self._all_graphs: + actual_nodes = [self._lookup_node(node, graph) for node in nodes] + actual_target_node = self._lookup_node(target_node, graph) + for actual_node in actual_nodes: + assert actual_target_node is not None + actual_target_node.append(actual_node) + actual_target_node = actual_node + + def call_function( + self, + the_function: Callable[..., Any], + args: Optional[Tuple[Argument, ...]] = None, + kwargs: Optional[Dict[str, Argument]] = None, + type_expr: Optional[Any] = None, + ) -> fx.Node: + if self._freeze_cross_iter_movement: + return super().call_function(the_function, args, kwargs, type_expr) + + setup_args = tree_map( + lambda arg: self._lookup_node(arg, self.setup_graph) + if isinstance(arg, fx.Node) + else arg, + args, + ) + setup_kwargs = tree_map( + lambda arg: self._lookup_node(arg, self.setup_graph) + if isinstance(arg, fx.Node) + else arg, + kwargs, + ) + cleanup_args = tree_map( + lambda arg: self._lookup_node(arg, self.cleanup_graph) + if isinstance(arg, fx.Node) + else arg, + args, + ) + cleanup_kwargs = tree_map( + lambda arg: self._lookup_node(arg, self.cleanup_graph) + if isinstance(arg, fx.Node) + else arg, + kwargs, + ) + + setup_node = self.setup_graph.call_function( + the_function, setup_args, setup_kwargs, type_expr + ) + main_node = super().call_function(the_function, args, kwargs, type_expr) + cleanup_node = self.cleanup_graph.call_function( + the_function, cleanup_args, cleanup_kwargs, type_expr + ) + self._setup_mapping[main_node] = setup_node + self._cleanup_mapping[main_node] = cleanup_node + return main_node + + def erase_node(self, 
to_erase: fx.Node) -> None: + if self._freeze_cross_iter_movement: + return super().erase_node(to_erase) + + setup_node = self._lookup_node(to_erase, self.setup_graph) + assert setup_node is not None, "setup_node is None" + self.setup_graph.erase_node(setup_node) + super().erase_node(to_erase) + cleanup_node = self._lookup_node(to_erase, self.cleanup_graph) + self.cleanup_graph.erase_node(cleanup_node) + + def placeholder( + self, + name: str, + type_expr: Optional[Any] = None, + default_value: Any = inspect.Signature.empty, + ) -> fx.Node: + if self._freeze_cross_iter_movement: + return super().placeholder(name, type_expr, default_value) + + main_placeholder = super().placeholder(name, type_expr, default_value) + setup_placeholder = self.setup_graph.placeholder(name, type_expr, default_value) + cleanup_placeholder = self.cleanup_graph.placeholder( + name, type_expr, default_value + ) + self._setup_mapping[main_placeholder] = setup_placeholder + self._cleanup_mapping[main_placeholder] = cleanup_placeholder + return main_placeholder + + def output(self, result: Argument, type_expr: Optional[Any] = None) -> fx.Node: + if self._freeze_cross_iter_movement: + return super().output(result, type_expr) + + main_output = super().output(result, type_expr) + setup_result = tree_map( + lambda _result: self._lookup_node(_result, self.setup_graph) + if isinstance(_result, fx.Node) + else _result, + result, + ) + cleanup_result = tree_map( + lambda _result: self._lookup_node(_result, self.cleanup_graph) + if isinstance(_result, fx.Node) + else _result, + result, + ) + self.setup_graph.output(setup_result, type_expr) + self.cleanup_graph.output(cleanup_result, type_expr) + + return main_output + + def lint(self) -> None: + self.setup_graph.lint() + super().lint() + self.cleanup_graph.lint() + + def node_prepend(self, target_node: fx.Node, node: fx.Node) -> None: + """Prepend node to target_node.""" + if self._freeze_cross_iter_movement: + target_node.prepend(node) + return + + for graph in self._all_graphs: + actual_node = self._lookup_node(node, graph) + assert actual_node is not None, "The node is None" + actual_target_node = self._lookup_node(target_node, graph) + assert actual_target_node is not None, "The target node is None" + actual_target_node.prepend(actual_node) + + def node_append(self, target_node: fx.Node, node: fx.Node) -> None: + """Append node to target_node.""" + if self._freeze_cross_iter_movement: + target_node.append(node) + return + + for graph in self._all_graphs: + actual_node = self._lookup_node(node, graph) + assert actual_node is not None, f"The actual node is None, {node}." + actual_target_node = self._lookup_node(target_node, graph) + assert ( + actual_target_node is not None + ), f"The actual target node is None, {target_node}." 
+ actual_target_node.append(actual_node) + + def node_set_args(self, node: fx.Node, args: Tuple[Argument, ...]) -> None: + if self._freeze_cross_iter_movement: + node.args = args + return + + setup_args = tree_map_only( + fx.Node, lambda _arg: self._lookup_node(_arg, self.setup_graph), args + ) + setup_node = self._lookup_node(node, self.setup_graph) + assert setup_node is not None + setup_node.args = setup_args + cleanup_args = tree_map_only( + fx.Node, lambda _arg: self._lookup_node(_arg, self.cleanup_graph), args + ) + cleanup_node = self._lookup_node(node, self.cleanup_graph) + assert cleanup_node is not None + cleanup_node.args = cleanup_args + node.args = args + + def node_set_kwargs(self, node: fx.Node, kwargs: Dict[str, Argument]) -> None: + if self._freeze_cross_iter_movement: + node.kwargs = kwargs + return + + setup_kwargs = tree_map_only( + fx.Node, lambda _arg: self._lookup_node(_arg, self.setup_graph), kwargs + ) + setup_node = self._lookup_node(node, self.setup_graph) + assert setup_node is not None + setup_node.kwargs = setup_kwargs + cleanup_kwargs = tree_map_only( + fx.Node, lambda _arg: self._lookup_node(_arg, self.cleanup_graph), kwargs + ) + cleanup_node = self._lookup_node(node, self.cleanup_graph) + assert cleanup_node is not None + cleanup_node.kwargs = cleanup_kwargs + node.kwargs = kwargs + + def node_replace_all_uses_with( + self, + node: fx.Node, + replace_with: fx.Node, + delete_user_cb: Callable[[fx.Node], bool] = lambda user: True, + *, + propagate_meta=False, + ) -> List[fx.Node]: + for graph in self._all_graphs: + actual_node = self._lookup_node(node, graph) + actual_replace_with = self._lookup_node(replace_with, graph) + assert actual_node is not None + ret = actual_node.replace_all_uses_with( + actual_replace_with, + delete_user_cb, + propagate_meta=propagate_meta, + ) + return ret # type: ignore[possibly-undefined] + + def node_add_user(self, node: fx.Node, user: Any) -> None: + for graph in self._all_graphs: + actual_node = self._lookup_node(node, graph) + if isinstance(user, fx.Node): + actual_user_node = self._lookup_node(user, graph) + else: + actual_user_node = user + assert actual_node is not None + actual_node.users[actual_user_node] = None # type: ignore[index] + + def node_remove_user(self, node: fx.Node, user: Any) -> None: + for graph in self._all_graphs: + actual_node = self._lookup_node(node, graph) + if isinstance(user, fx.Node): + actual_user_node = self._lookup_node(user, graph) + else: + actual_user_node = user + assert actual_node is not None + del actual_node.users[actual_user_node] # type: ignore[arg-type] + + def keep_unused_nodes(self) -> None: + for node in self.nodes: + if len(node.users) == 0 and str(node.op) != "output": + self.node_add_user(node, "__hold__") + + def functionalize_optim(self) -> None: + # IterGraph can only support full graph (fwd+bwd+optim). As optimizer + # is not a functional call (it is inplace op), this method adds the of + # the optimizer call. This method has strong assumption of the optimizer + # and may not always be working. This method is intended be a temporary + # solution only. 
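+        # In practice the loop below adds artificial user edges (the graph
+        # output becomes a user of the ``_fused_adam_`` node, and that node
+        # becomes a user of the ``_foreach_add_`` step node) so that fx
+        # dead-code elimination will not prune these in-place calls.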
+ + # TODO: remove this API after DCE is removed + for node in reversed(self.nodes): + if node.name.startswith("output"): + output_node = node + elif node.name.startswith( + "_fused_adam_", + ): + optim_node = node + elif node.name.startswith( + "_foreach_add_", + ): + step_node = node + self.node_add_user(optim_node, output_node) # type: ignore[possibly-undefined] + self.node_add_user(step_node, optim_node) # type: ignore[possibly-undefined] + + def defunctionalize_optim(self) -> None: + # TODO: remove this API after DCE is not used with IterGraph + for graph in self._all_graphs: + for node in reversed(graph.nodes): + if node.name.startswith("output"): + output_node = node + elif node.name.startswith( + "_fused_adam_", + ): + optim_node = node + elif node.name.startswith( + "_foreach_add_", + ): + step_node = node + optim_node.users.pop(output_node, None) # type: ignore[possibly-undefined] + step_node.users.pop(optim_node, None) # type: ignore[possibly-undefined] + + def freeze_cross_iter_movement(self) -> None: + self._freeze_cross_iter_movement = True + + +class IterGraphModule(nn.Module): + """``IterGraphModule`` provides the ability to do cross-iteration optimization. + + Given a ``fx.GraphModule``, main_gm, ``IterGraphModule`` internally + duplicate it to 3 copies and redirect the ``forward`` request to a different + ``fx.GraphModule`` based on the iteration count. This allows users to do + graph optimizations that across iterations (e.g., moving collective wait in + the backward to the forward of the next iteration). + + Note that users must call the APIs provided by ``IterGraphModule`` or + ``IterGraph`` to rewrite the graph so that ``IterGraphModule`` can keep the + data dependency for all 3 graphs. + """ + + def __init__( + self, + main_gm: fx.GraphModule, + max_iters: int = -1, + enable_inductor: bool = False, + ) -> None: + super().__init__() + + def _copy_gm(src: fx.GraphModule, graph: fx.Graph) -> fx.GraphModule: + gm = fx.GraphModule(src, graph) + gm.meta = getattr(graph, "meta", {}) + return gm + + self.setup_gm = _copy_gm(main_gm, copy.deepcopy(main_gm.graph)) + self.cleanup_gm = _copy_gm(main_gm, copy.deepcopy(main_gm.graph)) + self.main_gm = _copy_gm( + main_gm, + IterGraph(main_gm.graph, self.setup_gm.graph, self.cleanup_gm.graph), + ) + + self._iter = 0 + self._max_iters = max_iters + self._previous_output: Tuple[Any, ...] = tuple() + self._num_extra_output = 0 + self._is_frozen = False + self._enable_inductor = enable_inductor + + def finalize_setup(self) -> None: + """Set up the internal states and also get the signal from users that what is the maximum iteration count. + + This method must be called before the forward() is called. + """ + if not self._is_frozen: + self.graph.freeze_cross_iter_movement() + self._num_extra_output = self.graph.num_extra_output + if self._enable_inductor: + self.main_gm = partial_lower(self.main_gm) + self._is_frozen = True + + self._iter = 0 + + def _run(self, gm: fx.GraphModule, last_iter: bool, *args, **kwargs) -> Any: + if self._num_extra_output > 0: + new_args = args + (self._previous_output) + output = gm(*new_args, **kwargs) + if not last_iter: + assert len(output) == 2 + self._previous_output = tuple(output[-1]) + assert ( + len(self._previous_output) > 0 + ), "There should be at least one extra output." + output = output[0] + else: + # No cross-iteration optimization is done. Simply call the + # GraphModule. 
+ output = gm(*args, **kwargs) + return output + + def forward(self, *args: Any, last_iter: bool = False, **kwargs: Any) -> Any: + self._iter += 1 + last_iter = last_iter or self._iter == self._max_iters + if last_iter: + logger.info("Using the cleanup graph") + gm = self.cleanup_gm + profiler_string = "## IterGraphModule: Cleanup Graph ##" + self._iter = 0 + elif self._iter == 1: + logger.info("Using the setup graph") + gm = self.setup_gm + profiler_string = "## IterGraphModule: Setup Graph ##" + else: + gm = self.main_gm + if self._iter == 2: + logger.info("Using the main graph") + profiler_string = "## IterGraphModule -- Maybe Compiling ##" + else: + profiler_string = "## IterGraphModule ##" + + with record_function(profiler_string): + return self._run(gm, last_iter, *args, **kwargs) + + @property + def graph(self) -> IterGraph: + return cast(IterGraph, self.main_gm.graph) + + def recompile(self) -> PythonCode: + self.setup_gm.recompile() + self.cleanup_gm.recompile() + return self.main_gm.recompile() + + def freeze_cross_iter_movement(self) -> None: + # TODO: remove this API once it is not used. + self.graph.freeze_cross_iter_movement() + self._num_extra_output = self.graph.num_extra_output + + def print_readable(self, print_output: bool = True) -> str: + return self.main_gm.print_readable(print_output) + + def print_all_graphs(self) -> None: + logger.info("Printing the three fx.Graph:") + logger.info("1. Setup fx.Graph:") + logger.info("%s", self.setup_gm.graph) + logger.info("2. Main fx.Graph:") + logger.info("%s", self.main_gm.graph) + logger.info("3. Cleanup fx.Graph:") + logger.info("%s", self.cleanup_gm.graph) + + def print_all_graph_modules(self) -> None: + logger.info("Printing the three fx gm:") + logger.info("1. Setup fx.GraphModule:") + logger.info("%s", self.setup_gm.print_readable(False)) + logger.info("2. Main fx.GraphModule:") + logger.info("%s", self.main_gm.print_readable(False)) + logger.info("3. 
Cleanup fx.GraphModule:") + logger.info("%s", self.cleanup_gm.print_readable(False)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/log_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/log_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1a8a9f0400ea2204545dfc44e0e60086e788ec83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/log_utils.py @@ -0,0 +1,78 @@ +import logging +import logging.config +import os +from typing import Optional + +import torch.distributed as dist + + +LOGGING_CONFIG = { + "version": 1, + "formatters": { + "spmd_format": {"format": "%(name)s: [%(levelname)s] %(message)s"}, + "graph_opt_format": {"format": "%(name)s: [%(levelname)s] %(message)s"}, + }, + "handlers": { + "spmd_console": { + "class": "logging.StreamHandler", + "level": "DEBUG", + "formatter": "spmd_format", + "stream": "ext://sys.stdout", + }, + "graph_opt_console": { + "class": "logging.StreamHandler", + "level": "DEBUG", + "formatter": "graph_opt_format", + "stream": "ext://sys.stdout", + }, + "null_console": { + "class": "logging.NullHandler", + }, + }, + "loggers": { + "spmd_exp": { + "level": "DEBUG", + "handlers": ["spmd_console"], + "propagate": False, + }, + "graph_opt": { + "level": "DEBUG", + "handlers": ["graph_opt_console"], + "propagate": False, + }, + "null_logger": { + "handlers": ["null_console"], + "propagate": False, + }, + # TODO(anj): Add loggers for MPMD + }, + "disable_existing_loggers": False, +} + + +def get_logger(log_type: str) -> Optional[logging.Logger]: + from torch.distributed._spmd import config + + if "PYTEST_CURRENT_TEST" not in os.environ: + logging.config.dictConfig(LOGGING_CONFIG) + avail_loggers = list(LOGGING_CONFIG["loggers"].keys()) # type: ignore[attr-defined] + assert ( + log_type in avail_loggers + ), f"Unable to find {log_type} in the available list of loggers {avail_loggers}" + + if not dist.is_initialized(): + return logging.getLogger(log_type) + + if dist.get_rank() == 0: + logger = logging.getLogger(log_type) + logger.setLevel(config.log_level) + if config.log_file_name is not None: + log_file = logging.FileHandler(config.log_file_name) + log_file.setLevel(config.log_level) + logger.addHandler(log_file) + else: + logger = logging.getLogger("null_logger") + + return logger + + return logging.getLogger("null_logger") diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/parallel_mode.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/parallel_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..a908109805e3c5e144aa18bbb477338695385378 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/parallel_mode.py @@ -0,0 +1,216 @@ +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict, List, Optional, Tuple + +import torch +import torch.distributed as dist +import torch.utils._pytree as pytree +from torch._subclasses import FakeTensorMode +from torch.distributed._spmd.data_parallel import ( + DataParallelStyle, + partition_data_parallel, +) +from torch.distributed._spmd.distribute import _convert_to_distributed, Schema +from torch.distributed._tensor import DeviceMesh, Placement, Replicate, Shard + +from torch.fx import GraphModule + + +class ParallelMode(ABC): + """ + Basic Parallel Mode interface. Each parallelism pattern should implement + this interface to describe how to partition and compile the graph in the + spmd compiler. 
+ """ + + @abstractmethod + def partition( + self, + gm: GraphModule, + model: torch.nn.Module, + optimizer: Optional[torch.optim.Optimizer], + params_and_buffers: Dict[str, Any], + named_states: Dict[str, Any], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> GraphModule: + """ + Partition a single device graph to a distributed graph. + + TODO(@wanchaol): some of these arguments are not necessary for + partitioning, remove the unnecessary ones later. + """ + raise NotImplementedError() + + @abstractmethod + def transform_and_compile(self, gm: GraphModule) -> GraphModule: + """ + Transform and compile a distributed graph with a set of graph + transformation and optimization passes for each parallel mode. + + The returned result should be a compiled executable graph in + the distributed environment. + """ + # TODO: add more necessary arguments to this interface. + raise NotImplementedError() + + +class DataParallel(ParallelMode): + """Data Parallelism mode.""" + + def __init__( + self, + parallel_style: str = "replicate", + *, + input_batch_dim: int = 0, + custom_passes: Optional[Callable[[GraphModule], GraphModule]] = None, + ): + """ + DataParallel Mode that partition the model and graph to data parallel style + parallelism (i.e. DDP/FSDP/ZERO-3). It currently supports three different + parallel styles: "replicate", "fully_shard", and "default". See + :class:`DataParallelStyle` for more details. + + Args: + parallel_style (str): parallel style to use. Currently supports + "replicate", "fully_shard", and "default". + + Keyword args: + input_batch_dim (int): the batch dimension of the input tensor. + default: 0 + custom_passes (Callable[[GraphModule], GraphModule], optional): + A custom callable that overrides the default graph transformation + and optimization passes. + """ + if parallel_style == "replicate": + self.parallel_style = DataParallelStyle.REPLICATE + elif parallel_style == "fully_shard": + self.parallel_style = DataParallelStyle.FULLY_SHARD + elif parallel_style == "default": + self.parallel_style = DataParallelStyle.DEFAULT + else: + raise RuntimeError(f"Unknown parallel style: {parallel_style}") + + # TODO: what if user passes in a incorrect `input_batch_dim`, how should we + # detect that and do proper error handling? + self.input_batch_dim = input_batch_dim + + if custom_passes is not None: + self._gm_passes: Callable[[GraphModule], GraphModule] = custom_passes + else: + # TODO: add a few default passes here. + self._gm_passes = lambda gm: gm + + def partition( + self, + gm: GraphModule, + model: torch.nn.Module, + optimizer: Optional[torch.optim.Optimizer], + params_and_buffers: Dict[str, Any], + named_states: Dict[str, Any], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> GraphModule: + # TODO: figure out a way to avoid explicit "cuda" mesh. + mesh = DeviceMesh("cuda", torch.arange(dist.get_world_size())) + + gm = partition_data_parallel( + gm, + model, + optimizer, + params_and_buffers, + named_states, + args, + kwargs, + mesh, + self.parallel_style, + self.input_batch_dim, + ) + return gm + + def transform_and_compile(self, gm: GraphModule) -> GraphModule: + """optimize a distributed graph with a set of optimization passes""" + # TODO: add more necessary arguments to this interface. + return self._gm_passes(gm) + + +class DTensorExpandMode(ParallelMode): + """ + The DTensor Expand mode. 
It's replicating the parameters and + shard the inputs to represent DDP like behavior, it's currently + a transitent mode before we move to the new data parallel expansion. + """ + + def __init__( + self, custom_passes: Optional[Callable[[GraphModule], GraphModule]] = None + ): + self._placements_override: Dict[int, List[Placement]] = {} + if custom_passes is not None: + self._gm_passes: Callable[[GraphModule], GraphModule] = custom_passes + else: + # TODO: add a few default passes here. + self._gm_passes = lambda gm: gm + + def partition( + self, + gm: GraphModule, + model: torch.nn.Module, + optimizer: Optional[torch.optim.Optimizer], + params_and_buffers: Dict[str, Any], + named_states: Dict[str, Any], + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> GraphModule: + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + + mesh = DeviceMesh("cuda", torch.arange(dist.get_world_size()).cuda()) + shard_schema: Schema = Schema(mesh=mesh, placements=[Shard(0)]) + # FIXME: allow other sharding schemas + replicate_schema: Schema = Schema(mesh=mesh, placements=[Replicate()]) + + inps, schemas = [], [] + + for p in pytree.tree_leaves(params_and_buffers): + assert isinstance(p, torch.Tensor), f"expecting Tensor but got {type(p)}" + inps.append(p) + schemas.append(replicate_schema) + + for o in pytree.tree_leaves(named_states): + if isinstance(o, torch.Tensor): + inps.append(o) + schemas.append(replicate_schema) + else: + inps.append(torch.empty(0)) + schemas.append(replicate_schema) + + for a in flat_args: + if isinstance(a, torch.Tensor): + inps.append(a) + if id(a) in self._placements_override: + schemas.append( + Schema(mesh=mesh, placements=self._placements_override[id(a)]) + ) + else: + schemas.append(shard_schema) + else: + # Create dummy tensor and schema for non-tensor inputs for + # the purpose of dtensor expansion. Non-tensor inputs are + # guaranteed unused in dispatcher graphs produced by make_fx. + # However, we still need to respect them so that tensor inputs + # match wtih their placeholders. + inps.append(torch.empty(0)) + schemas.append(shard_schema) + + with FakeTensorMode(allow_non_fake_inputs=True): + fake_inps = [torch.empty_like(inp) for inp in inps] + + return _convert_to_distributed( + gm, fake_inps, schemas, default_mesh=mesh, _allow_partial=False + )[0] + + def transform_and_compile(self, gm: GraphModule) -> GraphModule: + """ + Transform and compile a distributed graph with a set of graph transformation + and optimization passes for the dtensor fallback parallel mode. + """ + # TODO: move the trasnformation passed to this function + return self._gm_passes(gm) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_spmd/partial_lower.py b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/partial_lower.py new file mode 100644 index 0000000000000000000000000000000000000000..f2a62467c10c99ebbf80fdf083d8bc8edf6732a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_spmd/partial_lower.py @@ -0,0 +1,268 @@ +# This file is copied from Meta internal repo and is not synced with the +# internal version. Once the internal version is fully mature, we should +# upstream again and retire the internal version. 
@yifuwang + +import logging +import operator +from typing import Callable, List, Optional, Set, Tuple + +from functorch import make_fx + +import torch + +from torch._inductor.compile_fx import compile_fx_inner +from torch._inductor.decomposition import select_decomp_table + +MIN_ATEN_OPS_TO_LOWER = 10 + +logger: logging.Logger = logging.getLogger(__name__) + + +def _create_subgraph_module( + inputs: List[torch.fx.Node], body: List[torch.fx.Node], outputs: List[torch.fx.Node] +) -> torch.fx.GraphModule: + subgraph: torch.fx.Graph = torch.fx.Graph() + node_to_subgraph_node = {} + for idx, inp in enumerate(inputs): + subgraph_inp = subgraph.placeholder(name=f"arg_{idx}") + subgraph_inp.meta = inp.meta + node_to_subgraph_node[inp] = subgraph_inp + + for node in body: + subgraph_node = subgraph.node_copy( + node, arg_transform=lambda x: node_to_subgraph_node[x] + ) + node_to_subgraph_node[node] = subgraph_node + + subgraph.output(result=tuple(node_to_subgraph_node[x] for x in outputs)) + subgraph.eliminate_dead_code() + subgraph.lint() + return torch.fx.GraphModule(root={}, graph=subgraph) + + +def _is_container_node(node: torch.fx.Node) -> bool: + if any(user.target == operator.getitem for user in node.users): + assert all(user.target == operator.getitem for user in node.users), ( + "Malformed graph: a container node is used as input for non-getitem nodes." + "\nNode: {fmt_node}\nUsers: {fmt_users}".format( + fmt_node=node.format_node(), + fmt_users="\n".join(u.format_node() for u in node.users), + ) + ) + return True + return False + + +def _lower_subgraph_nodes( + gm: torch.fx.GraphModule, + subgraph_name: str, + subgraph_nodes: List[torch.fx.Node], + dumper: Callable[[str], str], +) -> None: + prologue: List[torch.fx.Node] = [] + inputs: List[torch.fx.Node] = [] + body: List[torch.fx.Node] = [] + visible: Set[torch.fx.Node] = set() + + # Inductor requires all graph input to be tensors. When adding a container + # node as subgraph input, add its descendant getitem nodes to the subgraph + # prologue and add its leaf getitem nodes to the subgraph input. + def add_input(arg: torch.fx.Node) -> None: + stack = [arg] + while len(stack) != 0: + node = stack.pop() + if _is_container_node(node): + # We should only prepone nodes within subgraph_nodes + prologue.extend(user for user in node.users if user in subgraph_nodes) + stack.extend(node.users) + else: + if node not in visible: + inputs.append(node) + visible.add(node) + + for node in subgraph_nodes: + if node.op == "get_attr": + # Prepone get_attr to avoid having to copy + # the attribute to the subgraph module. + inputs.append(node) + visible.add(node) + continue + + for arg in node.all_input_nodes: + if arg not in visible: + add_input(arg) + + if node not in prologue: + body.append(node) + visible.add(node) + + outputs: List[torch.fx.Node] = [] + + # Inductor requires all graph output to be tensors. When adding a container + # node as subgraph output, add its descendant getitem nodes to the subgraph + # body and add its leaf getitem nodes to the subgraph output. 
+ def add_output(output: torch.fx.Node) -> None: + stack = [output] + while len(stack) != 0: + node = stack.pop() + if _is_container_node(node): + body.extend(node.users) + stack.extend(node.users) + elif not all(user in visible for user in node.users): + if node not in outputs: + outputs.append(node) + + for node in body: + if not all(user in visible for user in node.users): + add_output(node) + + assert len(inputs) == len(set(inputs)) + assert len(outputs) == len(set(outputs)) + + subgraph_module = _create_subgraph_module(inputs, body, outputs) + readable_tag = dumper(str(subgraph_module.graph)) + setattr(gm, subgraph_name, _InductorModule(subgraph_module)) + + insertion_point = subgraph_nodes[-1].next + for node in prologue: + insertion_point.prepend(node) + + with gm.graph.inserting_before(insertion_point): + # Insert subgraph call + subgraph_call = gm.graph.create_node( + op="call_module", + target=subgraph_name, + args=tuple(inputs), + kwargs={"tag": readable_tag}, + ) + # Replace parent graph nodes with their corresponding subgraph outputs + for idx, output in enumerate(outputs): + new_output = gm.graph.create_node( + op="call_function", + target=operator.getitem, + args=(subgraph_call, idx), + ) + new_output.meta = output.meta + output.replace_all_uses_with(new_output) + + # Erase lowered nodes from the parent graph + for node in reversed(body + outputs): + if len(node.users) == 0: + gm.graph.erase_node(node) + + +class _InductorModule(torch.nn.Module): + def __init__(self, gm: torch.fx.GraphModule) -> None: + super().__init__() + self.gm = gm + self.compiled: Optional[ + Callable[[List[torch.Tensor]], List[torch.Tensor]] + ] = None + + def forward(self, *args: torch.Tensor, tag: str) -> List[torch.Tensor]: + if self.compiled is None: + inductor_decompositions = select_decomp_table() + # TODO: figure out why turning on cudagraphs cause exceptions. + decomp_gm = make_fx(self.gm, decomposition_table=inductor_decompositions)( + *args + ) + logger.info("Lowering subgraph (%s) to Inductor...", tag) + self.compiled = compile_fx_inner( + decomp_gm, + list(args), + cudagraphs=False, + ) + logger.info("Completed lowering subgraph (%s) to Inductor", tag) + with torch.profiler.record_function(tag): + assert self.compiled is not None + return self.compiled(list(args)) + + +def _is_inductor_compatible(node: torch.fx.Node) -> Tuple[bool, str]: + # `has_tag` is not supported yet + # if has_tag(node, "non_lowerable"): + + if node.target in ( + torch.ops.aten._fused_adam_.default, + torch.ops.aten._fused_adam.default, + torch.ops.aten._foreach_add_.Scalar, + torch.ops.aten._foreach_add.Scalar, + ): + return False, "fused adam is not supported yet" + + # TODO(yifu): apparently having a meta kernel is not a necessary + # condition for Inductor compatiblity. We should refine the check. + # Sneaking this one in for now to support comm_fusion_with_cat. 
+ if node.target == torch.ops.aten.flatten.using_ints: + return True, "" + + if isinstance(node.target, torch._ops.OpOverload): + if not node.target.has_kernel_for_dispatch_key(torch._C.DispatchKey.Meta): + return False, f"{node.target} doesn't have a meta kernel registered" + return True, "" + + +def _subgraph_predicate(nodes: List[torch.fx.Node]) -> bool: + num_aten_ops = len([n for n in nodes if str(n.target).startswith("aten.")]) + return num_aten_ops >= MIN_ATEN_OPS_TO_LOWER + + +def partial_lower( + gm: torch.fx.GraphModule, + node_predicate: Callable[[torch.fx.Node], bool] = lambda x: True, + subgraph_predicate: Callable[[List[torch.fx.Node]], bool] = lambda x: True, + dumper: Callable[[str], str] = lambda x: "subgraph", +) -> torch.fx.GraphModule: + """ + Lower Inductor compatible portions of the graph module to Inductor. + + Args: + node_predicate: user predicate for determining whether to consider a node for + lowering. + subgraph_predicate: user predicate for determining whether to consider a list of + candidate nodes for lowering. + dumper: a callback for dumping subgraphs for human digestion. For exmaple, it + can be a function that writes to disk/blob storage and returns the + path/handle. The returned path/handle for each subgraph will be made + available in the subgraph call node in the parent graph, as well as the + label of the profiler block for the subgraph. + """ + nodes_per_subgraph: List[List[torch.fx.Node]] = [[]] + ptr = next(iter(gm.graph.nodes)) + + def _node_predicate(node: torch.fx.Node) -> Tuple[bool, str]: + should_lower, reason = _is_inductor_compatible(node) + if not should_lower: + return should_lower, reason + if not node_predicate(node): + return False, "user predicate" + return True, "" + + while ptr.op != "output": + if ptr.op == "placeholder": + ptr = ptr.next + continue + should_lower, reason = _node_predicate(ptr) + if should_lower: + nodes_per_subgraph[-1].append(ptr) + else: + if len(nodes_per_subgraph[-1]) > 0: + logger.warning( + "partial_lower: graph break at %s. 
Reason: %s", str(ptr), reason + ) + nodes_per_subgraph.append([]) + ptr = ptr.next + + nodes_per_subgraph = [ + nodes + for nodes in nodes_per_subgraph + if subgraph_predicate(nodes) and _subgraph_predicate(nodes) + ] + + for idx, subgraph_nodes in enumerate(nodes_per_subgraph): + subgraph_name = f"subgraph_{idx}" + _lower_subgraph_nodes(gm, subgraph_name, subgraph_nodes, dumper) + + gm.graph.lint() + gm.recompile() + return gm diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_state_dict_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_state_dict_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..343899a5ab41396d6c24f517664e4b0dd0354844 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_state_dict_utils.py @@ -0,0 +1,385 @@ +import io +import math +from typing import Any, Callable, Dict, Optional, Tuple, TYPE_CHECKING + +import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch.distributed._functional_collectives import AsyncCollectiveTensor + +if dist.is_available() or TYPE_CHECKING: + from torch.distributed import distributed_c10d + from torch.distributed._shard.sharded_tensor import ShardedTensor + from torch.distributed._tensor import DTensor, Replicate + + +def _identity_func( + obj: torch.Tensor, + pg: Optional[dist.ProcessGroup], + device: Optional[torch.device], + companion_obj: Any, +) -> torch.Tensor: + return obj + + +def _all_gather_sharded_tensor( + sharded_tensor: "ShardedTensor", + pg: Optional[dist.ProcessGroup] = None, + device: Optional[torch.device] = None, +) -> torch.Tensor: + if pg is None: + pg = distributed_c10d._get_default_group() + world_size = dist.get_world_size(pg) + shards = sharded_tensor.local_shards() + dim_0_size = sharded_tensor.size()[0] # type: ignore[index] + tensor_numel = sharded_tensor.size().numel() # type: ignore[union-attr] + chunk_size = math.ceil(dim_0_size / world_size) * tensor_numel // dim_0_size + pg_device = ( + distributed_c10d._get_pg_default_device(pg) if device is None else device + ) + if shards: + local_tensor = shards[0].tensor.flatten() + if local_tensor.device.type != pg_device.type: + local_tensor = local_tensor.to(pg_device) + num_padding = chunk_size - local_tensor.numel() + if num_padding > 0: + local_tensor = F.pad(local_tensor, [0, num_padding]) + else: + local_tensor = torch.zeros( + chunk_size, dtype=sharded_tensor.dtype, device=pg_device + ) + + tensor = torch.empty( + chunk_size * world_size, + dtype=local_tensor.dtype, + device=pg_device, + ) + dist.all_gather_into_tensor(tensor, local_tensor, group=pg) + + tensor = tensor.narrow(0, 0, tensor_numel).reshape(sharded_tensor.size()) + return tensor + + +class CompanionMismatch(Exception): + ... + + +def _iterate_state_dict( + iter_object: Any, + sharded_tensor_func: Callable, + dtensor_func: Callable, + tensor_func: Callable, + *, + pg: Optional[dist.ProcessGroup] = None, + device: Optional[torch.device] = None, + cpu_offload: bool = False, + companion_obj: Any = None, + ranks_only: Tuple[int, ...] = tuple(), + type_check: bool = True, +) -> Dict[str, Any]: + # TODO: should we use pytree? 
+ cpu_device = torch.device("cpu") + if isinstance(iter_object, ShardedTensor): + ret = sharded_tensor_func(iter_object, pg, device, companion_obj) + elif isinstance(iter_object, DTensor): + ret = dtensor_func(iter_object, pg, device, companion_obj) + elif isinstance(iter_object, torch.Tensor): + ret = tensor_func(iter_object, pg, device, companion_obj) + elif ( + isinstance(iter_object, (int, float, str, bytes, io.BytesIO)) + or iter_object is None + ): + ret = iter_object + elif isinstance(iter_object, dict): + if companion_obj is not None and ( + not isinstance(companion_obj, dict) + or set(companion_obj.keys()) != set(iter_object.keys()) + ): + raise CompanionMismatch() + + ret = { + key: _iterate_state_dict( + value, + sharded_tensor_func, + dtensor_func, + tensor_func, + pg=pg, + device=device, + cpu_offload=cpu_offload, + companion_obj=companion_obj[key] if companion_obj is not None else None, + ranks_only=ranks_only, + type_check=type_check, + ) + for key, value in iter_object.items() + } + elif isinstance(iter_object, (list, tuple)): + if companion_obj is not None and ( + not isinstance(companion_obj, (list, tuple)) + or len(companion_obj) != len(iter_object) + ): + raise CompanionMismatch() + + ret = [ + _iterate_state_dict( + v, + sharded_tensor_func, + dtensor_func, + tensor_func, + pg=pg, + device=device, + cpu_offload=cpu_offload, + companion_obj=companion_obj[idx] if companion_obj is not None else None, + ranks_only=ranks_only, + type_check=type_check, + ) + for idx, v in enumerate(iter_object) + ] + if isinstance(iter_object, tuple): + ret = tuple(ret) + elif not type_check: + ret = iter_object + else: + raise ValueError(f"Unexpected value type {type(iter_object)}") + + if not ranks_only or dist.get_rank(pg) in ranks_only: + if isinstance(ret, torch.Tensor) and cpu_offload: + if companion_obj is None: + ret = ret.to(cpu_device) + else: + # TODO: support DTensor + companion_obj.copy_(ret, non_blocking=True) + ret = companion_obj + else: + ret = {} if isinstance(ret, dict) else None + + return ret + + +def _gather_state_dict( + state_dict: Dict[str, Any], + *, + pg: Optional[dist.ProcessGroup] = None, + device: Optional[torch.device] = None, + cpu_offload: bool = False, + ranks_only: Tuple[int, ...] = tuple(), + type_check: bool = True, +) -> Dict[str, Any]: + """ + Given a state_dict, this API gathers all the ShardedTensors or DTensors in + the state_dict. + + + Args: + state_dict (Dict[str, Any]): the target sharded state_dict. + pg (Optional[dist.ProcessGroup]): the process group that is used to + gather ShardedTensor. Note that gathering a DTensor will use + the DeviceMesh. So this argument will be ignored when gathering a + DTensor. + device: (Optional[torch.device]): the device that is used to + perform allgather for ShardedTensor. Note that gathering a DTensor + will use the DeviceMesh. So this argument will be ignored when + gathering a DTensor. + cpu_offload (bool): whether to offload the tensors to CPU memory. The + default value is False. + ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will + have the same state_dicts. Otherwise only ranks that in ``ranks_only`` + have the same state_dicts. Other ranks will get empty state_dicts. + type_check: (bool): check if the instance data type is a supported type + that can be saved by DCP. The current supported data types are + torch.Tensor, DTensor, int, float, str, list, dict, None. + + Returns: + The gathered state dictionary. 
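+
+    Example (a sketch; assumes the model's parameters are ShardedTensor or
+    DTensor instances)::
+
+        sharded_sd = model.state_dict()
+        full_sd = _gather_state_dict(sharded_sd, cpu_offload=True)
+        if dist.get_rank() == 0:
+            torch.save(full_sd, "checkpoint.pt")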
+ """ + + def sharded_tensor_func(value, pg, device, companion_obj): + # ShardedTensor does not seem to record the original device type. + # So if the tensor is moved to CPU, we won't know the original type. + # As a result, we have to rely on the user to tell us the correct one. + cpu_device = torch.device("cpu") + output_tensor = _all_gather_sharded_tensor(value, pg, device) + local_shard_device = ( + value.local_shards()[0].tensor.device + if value.local_shards() + else cpu_device + ) + if output_tensor.device != local_shard_device: + value = output_tensor.to(local_shard_device) + else: + value = output_tensor + return value + + def dtensor_func(value, pg, device, companion_obj): + if value.device != value.device_mesh.device_type: + value = value.to(value.device_mesh.device_type) + # FSDP all_gather: [Shard(0)] -> [Replicate()] + # HSDP all_gather: [Replicate(), Shard(0)] -> [Replicate(), Replicate()] + # 2D FSDP + TP all_gather: + # - [Shard(0), Shard(n)] -> [Replicate(), Replicate()] + # - [Shard(0), Replicate()] -> [Replicate(), Replicate()] + placements = [Replicate() for _ in value.placements] + value = value.redistribute( + device_mesh=value.device_mesh, + placements=placements, + ) + # Call `wait()` to force the tensor to be synchronous with respect + # to the main stream. + # See the discussion in https://github.com/pytorch/pytorch/pull/117799. + value = value.to_local() + if isinstance(value, AsyncCollectiveTensor): + value = value.wait() + return value + + return _iterate_state_dict( + state_dict, + sharded_tensor_func, + dtensor_func, + _identity_func, + pg=pg, + device=device, + cpu_offload=cpu_offload, + ranks_only=ranks_only, + type_check=type_check, + ) + + +def _offload_state_dict_to_cpu( + state_dict: Dict[str, Any], + *, + ranks_only: Tuple[int, ...] = tuple(), + cpu_offload_state_dict: Optional[Dict[str, Any]] = None, + cpu_offload_sync: bool = True, + type_check: bool = True, +) -> Dict[str, Any]: + """ + Given a state_dict, this API offload all the tensors to CPU memory. + + Args: + state_dict (Dict[str, Any]): the target state_dict. + pg (Optional[dist.ProcessGroup]): the process group that is used to + gather ShardedTensor. Note that gathering a DTensor will use + the DeviceMesh. So this argument will be ignored when gathering a + DTensor. + ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will + have the same state_dicts. Otherwise only ranks that in ``ranks_only`` + have the same state_dicts. Other ranks will get empty state_dicts. + cpu_offload_state_dict (Optional[Dict[str, Any]]): the CPU state_dict + that will be returned. If this is not None, this API will use + `copy_` to copy the GPU tensor to the tensor in this CPU state_dict. + This CPU state_dict must have exactly the same structure as the + `state_dict` the only difference is that all the tensors in this + CPU state_dict are on CPU memory. + cpu_offload_sync: (bool): flag to decide whether to call `synchronize()` + before this API returns. + type_check: (bool): check if the instance data type is a supported type + that can be saved by DCP. The current supported data types are + torch.Tensor, DTensor, int, float, str, list, dict, None. + + Returns: + The gathered state dictionary. 
+ """ + + ret = _iterate_state_dict( + state_dict, + _identity_func, + _identity_func, + _identity_func, + pg=None, + device=None, + cpu_offload=True, + ranks_only=ranks_only, + companion_obj=cpu_offload_state_dict, + type_check=type_check, + ) + if cpu_offload_state_dict is not None and cpu_offload_sync: + torch.cuda.synchronize() + return ret + + +def _create_cpu_state_dict( + state_dict: Dict[str, Any], pin_memory: bool = False, share_memory: bool = False +) -> Dict[str, Any]: + """ + Given a state_dict, create another state_dict with the same structure and elements. + However, all tensors in the returned state_dict are new tensors on CPU. These + tensors can be placed on pin_memory or share_memory based on the provided arguments. + """ + + if pin_memory and share_memory: + raise ValueError( + "Cannot allocate both memory on both pin_memory and share_memory" + ) + + def tensor_func( + obj: torch.Tensor, + pg: Optional[dist.ProcessGroup], + device: Optional[torch.device], + companion_obj: Any, + ) -> torch.Tensor: + if len(obj.size()) == 0: + return torch.tensor(0, dtype=obj.dtype) + + if share_memory: + return torch.empty( + *tuple(companion_obj.size()), dtype=companion_obj.dtype + ).share_memory_() + else: + return torch.empty( + *tuple(companion_obj.size()), dtype=companion_obj.dtype + ).pin_memory() + + ret = _iterate_state_dict( + state_dict, + _identity_func, + _identity_func, + tensor_func, + pg=None, + device=None, + cpu_offload=False, + ranks_only=tuple(), + companion_obj=state_dict, + type_check=False, + ) + return ret + + +def _check_state_dict_similarity( + state_dict: Dict[str, Any], + compared_state_dict: Dict[str, Any], +) -> bool: + """ + Given two state_dicts, check if the structures are the same. And + if a [key, tensor] pair exist in one state_dict there must be + the a corresponding pait, [key, other_tensor], in the other state_dict, + where tensor and other_tensor have the same size and dtype. + + Return the check result. + """ + + def tensor_func( + obj: torch.Tensor, + pg: Optional[dist.ProcessGroup], + device: Optional[torch.device], + companion_obj: Any, + ) -> torch.Tensor: + if companion_obj.dtype != obj.dtype or companion_obj.size() != obj.size(): + raise CompanionMismatch() + return obj + + try: + _iterate_state_dict( + state_dict, + _identity_func, + _identity_func, + tensor_func, + pg=None, + device=None, + cpu_offload=False, + ranks_only=tuple(), + companion_obj=compared_state_dict, + type_check=False, + ) + except CompanionMismatch: + return False + + return True diff --git a/venv/lib/python3.10/site-packages/torch/distributed/argparse_util.py b/venv/lib/python3.10/site-packages/torch/distributed/argparse_util.py new file mode 100644 index 0000000000000000000000000000000000000000..a214dadd312a56c438e32f623a5347ee07e1d410 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/argparse_util.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import os +from argparse import Action + + +class env(Action): + """ + Get argument values from ``PET_{dest}`` before defaulting to the given ``default`` value. + + For flags (e.g. ``--standalone``) + use ``check_env`` instead. + + .. note:: when multiple option strings are specified, ``dest`` is + the longest option string (e.g. 
for ``"-f", "--foo"`` + the env var to set is ``PET_FOO`` not ``PET_F``) + + Example: + :: + + parser.add_argument("-f", "--foo", action=env, default="bar") + + ./program -> args.foo="bar" + ./program -f baz -> args.foo="baz" + ./program --foo baz -> args.foo="baz" + PET_FOO="env_bar" ./program -f baz -> args.foo="baz" + PET_FOO="env_bar" ./program --foo baz -> args.foo="baz" + PET_FOO="env_bar" ./program -> args.foo="env_bar" + + parser.add_argument("-f", "--foo", action=env, required=True) + + ./program -> fails + ./program -f baz -> args.foo="baz" + PET_FOO="env_bar" ./program -> args.foo="env_bar" + PET_FOO="env_bar" ./program -f baz -> args.foo="baz" + """ + + def __init__(self, dest, default=None, required=False, **kwargs) -> None: + env_name = f"PET_{dest.upper()}" + default = os.environ.get(env_name, default) + + # ``required`` means that it NEEDS to be present in the command-line args + # rather than "this option requires a value (either set explicitly or default" + # so if we found default then we don't "require" it to be in the command-line + # so set it to False + if default: + required = False + + super().__init__(dest=dest, default=default, required=required, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + + +class check_env(Action): + """ + Check whether the env var ``PET_{dest}`` exists before defaulting to the given ``default`` value. + + Equivalent to + ``store_true`` argparse built-in action except that the argument can + be omitted from the commandline if the env var is present and has a + non-zero value. + + .. note:: it is redundant to pass ``default=True`` for arguments + that use this action because a flag should be ``True`` + when present and ``False`` otherwise. + + Example: + :: + + parser.add_argument("--verbose", action=check_env) + + ./program -> args.verbose=False + ./program --verbose -> args.verbose=True + PET_VERBOSE=1 ./program -> args.verbose=True + PET_VERBOSE=0 ./program -> args.verbose=False + PET_VERBOSE=0 ./program --verbose -> args.verbose=True + + Anti-pattern (don't do this): + + :: + + parser.add_argument("--verbose", action=check_env, default=True) + + ./program -> args.verbose=True + ./program --verbose -> args.verbose=True + PET_VERBOSE=1 ./program -> args.verbose=True + PET_VERBOSE=0 ./program -> args.verbose=False + + """ + + def __init__(self, dest, default=False, **kwargs) -> None: + env_name = f"PET_{dest.upper()}" + default = bool(int(os.environ.get(env_name, "1" if default else "0"))) + super().__init__(dest=dest, const=True, default=default, nargs=0, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/c10d_logger.py b/venv/lib/python3.10/site-packages/torch/distributed/c10d_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..984b3841ef3d964677d047501b3946e06c93fec0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/c10d_logger.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +import functools +import logging +import time +from typing import Any, Callable, Dict, List, Tuple, TypeVar +from typing_extensions import ParamSpec + +import torch +import torch.distributed as dist + +from torch.distributed.logging_handlers import _log_handlers + +__all__: List[str] = [] + + +def _get_or_create_logger() -> logging.Logger: + logging_handler, log_handler_name = _get_logging_handler() + logger = logging.getLogger(f"c10d-{log_handler_name}") + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + logging_handler.setFormatter(formatter) + logger.propagate = False + logger.addHandler(logging_handler) + return logger + + +def _get_logging_handler(destination: str = "default") -> Tuple[logging.Handler, str]: + log_handler = _log_handlers[destination] + log_handler_name = type(log_handler).__name__ + return (log_handler, log_handler_name) + + +global _c10d_logger +_c10d_logger = _get_or_create_logger() + + +def _get_msg_dict(func_name, *args, **kwargs) -> Dict[str, Any]: + if dist.is_initialized(): + msg_dict = { + "func_name": f"{func_name}", + "args": f"{args}, {kwargs}", + "pg_name": f"{dist._get_process_group_name(kwargs.get('pg'))}", # type: ignore[arg-type] + "backend": f"{dist.get_backend(kwargs.get('group'))}", + "world_size": f"{dist.get_world_size()}", + "group_size": f"{dist.get_world_size(kwargs.get('group'))}", + "global_rank": f"{dist.get_rank()}", + "local_rank": f"{dist.get_rank(kwargs.get('group'))}", + } + if msg_dict["backend"] == "nccl": + nccl_version = torch.cuda.nccl.version() + msg_dict["nccl_version"] = ".".join(str(v) for v in nccl_version) + else: + msg_dict = { + "func_name": f"{func_name}", + "args": f"{args}, {kwargs}", + } + return msg_dict + +_T = TypeVar('_T') +_P = ParamSpec('_P') + +def _exception_logger(func: Callable[_P, _T]) -> Callable[_P, _T]: + @functools.wraps(func) + def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T: + try: + return func(*args, **kwargs) + except Exception as error: + msg_dict = _get_msg_dict(func.__name__, *args, **kwargs) + msg_dict["error"] = f"{error}" + _c10d_logger.debug(msg_dict) + raise + + return wrapper + + +def _time_logger(func: Callable[_P, _T]) -> Callable[_P, _T]: + @functools.wraps(func) + def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T: + t1 = time.time_ns() + func_return = func(*args, **kwargs) + time_spent = time.time_ns() - t1 + + msg_dict = _get_msg_dict(func.__name__, *args, **kwargs) + msg_dict["time_spent"] = f"{time_spent}ns" + _c10d_logger.debug(msg_dict) + + return func_return + + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/distributed/collective_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/collective_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ed6c93078299a47fa9706d4df7f7989f11e4a856 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/collective_utils.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 + + +""" +A set of primitive functions for performing collective ops. + +Each should also handle single rank scenario. 
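+
+For example, ``broadcast`` and ``all_gather`` below fall back to executing
+the given callable locally and returning the local payload when ``pg`` is
+``None``.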
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Callable, cast, Generic, List, Optional, Tuple, TypeVar, Union + +import torch.distributed as dist + +T = TypeVar("T") + +@dataclass +class SyncPayload(Generic[T]): + stage_name: Optional[str] + success: bool + payload: T + exception: Optional[Exception] = None + +def broadcast( + data_or_fn: Union[T, Callable[[], T]], + *, + success: bool = True, + stage_name: Optional[str] = None, + rank: int = 0, + pg: Optional[dist.ProcessGroup] = None, +) -> T: + """ + Broadcasts the data payload from rank 0 to all other ranks. + Or if a function is passed, execute it in rank 0 and broadcast result to all other ranks. + + Can be used to broadcast a failure signal to stop all ranks. + + If the function raises an exception, all ranks will raise. + + Args: + data_or_fn: the data to broadcast or function to execute and broadcast result. + success: False to stop all ranks. + stage_name: the name of the logical stage for synchronization and debugging + rank: rank to broadcast data or execute function and broadcast resutls. + pg: the process group for sync + Throws: + RuntimeError from original exception trace + Returns: + the value after synchronization + + Example usage: + >> id = broadcast(data_or_fn=allocate_id, rank=0, pg=ext_pg.my_pg) + """ + + if not success and data_or_fn is not None: + raise AssertionError("Data or Function is expected to be None if not successful") + + payload: Optional[T] = None + exception : Optional[Exception] = None + # if no pg is passed then execute if rank is 0 + if (pg is None and rank == 0) or (pg is not None and pg.rank() == rank): + # determine if it is an executable function or data payload only + if callable(data_or_fn): + try: + payload = data_or_fn() + except Exception as e: + success = False + exception = e + else: + payload = data_or_fn + + # broadcast the exception type if any to all ranks for failure categorization + sync_obj = SyncPayload( + stage_name=stage_name, + success=success, + payload=payload, + exception=exception, + ) + + if pg is not None: + broadcast_list = [sync_obj] + dist.broadcast_object_list(broadcast_list, src=rank, group=pg) + assert len(broadcast_list) == 1 + sync_obj = broadcast_list[0] + + # failure in any rank will trigger a throw in every rank. + if not sync_obj.success: + error_msg = f"Rank {rank} failed" + if stage_name is not None: + error_msg += f": stage {sync_obj.stage_name}" + if sync_obj.exception is not None: + error_msg += f": exception {sync_obj.exception}" + raise RuntimeError(error_msg) from sync_obj.exception + + return cast(T, sync_obj.payload) + + +def all_gather( + data_or_fn: Union[T, Callable[[], T]], + stage_name: Optional[str] = None, + pg: Optional[dist.ProcessGroup] = None, +) -> List[T]: + """ + A simple all_gather primitive with basic synchronization guard logic, + by checking payload from all ranks has the same stage name. 
+ + Args: + data_or_fn: the data to be all gathered across ranks or function to be executed + stage_name: the sync stage name for out-of-sync protection + pg: the process group for sync + Throws: + RuntimeError from original exception trace + Returns: + a list of synced data from all ranks + + Example usage: + >> all_ids = all_gather(data_or_fn=allocate_id, pg=ext_pg.my_pg) + """ + payload: Optional[T] = None + exception : Optional[Exception] = None + success = True + # determine if it is an executable function or data payload only + if callable(data_or_fn): + try: + payload = data_or_fn() + except Exception as e: + success = False + exception = e + else: + payload = data_or_fn + + sync_obj = SyncPayload( + stage_name=stage_name, + success=success, + payload=payload, + exception=exception, + ) + + if pg is not None: + # List of success/failure across all ranks. + total_list = [None] * dist.get_world_size(pg) + all_gather_object_enforce_type(pg, total_list, sync_obj) + # Each rank will throw RuntimeError in case of failure on any rank. + stage_name = cast(SyncPayload[T], total_list[0]).stage_name + exception_list: List[Tuple[int, Exception]] = [] + ret_list: List[T] = [] + error_msg: str = "" + + for i, sp in enumerate(cast(List[SyncPayload[T]], total_list)): + if sp.stage_name != stage_name: + error_msg += ( + f"Unexpected stage name received from rank {i}: {sp.stage_name} " + ) + continue + if not sp.success and sp.exception is not None: + exception_list.append((i, sp.exception)) + continue + ret_list.append(sp.payload) + + if len(exception_list) > 0: + raise RuntimeError( # type: ignore[misc] + error_msg, exception_list) from exception_list[0] + return ret_list + else: + if not sync_obj.success: + raise RuntimeError( + f"all_gather failed with exception {sync_obj.exception}", + ) from sync_obj.exception + return [sync_obj.payload] # type: ignore[list-item] + + +# Note: use Any for typing for now so users can pass in +# either a list of None or target type placeholders +# otherwise pyre would complain +def all_gather_object_enforce_type( + pg: dist.ProcessGroup, + # pyre-fixme[2]: Parameter must have a type that does not contain `Any` + object_list: List[Any], + # pyre-fixme[2]: Parameter must have a type other than `Any` + obj: Any, + # pyre-fixme[2]: Parameter must have a type that does not contain `Any` + type_checker: Callable[[Any, Any], bool] = lambda x, y: type(x) == type(y), +) -> None: + """ + Similar to plain all_gather_object but with additional type checking + AFTER gather is done to ensure basic consistency. + If check does not pass, all ranks will fail with exception. + + This is generally to prevent conditional logic leading to + unexpected messages being received. This is considered fatal code error, + but due to logic stacks this might happen implicitly in practice. + + The default check does not check sub type (considered different) + or covariance (considered same) but users can pass in custom checker + if more complicated check is needed. 
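+
+    As a sketch, a more permissive checker (illustrative only) could treat an
+    instance of a subclass of the first object's type as a match:
+
+        def covariant_checker(x, y) -> bool:
+            # accept y when it is an instance of type(x), including subclasses
+            return isinstance(y, type(x))
+
+        all_gather_object_enforce_type(pg, object_list, obj, type_checker=covariant_checker)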
+ """ + dist.all_gather_object(object_list, obj, group=pg) + + # conservative check + list_len = len(object_list) + if list_len == 0: + return + first_obj = object_list[0] + for i in range(1, list_len): + if not type_checker(first_obj, object_list[i]): + raise TypeError( + f"Object type at index {i} is {type(object_list[i])}, " + f"while first object type is {type(first_obj)}" + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/constants.py b/venv/lib/python3.10/site-packages/torch/distributed/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..47b1f90e406c5e731737c7a503e492674ff38e25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/constants.py @@ -0,0 +1,23 @@ +from torch._C._distributed_c10d import _DEFAULT_PG_TIMEOUT +from datetime import timedelta +from typing import Optional + +__all__ = ['default_pg_timeout', 'default_pg_nccl_timeout'] + +# Default process group wide timeout, if applicable. +# This only applies to the non-nccl backends +# To make an attempt at backwards compatibility with THD, we use an +# extraordinarily high default timeout, given that THD did not have timeouts. +default_pg_timeout: timedelta = _DEFAULT_PG_TIMEOUT +# Separate timeout for PGNCCL mainly becuase it's always been that way in the C++ layer, but until recently +# there was one default that applied across all backends in the python layer. +# Later, we could consider merging them back together at the c++ layer if we can align on a same value. +# (only if TORCH_NCCL_BLOCKING_WAIT or TORCH_NCCL_ASYNC_ERROR_HANDLING is set to 1). + +try: + from torch._C._distributed_c10d import _DEFAULT_PG_NCCL_TIMEOUT + default_pg_nccl_timeout: Optional[timedelta] = _DEFAULT_PG_NCCL_TIMEOUT +except ImportError: + # if C++ NCCL support is not compiled, we don't have access to the default nccl value. + # if anyone is actually trying to use nccl in this state, it should error. + default_pg_nccl_timeout = None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/device_mesh.py b/venv/lib/python3.10/site-packages/torch/distributed/device_mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..72d90f567d91d35d781b91035e33b380988301c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/device_mesh.py @@ -0,0 +1,563 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import logging +import math +from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch + +from torch.distributed import is_available + +from ..utils._typing_utils import not_none + +__all__ = ["init_device_mesh", "DeviceMesh"] + + +if not is_available(): + import sys + + # We need to create the stubs when distributed is not available. + # Otherwise, we would fail the doc tests (```./.ci/pytorch/docs-test.sh```), + # since it would try to import ``torch.distributed.device_mesh`` or + # ``torch.distributed.init_device_mesh`` but cannot find them. 
+ + class _DeviceMeshStub: + pass + + def _init_device_mesh_stub(): + pass + + sys.modules["torch.distributed.device_mesh"].DeviceMesh = _DeviceMeshStub # type: ignore[attr-defined] + sys.modules[ + "torch.distributed.device_mesh" + ].init_device_mesh = _init_device_mesh_stub # type: ignore[attr-defined] + + +else: + from torch.distributed.distributed_c10d import ( + _find_pg_by_ranks_and_tag, + _get_default_group, + _get_group_tag, + get_rank, + get_world_size, + init_process_group, + is_initialized, + new_group, + ProcessGroup, + ) + + logger = logging.getLogger(__name__) + + # only import numpy typing when type checking + if TYPE_CHECKING: + try: + from numpy.typing import ArrayLike + except ImportError: + logger.warning( + "DeviceMesh requires numpy >= 1.21 to be installed for type checking" + ) + + class _MeshEnv: + def __init__(self) -> None: + self.mesh_stack: List[DeviceMesh] = [] + self.child_to_parent_mapping: Dict[DeviceMesh, DeviceMesh] = {} + self.parent_to_child_mapping: Dict[DeviceMesh, Dict[str, DeviceMesh]] = {} + + def get_current_mesh(self) -> "DeviceMesh": + if len(self.mesh_stack) == 0: + raise RuntimeError("No device mesh is currently active!") + return self.mesh_stack[-1] + + def create_child_mesh( + self, device_mesh: "DeviceMesh", mesh_dim: int, mesh_dim_name: str + ) -> "DeviceMesh": + # Directly return the child mesh if it is already created. + child_mesh_mappings = self.parent_to_child_mapping.get(device_mesh) + if child_mesh_mappings: + sub_mesh = child_mesh_mappings.get(mesh_dim_name) + if sub_mesh: + return sub_mesh + + # swap the current dim to the last dim then reshape to flatten out other + # dims, so we can just extract the list of ranks which contains cur_rank. + cur_rank = device_mesh.get_rank() + pg_ranks_by_dim = device_mesh.mesh.swapdims(-1, mesh_dim).reshape( + -1, device_mesh.mesh.size(mesh_dim) + ) + + for mesh_1d in pg_ranks_by_dim: + sub_mesh = DeviceMesh( + device_mesh.device_type, + mesh_1d, + mesh_dim_names=(mesh_dim_name,), + ) + if cur_rank in mesh_1d: + res_sub_mesh = sub_mesh + + res_sub_mesh._dim_group_infos = [device_mesh._dim_group_infos[mesh_dim]] # type: ignore[possibly-undefined] + # Assign the current DeviceMesh as the parent of the child DeviceMesh. + self.child_to_parent_mapping[res_sub_mesh] = device_mesh + self.parent_to_child_mapping.setdefault(device_mesh, {})[ + mesh_dim_name + ] = res_sub_mesh + return res_sub_mesh + + def get_parent_mesh(self, device_mesh: "DeviceMesh") -> Optional["DeviceMesh"]: + return self.child_to_parent_mapping.get(device_mesh, None) + + def get_parent_mesh_dim(self, device_mesh: "DeviceMesh") -> Optional[int]: + """ + Return the index of the mesh dim in the parent mesh. + The device_mesh passed in needs to be sliced out from a parent mesh. + """ + parent_mesh = self.get_parent_mesh(device_mesh) + child_mesh_dim_names = device_mesh.mesh_dim_names + if parent_mesh and child_mesh_dim_names: + assert ( + len(child_mesh_dim_names) == 1 + ), "The child mesh can only be a 1D mesh." 
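+                # A sliced-out child mesh carries exactly one dim name (asserted
+                # above), so the parent dim index can be looked up by that name.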
+ child_mesh_dim_name = child_mesh_dim_names[0] + return self.get_mesh_dim_by_name(parent_mesh, child_mesh_dim_name) + return None + + @staticmethod + def num_devices_per_host(device_type: str) -> int: + return _get_device_handle(device_type).device_count() + + @staticmethod + def num_hosts(device_type: str) -> int: + # ProcessGroup can't tell us this info so we have to infer it, assume + # homogeneous hardware for now + return get_world_size() // _MeshEnv.num_devices_per_host(device_type) + + def get_mesh_dim_by_name( + self, device_mesh: "DeviceMesh", mesh_dim_name: str + ) -> int: + if ( + device_mesh.mesh_dim_names is None + or len(device_mesh.mesh_dim_names) == 0 + ): + raise KeyError( + "No `mesh_dim_names` found.", + ) + if mesh_dim_name not in device_mesh.mesh_dim_names: + raise KeyError( + f"Mesh dimension '{mesh_dim_name}' does not exist.", + f"Available mesh dimensions are: mesh_dim_names={device_mesh.mesh_dim_names}", + ) + return not_none(device_mesh.mesh_dim_names.index(mesh_dim_name)) + + _mesh_resources: _MeshEnv = _MeshEnv() + + def _get_device_handle(device_type: str = "cuda"): + """ + Get the module corresponding to the device_type which is cuda or cuda-like device. + For example, when the device_type is cuda, the module `torch.cuda` is returned. + Return None when there is no corresponding module for device_type, otherwise + return the corresponding module. + """ + return getattr(torch, device_type, None) + + class DeviceMesh: + """ + DeviceMesh represents a mesh of devices, where layout of devices could be + represented as a n-d dimension array, and each value of the n-d dimensional + array is the global id of the default process group ranks. + + DeviceMesh could be used to describe the layout of devices across the cluster, + and serves as a proxy for communication among the device lists within the cluster. + + DeviceMesh can be used as a context manager. + + .. note:: + DeviceMesh follows SPMD programming model, which means the same PyTorch Python program + is running on all processes/ranks in the cluster. Therefore, users need to make sure the + `mesh` array (which describes the layout of devices) should be identical across all ranks. + Inconsistent `mesh` will lead to silent hang. + + Args: + device_type (str): The device type of the mesh. Currently supports: "cpu", "cuda/cuda-like". + mesh (ndarray): A multi-dimensional array or an integer tensor describing the layout + of devices, where the IDs are global IDs of the default process group. + + Returns: + DeviceMesh: A :class:`DeviceMesh` object representing the device layout. + + The following program runs on each process/rank in an SPMD manner. In this example, we have 2 + hosts with 4 GPUs each. + A reduction over the first dimension of mesh will reduce across + columns (0, 4), .. and (3, 7), a reduction over the second dimension + of mesh reduces across rows (0, 1, 2, 3) and (4, 5, 6, 7). + + Example:: + >>> # xdoctest: +SKIP("no rank") + >>> from torch.distributed.device_mesh import DeviceMesh + >>> + >>> # Initialize device mesh as (2, 4) to represent the topology + >>> # of cross-host(dim 0), and within-host (dim 1). 
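+        >>> # (Illustrative assumption: 8 ranks were launched, e.g. via torchrun,
+        >>> #  so ranks 0-3 sit on host 0 and ranks 4-7 on host 1.)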
+ >>> mesh = DeviceMesh(device_type="cuda", mesh=[[0, 1, 2, 3],[4, 5, 6, 7]]) + """ + + device_type: str + mesh: torch.Tensor + mesh_dim_names: Optional[Tuple[str, ...]] + + def __init__( + self, + device_type: str, + mesh: Union[torch.Tensor, "ArrayLike"], + *, + mesh_dim_names: Optional[Tuple[str, ...]] = None, + ) -> None: + self.device_type = device_type + if isinstance(mesh, torch.Tensor) and mesh.device.type != "cpu": + raise ValueError(f"`mesh` must be a CPU tensor, got {mesh}") + self.mesh = ( + mesh.detach().cpu() + if isinstance(mesh, torch.Tensor) + else torch.tensor(mesh, dtype=torch.int) + ) + self.mesh_dim_names = mesh_dim_names + + # private field to pre-generate DeviceMesh's hash + self._flatten_mesh_list = tuple(self.mesh.flatten().tolist()) + self._hash = hash((self._flatten_mesh_list, self.mesh.shape, id(self))) + + # Skip process group initialization if xla device. + # TODO(yeounoh) implement DeviceMesh backend and register XLA backend. + if device_type != "xla": + # always try to create default (world) pg, even if it is not initialized + # already. The world pg is used for device mesh identity (rank) on each + # process (we need to know if the current global rank is in the mesh or not). + self._get_or_create_default_group() + self._init_process_groups() + + def _get_or_create_default_group(self): + default_initialized = is_initialized() + if not default_initialized: + init_process_group() + + world_size = get_world_size() + if self.mesh.numel() > world_size: + raise RuntimeError( + f"Mesh should not be bigger than default world size, but found {self.mesh.numel()} ranks!" + ) + + device_handle = _get_device_handle(self.device_type) + # TODO: if user want to pass pg_options, offer a way to do it + if not default_initialized and device_handle: + # automatically set the current cuda/cuda-like device base on num of gpu devices available in each host + # NOTE: This device selection would only work for homogeneous hardware. + num_devices_per_host = device_handle.device_count() + if ( + world_size > num_devices_per_host + and world_size % num_devices_per_host != 0 + ): + raise RuntimeError( + f"DeviceMesh only support homogeneous hardware, but found " + f"{world_size} ranks and {num_devices_per_host} {self.device_type} devices!" + ) + device_handle.set_device(get_rank() % num_devices_per_host) + + # calculate the coordinates of the current global rank on the mesh + rank_coords = (self.mesh == get_rank()).nonzero() + assert rank_coords.size(0) in (0, 1) + self._coordinate_on_dim: Optional[List[int]] = ( + rank_coords[0].tolist() if rank_coords.size(0) > 0 else None + ) + return _get_default_group() + + def _init_process_groups(self): + # tag/ranks/group_name associated with each mesh dimension, each + # mesh dimension should have one sub-group per rank + # + # TODO(yifu): remove tag and ranks once we fully migrate to native + # functional collectives. 
See details in: + # https://github.com/pytorch/pytorch/issues/93173#issuecomment-1907095208 + dim_group_infos: List[Tuple[str, List[int], str]] = [] + + if self.mesh.ndim == 1 and self.mesh.numel() == get_world_size(): + # if the mesh is the same as world_pg, we just append the default + # pg to the first dim groups, as new_group cannot have the exact + # same ranks as world + dim_group_infos.append( + ( + _get_group_tag(_get_default_group()), + list(range(get_world_size())), + _get_default_group().group_name, + ) + ) + else: + # create sub pgs base on the mesh argument specified + for dim in range(self.mesh.ndim): + # swap the current dim to the last dim + # then reshape to flatten out other dims + pg_ranks_by_dim = self.mesh.swapdims(-1, dim).reshape( + -1, self.mesh.size(dim) + ) + # multi-dim mesh, create subgroups by looping over the pg_ranks + # for each dim and append the groups + for dim_mesh in pg_ranks_by_dim: + subgroup_ranks = dim_mesh.tolist() + + # We temporarily revert the re-use subgroup, since it breaks two internal tests. + # Temporarily reverting to resolve test timeout while root-causing. + # TODO: Add two tests to cover internal tests scenarios and re-enable reuse subgroup if exists. + dim_group = new_group(ranks=subgroup_ranks) + + # only add to dim_groups if the current rank in the subgroup + if self.get_rank() in subgroup_ranks: + if len(dim_group_infos) > dim: + raise RuntimeError( + f"Each device mesh dimension should get only one process group, but got {self.get_rank} " + f"in {subgroup_ranks}!" + ) + dim_group_infos.append( + ( + _get_group_tag(not_none(dim_group)), + subgroup_ranks, + dim_group.group_name, + ) + ) + self._dim_group_infos = dim_group_infos + + def __enter__(self) -> "DeviceMesh": + # set this mesh as the current mesh in mesh env + _mesh_resources.mesh_stack.append(self) + return self + + # pyre-fixme[2]: Parameter must be annotated. + def __exit__(self, exc_type, exc_value, exc_traceback) -> None: + # pop this mesh from mesh env + _mesh_resources.mesh_stack.pop() + + def __repr__(self) -> str: + device_mesh_repr = ( + f"DeviceMesh({self.mesh.tolist()})" + if not self.mesh_dim_names + else f"DeviceMesh({self.mesh.tolist()}, mesh_dim_names={self.mesh_dim_names})" + ) + return device_mesh_repr + + def __hash__(self): + return self._hash + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DeviceMesh): + return False + if id(self.mesh) == id(other.mesh): + return True + return ( + self.mesh.shape == other.mesh.shape + and self._flatten_mesh_list == other._flatten_mesh_list + ) + + def __getitem__(self, mesh_dim_name: str) -> "DeviceMesh": + """ + Slice the current DeviceMesh based on the mesh_dim_name given to create a child + DeviceMesh. + + Args: + mesh_dim_name (str): the name of the mesh dimension of the parent DeviceMesh + to create a child DeviceMesh for. + Returns: + A :class:`DeviceMesh` object + + The following program runs on each process/rank in an SPMD manner. In this example, we have 2 + hosts with 4 GPUs each. + Calling mesh["tp"] on rank 0, 1, 2, 3 would return a 1D child DeviceMesh:([0, 1, 2, 3]). + Calling mesh["tp"] on rank 4, 5, 6, 7 would return a 1D child DeviceMesh:([4, 5, 6, 7]). + Calling mesh["dp"] on rank 0, 4 would return a 1D child DeviceMesh:([0, 4]). + Calling mesh["dp"] on rank 1, 5 would return a 1D child DeviceMesh:([1, 5]). + Calling mesh["dp"] on rank 2, 6 would return a 1D child DeviceMesh:([2, 6]). + Calling mesh["dp"] on rank 3, 7 would return a 1D child DeviceMesh:([3, 7]). 
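+
+        (These names assume the parent mesh was created with
+        mesh_dim_names=("dp", "tp"), i.e. "dp" labels dim 0 across hosts and
+        "tp" labels dim 1 within a host.)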
+ + Example:: + >>> # xdoctest: +SKIP("no rank") + >>> from torch.distributed.device_mesh import DeviceMesh + >>> + >>> # Initialize device mesh as (2, 4) to represent the topology + >>> # of cross-host(dim 0), and within-host (dim 1). + >>> mesh = DeviceMesh(device_type="cuda", mesh=[[0, 1, 2, 3],[4, 5, 6, 7]]) + """ + if self.mesh.ndim == 1: + if self.mesh_dim_names and mesh_dim_name == self.mesh_dim_names[0]: + return self + else: + raise RuntimeError( + f"Invalid mesh_dim_name {mesh_dim_name} specified." + ) + + mesh_dim = _mesh_resources.get_mesh_dim_by_name(self, mesh_dim_name) + submesh = _mesh_resources.create_child_mesh(self, mesh_dim, mesh_dim_name) + return submesh + + def get_group( + self, mesh_dim: Optional[Union[int, str]] = None + ) -> Union[ProcessGroup, List[ProcessGroup]]: + """ + Returns a list of ProcessGroups corresponding to the mesh dimensions, or + returns a single ProcessGroup if mesh_dim is specified or the given mesh has + only one mesh dimension. + + Args: + mesh_dim (str/int, optional): it can be the name of the mesh dimension or the index + of the mesh dimension. Default is None. + + Returns: + A list of :class:`ProcessGroup` object when `mesh_dim` is not specified for + a DeviceMesh with more than 1 dimension; otherwise, returns a single + :class:`ProcessGroup` object. + """ + if not hasattr(self, "_dim_group_infos"): + raise RuntimeError("DeviceMesh process groups not initialized!") + + if self.mesh.ndim == 1: + return not_none( + _find_pg_by_ranks_and_tag(*self._dim_group_infos[0][:2]) + ) + + if mesh_dim is not None: + if isinstance(mesh_dim, str): + mesh_dim = _mesh_resources.get_mesh_dim_by_name(self, mesh_dim) + return not_none( + _find_pg_by_ranks_and_tag(*self._dim_group_infos[mesh_dim][:2]) + ) + else: + dim_groups = [] + for ith_dim in range(self.mesh.ndim): + dim_groups.append( + not_none( + _find_pg_by_ranks_and_tag( + *self._dim_group_infos[ith_dim][:2] + ) + ) + ) + return dim_groups + + def size(self, mesh_dim: Optional[int] = None) -> int: + return self.mesh.numel() if mesh_dim is None else self.mesh.size(mesh_dim) + + @property + def ndim(self) -> int: + return self.mesh.ndim + + @property + def shape(self) -> Tuple[int, ...]: + return tuple(self.mesh.shape) + + def get_rank(self) -> int: + """ + Returns the current global rank. + """ + return get_rank() + + def get_local_rank(self, mesh_dim: Optional[Union[int, str]] = None) -> int: + """ + Returns the local rank of the given mesh_dim of the DeviceMesh. + + Args: + mesh_dim (str/int, optional): it can be the name of the mesh dimension or the index + of the mesh dimension. Default is None. + + Returns: + An integer denotes the local rank. + + The following program runs on each process/rank in an SPMD manner. In this example, we have 2 + hosts with 4 GPUs each. + Calling mesh_2d.get_local_rank(mesh_dim=0) on rank 0, 1, 2, 3 would return 0. + Calling mesh_2d.get_local_rank(mesh_dim=0) on rank 4, 5, 6, 7 would return 1. + Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 0, 4 would return 0. + Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 1, 5 would return 1. + Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 2, 6 would return 2. + Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 3, 7 would return 3. + + Example:: + >>> # xdoctest: +SKIP("no rank") + >>> from torch.distributed.device_mesh import DeviceMesh + >>> + >>> # Initialize device mesh as (2, 4) to represent the topology + >>> # of cross-host(dim 0), and within-host (dim 1). 
+ >>> mesh = DeviceMesh(device_type="cuda", mesh=[[0, 1, 2, 3],[4, 5, 6, 7]]) + """ + if self.ndim > 1 and mesh_dim is None: + raise RuntimeError( + f"Found the DeviceMesh have {self.mesh.ndim} dimensions", + "Optional kwarg `mesh_dim` needs to be specified when device_mesh.ndim > 1.", + ) + elif mesh_dim is None: + mesh_dim = 0 + + mesh_dim_group = not_none(self.get_group(mesh_dim)) + assert isinstance( + mesh_dim_group, ProcessGroup + ), "We expect ProcessGroup before calling `get_rank`!" + return not_none(get_rank(mesh_dim_group)) + + def get_coordinate(self) -> Optional[List[int]]: + """ + Return the relative indices of this rank relative to all + dimensions of the mesh. If this rank is not part of the mesh, return None. + """ + return self._coordinate_on_dim if self._coordinate_on_dim else None + + def init_device_mesh( + device_type: str, + mesh_shape: Tuple[int, ...], + *, + mesh_dim_names: Optional[Tuple[str, ...]] = None, + ) -> DeviceMesh: + """ + Initializes a `DeviceMesh` based on `device_type`, `mesh_shape`, and `mesh_dim_names` parameters. + + This creates a DeviceMesh with an n-dimensional array layout, where `n` is the length of `mesh_shape`. + If `mesh_dim_names` is provided, each dimension is labeled as `mesh_dim_names[i]`. + + .. note:: + `init_device_mesh` follows SPMD programming model, meaning the same PyTorch Python program + runs on all processes/ranks in the cluster. Ensure `mesh_shape` (the dimensions of the nD array + describing device layout) is identical across all ranks. Inconsistent `mesh_shape` may lead to hanging. + + .. note:: + If no process group is found, init_device_mesh will initialize distributed process group/groups + required for distributed communications behind the scene. + + Args: + device_type (str): The device type of the mesh. Currently supports: "cpu", "cuda/cuda-like". + mesh_shape (Tuple[int]): A tuple defining the dimensions of the multi-dimensional array + describing the layout of devices. + mesh_dim_names (Tuple[str], optional): A tuple of mesh dimension names to assign to each dimension + of the multi-dimensional array describing the layout of devices. Its length must match the length + of `mesh_shape`. Each string in `mesh_dim_names` must be unique. + + Returns: + DeviceMesh: A :class:`DeviceMesh` object representing the device layout. 
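+
+    .. note::
+        As a further sketch (assuming the ``("dp", "tp")`` names used in the
+        example below), the resulting 2-D mesh can be sliced by dimension name,
+        e.g. ``mesh_2d["tp"]``, to obtain a 1-D child mesh.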
+ + Example:: + >>> # xdoctest: +SKIP("no rank") + >>> from torch.distributed.device_mesh import init_device_mesh + >>> + >>> mesh_1d = init_device_mesh("cuda", mesh_shape=(8,)) + >>> mesh_2d = init_device_mesh("cuda", mesh_shape=(2, 8), mesh_dim_names=("dp", "tp")) + + """ + if mesh_dim_names is not None: + if len(set(mesh_dim_names)) != len(mesh_dim_names): + raise RuntimeError( + "Each mesh_dim_name must be unique.", + f"Found repeated mesh_dim_name in mesh_dim_names {mesh_dim_names}", + ) + + if len(mesh_shape) != len(mesh_dim_names): + raise RuntimeError( + "mesh_shape and mesh_dim_names should have same length!", + f"Found len(mesh_dim_names): {len(mesh_dim_names)} and len(mesh_shape):{len(mesh_shape)}.", + ) + + mesh = torch.arange(math.prod(mesh_shape)).view(mesh_shape) + device_mesh = DeviceMesh( + device_type=device_type, + mesh=mesh, + mesh_dim_names=mesh_dim_names, + ) + + return device_mesh diff --git a/venv/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py b/venv/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py new file mode 100644 index 0000000000000000000000000000000000000000..99727a4f05c3e90a0723a9bb23bc1727ab6733ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py @@ -0,0 +1,4264 @@ +"""Distributed Collective Communication (c10d).""" + +import itertools +import collections.abc +import contextlib +import hashlib +import io +import logging +import os +import pickle +import sys +import time +import warnings +from collections import namedtuple +from datetime import timedelta +from typing import Any, Callable, Dict, Optional, Tuple, Union, List + +import torch +from torch._C._distributed_c10d import ( + AllgatherOptions, + AllreduceCoalescedOptions, + AllreduceOptions, + AllToAllOptions, + _DistributedBackendOptions, + BarrierOptions, + BroadcastOptions, + GatherOptions, + PrefixStore, + ProcessGroup, + ReduceOp, + ReduceOptions, + ReduceScatterOptions, + ScatterOptions, + Store, + DebugLevel, + get_debug_level, + Work, + _register_process_group, + _resolve_process_group, + _unregister_all_process_groups, + _unregister_process_group, +) +from torch._utils_internal import set_pytorch_distributed_envs_from_justknobs +from .constants import default_pg_timeout, default_pg_nccl_timeout +from .c10d_logger import _exception_logger, _time_logger +from .rendezvous import register_rendezvous_handler, rendezvous # noqa: F401 +from ..utils._typing_utils import not_none +DistStoreError = torch._C._DistStoreError + +__all__ = [ + 'Backend', 'BackendConfig', 'GroupMember', 'P2POp', 'all_gather', 'all_gather_coalesced', + 'all_gather_object', 'all_reduce', + 'all_reduce_coalesced', 'all_to_all', + 'all_to_all_single', 'barrier', 'batch_isend_irecv', 'broadcast', + 'broadcast_object_list', 'destroy_process_group', + 'gather', 'gather_object', 'get_backend_config', 'get_backend', 'get_rank', + 'get_world_size', 'get_pg_count', 'group', 'init_process_group', 'irecv', + 'is_gloo_available', 'is_initialized', 'is_mpi_available', 'is_backend_available', + 'is_nccl_available', 'is_torchelastic_launched', 'is_ucc_available', + 'isend', 'monitored_barrier', 'new_group', 'new_subgroups', + 'new_subgroups_by_enumeration', 'recv', 'reduce', + 'reduce_scatter', 'scatter', + 'scatter_object_list', 'send', 'supports_complex', + 'AllreduceCoalescedOptions', 'AllreduceOptions', 'AllToAllOptions', + 'BarrierOptions', 'BroadcastOptions', 'GatherOptions', 'PrefixStore', + 'ProcessGroup', 'ReduceOp', 'ReduceOptions', 
'ReduceScatterOptions', + 'ScatterOptions', 'Store', 'DebugLevel', 'get_debug_level', 'Work', + 'default_pg_timeout', 'get_group_rank', 'get_global_rank', 'get_process_group_ranks', + 'reduce_op', 'all_gather_into_tensor', 'reduce_scatter_tensor', +] + +_MPI_AVAILABLE = True +_NCCL_AVAILABLE = True +_GLOO_AVAILABLE = True +_UCC_AVAILABLE = True + +_pickler = pickle.Pickler +_unpickler = pickle.Unpickler + +# Change __module__ of all imported types from torch._C._distributed_c10d that are public +def _export_c_types() -> None: + _public_types_to_change_module = [ + AllreduceCoalescedOptions, + AllreduceOptions, + AllToAllOptions, + BarrierOptions, + BroadcastOptions, + GatherOptions, + PrefixStore, + ProcessGroup, + ReduceOp, + ReduceOptions, + ReduceScatterOptions, + ScatterOptions, + Store, + DebugLevel, + get_debug_level, + Work + ] + for type in _public_types_to_change_module: + type.__module__ = "torch.distributed.distributed_c10d" +_export_c_types() + +try: + from torch._C._distributed_c10d import ProcessGroupMPI + ProcessGroupMPI.__module__ = "torch.distributed.distributed_c10d" + __all__ += ["ProcessGroupMPI"] +except ImportError: + _MPI_AVAILABLE = False + +try: + from torch._C._distributed_c10d import ProcessGroupNCCL + ProcessGroupNCCL.__module__ = "torch.distributed.distributed_c10d" + __all__ += ["ProcessGroupNCCL"] +except ImportError: + _NCCL_AVAILABLE = False + +try: + from torch._C._distributed_c10d import ProcessGroupGloo + from torch._C._distributed_c10d import _ProcessGroupWrapper + ProcessGroupGloo.__module__ = "torch.distributed.distributed_c10d" + __all__ += ["ProcessGroupGloo"] +except ImportError: + _GLOO_AVAILABLE = False + +try: + from torch._C._distributed_c10d import ProcessGroupUCC + ProcessGroupUCC.__module__ = "torch.distributed.distributed_c10d" + __all__ += ["ProcessGroupUCC"] +except ImportError: + _UCC_AVAILABLE = False + +logger = logging.getLogger(__name__) + +PG_WRAPPER_STORE_PREFIX = "pg_wrapper" + + +# Some reduce ops are not supported by complex numbers and will result in an error. +# We currently provide complex support to the distributed API by viewing +# complex tensors as real (torch.view_as_real), meaning that calling +# these unsupported ops will return garbage values rather than error out. +# (e.g. max(2+3i, 3+2i) = 3+3i) +# We'd like calls to unsupported ops to error out accordingly, +# rather than returning garbage values. +def supports_complex(reduceOp: ReduceOp) -> bool: + """Return true if reduce ops is supported. False otherwise.""" + denyList = [ + ReduceOp.MAX, + ReduceOp.MIN, + ReduceOp.PRODUCT, + ReduceOp.BAND, + ReduceOp.BOR, + ReduceOp.BXOR, + ] + return reduceOp not in denyList + + +class Backend(str): + """ + An enum-like class for backends. + + Available backends: GLOO, NCCL, UCC, MPI, and other registered backends. + + The values of this class are lowercase strings, e.g., ``"gloo"``. They can + be accessed as attributes, e.g., ``Backend.NCCL``. + + This class can be directly called to parse the string, e.g., + ``Backend(backend_str)`` will check if ``backend_str`` is valid, and + return the parsed lowercase string if so. It also accepts uppercase strings, + e.g., ``Backend("GLOO")`` returns ``"gloo"``. + + .. note:: The entry ``Backend.UNDEFINED`` is present but only used as + initial value of some fields. Users should neither use it directly + nor assume its existence. 
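+
+    Example (a minimal illustration of the parsing behavior described above)::
+
+        >>> Backend("GLOO")
+        'gloo'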
+ """ + + UNDEFINED = "undefined" + GLOO = "gloo" + NCCL = "nccl" + UCC = "ucc" + MPI = "mpi" + + _BackendPlugin = namedtuple("_BackendPlugin", ["creator_fn", "extended_api"]) + + _plugins: Dict[str, _BackendPlugin] = {} + + backend_list = [UNDEFINED, GLOO, NCCL, UCC, MPI] + + default_device_backend_map: Dict[str, str] = { + 'cpu' : GLOO, + 'cuda' : NCCL, + } + + backend_capability: Dict[str, List[str]] = { + GLOO : ["cpu", "cuda"], + NCCL : ["cuda"], + UCC : ["cpu", "cuda"], + MPI : ["cpu", "cuda"], + } + + backend_type_map: Dict[str, ProcessGroup.BackendType] = { + UNDEFINED: ProcessGroup.BackendType.UNDEFINED, + GLOO : ProcessGroup.BackendType.GLOO, + NCCL: ProcessGroup.BackendType.NCCL, + UCC: ProcessGroup.BackendType.UCC, + } + + def __new__(cls, name: str): + """Create and return a new instance of the class.""" + if not isinstance(name, str): + raise ValueError("Backend constructor parameter must be string-ish") + value = getattr(Backend, name.upper(), Backend.UNDEFINED) + + if value == Backend.UNDEFINED: + value = name.lower() + return value + + @classmethod + def register_backend(cls, name, func, extended_api=False, devices: Optional[Union[str, List[str]]] = None) -> None: + """ + Register a new backend with the given name and instantiating function. + + This class method is used by 3rd party ``ProcessGroup`` extension to + register new backends. + + Args: + name (str): Backend name of the ``ProcessGroup`` extension. It + should match the one in ``init_process_group()``. + func (function): Function handler that instantiates the backend. + The function should be implemented in the backend + extension and takes four arguments, including + ``store``, ``rank``, ``world_size``, and ``timeout``. + extended_api (bool, optional): Whether the backend supports extended argument structure. + Default: ``False``. If set to ``True``, the backend + will get an instance of ``c10d::DistributedBackendOptions``, and + a process group options object as defined by the backend implementation. + device (str or list of str, optional): device type this backend + supports, e.g. "cpu", "cuda", etc. If `None`, + assuming both "cpu" and "cuda" + + .. note:: This support of 3rd party backend is experimental and subject to change. + + """ + # Allow UCC plugin if Pytorch is not built with native support. + # TODO: remove this exception once UCC plugin is fully deprecated. + if (name != Backend.UCC or (name == Backend.UCC and is_ucc_available())): + assert not hasattr(Backend, name.upper()), ( + f"{name.upper()} c10d backend already exist" + ) + assert name.upper() not in Backend._plugins, ( + f"{name.upper()} c10d backend creator function already exist" + ) + + setattr(Backend, name.upper(), name.lower()) + Backend.backend_list.append(name.lower()) + if devices is not None: + for device in devices: + if device != 'cpu' and device != 'cuda': + Backend.default_device_backend_map[device] = name.lower() + Backend.backend_type_map[name.lower()] = ProcessGroup.BackendType.CUSTOM + + # Update device capability matrix in Backend class + if devices is None: + # This is more of a backward support for groups like `threaded`: + # assume default devices "cpu" and "cuda", but warn + warnings.warn( + f"Device capability of {name} unspecified, assuming `cpu` and " + "`cuda`. Please specify it via the `devices` argument of " + "`register_backend`." + ) + Backend.backend_capability[name.lower()] = ["cpu", "cuda"] + elif isinstance(devices, str): + # Single device string specified. Simply convert to list. 
+ Backend.backend_capability[name.lower()] = [devices] + else: + Backend.backend_capability[name.lower()] = devices + + Backend._plugins[name.upper()] = Backend._BackendPlugin(func, extended_api) + +class BackendConfig: + """Backend configuration class.""" + + def __init__(self, backend: Backend): + """Init.""" + self.device_backend_map: Dict[str, Backend] = {} + backend = str(backend) + + if backend == Backend.UNDEFINED: + # default config when backend is not specified + # supported since PyTorch 2.0 + for device, default_backend in Backend.default_device_backend_map.items(): + if is_backend_available(default_backend): + if default_backend == Backend.NCCL and not torch.cuda.is_available(): + continue + self.device_backend_map[device] = Backend(default_backend) + elif backend.lower() in Backend.backend_list: + # Cases for when backend is a single string (without device types) + # e.g. "nccl", "gloo", "ucc", "mpi" + supported_devices = Backend.backend_capability[backend.lower()] + backend_val = Backend(backend) + self.device_backend_map = dict.fromkeys(supported_devices, backend_val) + elif ":" in backend.lower(): + # Backend specified in "device:backend" format + # make sure the backend string is in the correct format + # "{device_type1}:{backend1},{device_type2}:{backend2}" + # e.g. "cpu:gloo,cuda:nccl" + backend_str_error_message = f"""The custom backend string argument is invalid: {backend}. + Custom backend string is an experimental feature where the backend string must be in the format: + ":,:...". e.g. 'cpu:gloo,cuda:nccl'""" + + # parse the backend string and populate the device_backend_map + for device_backend_pair_str in backend.lower().split(","): + device_backend_pair = device_backend_pair_str.split(":") + if len(device_backend_pair) != 2: + raise ValueError(f"Invalid device:backend pairing: \ + {device_backend_pair_str}. {backend_str_error_message}") + device, backend = device_backend_pair + if device in self.device_backend_map: + raise ValueError(f"Duplicate device type {device} \ + in backend string: {backend}. {backend_str_error_message}") + self.device_backend_map[device] = Backend(backend) + else: + # User specified a single backend name whose device capability is + # unknown, assuming it can support the default devices of PyTorch + # (cpu and cuda) + warnings.warn( + f"Device capability of {backend} unknown, assuming `cpu` and " + "`cuda`. You can specify it in `device:backend` format in " + "`init_process_group` call." + ) + backend_val = Backend(backend) + self.device_backend_map = { + "cpu" : backend_val, + "cuda" : backend_val, + "xpu" : backend_val, + } + + logger.info( + f"Using backend config: {self.device_backend_map}" # noqa: G004 + ) + + def __repr__(self): + """Return all the device:backend pairs separated by commas.""" + return ",".join(f"{device}:{backend}" for device, backend in self.device_backend_map.items()) + + def get_device_backend_map(self) -> Dict[str, Backend]: + """Return backend map of the device.""" + return self.device_backend_map + +class _reduce_op: + r""" + Deprecated enum-like class. + + For reduction operations: ``SUM``, ``PRODUCT``, ``MIN``, and ``MAX``. + + :class:`~torch.distributed.ReduceOp` is recommended to use instead. 
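+
+    Accessing any attribute of this object (e.g. ``reduce_op.SUM``) emits a
+    deprecation warning, as implemented in ``__getattribute__`` below.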
+ """ + + def __init__(self): + # __members__ is a dict storing key-value pairs for enum classes + for k, v in ReduceOp.RedOpType.__members__.items(): + setattr(self, k, v) + self.__members__ = ReduceOp.RedOpType.__members__ + + def __getattribute__(self, key): + warnings.warn( + "torch.distributed.reduce_op is deprecated, please use " + "torch.distributed.ReduceOp instead" + ) + return object.__getattribute__(self, key) + + +reduce_op = _reduce_op() + + +class P2POp: + """ + A class to build point-to-point operations for ``batch_isend_irecv``. + + This class builds the type of P2P operation, communication buffer, peer rank, + Process Group, and tag. Instances of this class will be passed to + ``batch_isend_irecv`` for point-to-point communications. + + Args: + op (Callable): A function to send data to or receive data from a peer process. + The type of ``op`` is either ``torch.distributed.isend`` or + ``torch.distributed.irecv``. + tensor (Tensor): Tensor to send or receive. + peer (int): Destination or source rank. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + tag (int, optional): Tag to match send with recv. + """ + + def __init__(self, op: Callable, tensor: torch.Tensor, peer: int, + group: Optional[ProcessGroup] = None, tag: int = 0): + """Init.""" + self.op = op + self.tensor = tensor + self.peer = peer + self.group = group + self.tag = tag + + def __new__(cls, op: Callable, tensor: torch.Tensor, peer: int, + group: Optional[ProcessGroup] = None, tag: int = 0): + """Create and return a new instance of the class.""" + _check_op(op) + _check_single_tensor(tensor, "tensor") + return object.__new__(cls) + + +class _CollOp: + """ + A class to capture collective operations. + + Args: + op (Callable): A collective function, e.g. ``torch.distributed.all_reduce``. + tensor (Tensor): Tensor to operate on. + dst_tensor (Tensor, optional): Provided when source and destinaton tensors are not the same. + redop (ReduceOp, optional): reduce operation. + root (int, optional): root of broadcast or reduce. + """ + + def __init__(self, op: Callable, tensor: torch.Tensor, dst_tensor: Optional[torch.Tensor] = None, + redop: Optional[ReduceOp] = None, root: Optional[int] = None): + self.op = op + self.tensor = tensor + self.dst_tensor = dst_tensor + self.redop = redop + self.root = root + + +# DO NOT USE THESE FIELDS DIRECTLY. +# Use them through the _world object to make sure the _world override mechanism +_pg_map: Dict[ProcessGroup, Tuple[str, Store]] = {} +_pg_names: Dict[ProcessGroup, str] = {} +_pg_group_ranks: Dict[ProcessGroup, Dict[int, int]] = {} +# For a pg, it is a map from ProcessGroup to BackendConfig +_pg_backend_config: Dict[ProcessGroup, str] = {} +_group_count = 0 +_tags_to_pg: Dict[str, List[ProcessGroup]] = {} +_pg_to_tag: Dict[ProcessGroup, str] = {} +_backend: Optional[str] = None + +class _World: + """ + Container class for c10d process group state. + + This is used during registration and lookup of PG state. + + .. warning:: This is an experimental API intended to expose the inner workings + of c10d and is subject to change.. + """ + + def __init__(self): + self._default_pg = None + self._pg_coalesce_state: Dict[ProcessGroup, List[_CollOp]] = {} + self._pg_default_device: Dict[ProcessGroup, torch.device] = {} + + @property + def default_pg(self) -> Optional[ProcessGroup]: + """ + Process group that includes all ranks of the cluster. 
+ + This default ProcessGroup is used by c10d APIs when a ProcessGroup is needed + but None is provided. + """ + return self._default_pg + + @default_pg.setter + def default_pg(self, value) -> None: + self._default_pg = value + + @property + def pg_map(self) -> Dict[ProcessGroup, Tuple[str, Store]]: + """ + Provide Mapping from ProcessGroup to backend name and store. + + For NCCL and GLOO pg, it is a map from ProcessGroup to (Backend, Store) + For MPI pg, it is a map from ProcessGroup to (Backend, None) + + TODO don't expose the map, expose fine grained ops + """ + global _pg_map + return _pg_map + + @property + def pg_names(self) -> Dict[ProcessGroup, str]: + """ + Process group's names, map from ProcessGroup to str. + + TODO don't expose the map, expose fine grained ops + """ + global _pg_names + return _pg_names + + @property + def pg_group_ranks(self) -> Dict[ProcessGroup, Dict[int, int]]: + """ + Process group's global rank to local rank mapping. + + TODO don't expose the map, expose fine grained ops + """ + global _pg_group_ranks + return _pg_group_ranks + + @property + def pg_backend_config(self) -> Dict[ProcessGroup, str]: + """ + Process group's backend config. + + TODO don't expose the map, expose fine grained ops + """ + global _pg_backend_config + return _pg_backend_config + + @property + def group_count(self) -> int: + """ + Process group count for default naming. + + TODO don't expose group_count, use something else instead + """ + global _group_count + return _group_count + + @group_count.setter + def group_count(self, value: int) -> None: + """Use to compute the name of ProcessGroups when using global synchronization.""" + global _group_count + _group_count = value + + @property + def tags_to_pg(self) -> Dict[str, List[ProcessGroup]]: + global _tags_to_pg + return _tags_to_pg + + @property + def pg_to_tag(self) -> Dict[ProcessGroup, str]: + global _pg_to_tag + return _pg_to_tag + + @property + def pg_coalesce_state(self) -> Dict[ProcessGroup, List[_CollOp]]: + return self._pg_coalesce_state + + @property + def pg_default_device(self) -> Dict[ProcessGroup, torch.device]: + return self._pg_default_device + + @property + def pg_config_info(self) -> List[Dict[str, Any]]: + """ + Return a list of dict with process groups and backends. + + Along with their unique IDs and configurations (types and ranks). + """ + config_info: List[Dict[str, Any]] = [] + default_pg_size = _get_group_size(None) + for pg in self.pg_map.keys(): + ranks = self.pg_group_ranks[pg] + config_info.append( + { + "pg_name": self.pg_names[pg], + "uid": _get_process_group_uid(pg), + "backend_config": self.pg_backend_config[pg], + "ranks": list(ranks.keys()) + if len(ranks) != default_pg_size + else [], # 'ranks' is an empty list when all ranks are involved in a pg + "group_size": len(ranks), + "group_count": self.group_count, + } + ) + return config_info + + +_world = _World() +"""Holds the singleton instance of ``_World`` used by c10. Experimental extension point to override it""" + +class _WorldMeta(type): + """ + Meta class of ``group`` and ``GroupMember``. + + Allows them to have the class property ``WORLD``. + """ + + # Points to the default PG once initialized. + @property + def WORLD(cls) -> Optional[ProcessGroup]: + return _world.default_pg + + @WORLD.setter + def WORLD(cls, pg: Optional[ProcessGroup]): + _world.default_pg = pg + +class group(metaclass=_WorldMeta): + """Group class. 
Placeholder.""" + + pass + +class GroupMember(metaclass=_WorldMeta): + """Group member class.""" + + NON_GROUP_MEMBER = -100 + + +def _get_default_timeout(backend: Backend) -> timedelta: + # see note on nccl vs other backend timeout (constants.py) + if backend == Backend.NCCL: + if not isinstance(default_pg_nccl_timeout, timedelta): + # TODO moco benchmark on CPU initializes pgnccl backend today, triggered this assert in CI before it was + # changed to be a warning. We should fix the moco model. + warnings.warn("Attempted to get default timeout for nccl backend, but NCCL support is not compiled") + return default_pg_timeout + return default_pg_nccl_timeout + else: + return default_pg_timeout + +def _check_valid_timeout(timeout: Any) -> None: + if not isinstance(timeout, timedelta): + raise TypeError( + f"Expected timeout argument to be of type datetime.timedelta, got {timeout}" + ) + +# Default process group state +_default_pg_init_method: Optional[str] = None + +STORE_BASED_BARRIER_PREFIX = "store_based_barrier_key" + +def _get_pg_default_device(group: Optional[ProcessGroup] = None) -> torch.device: + """ + Return the device to use with ``group`` for control flow usage (object collectives, barrier). + + There are selection rules: + 1. If user specifies exactly one backend in ``init_process_group`` call: + use that backend + 2. Else if user specifies multiple "device:backend" pairs in init_process_group: + If "cpu" is among those pairs, use "cpu" (because the object is in cpu memory); + Otherwise, use the first backend (sort of a random pick). + + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + Returns: + torch.device: The device to use with ``group``. + + """ + group = group or _get_default_group() + if group in _world.pg_default_device: + # Previously searched and cached; just return + return _world.pg_default_device[group] + + if not isinstance(group, ProcessGroup): + # Provide backward compatibility to cases where `group` passed in is + # actually a Backend (like `ProcessGroupGloo`) rather than a + # `ProcessGroup` in PT 2.0 sense + warnings.warn( + f"You are using a Backend {type(group)} as a ProcessGroup. " + "This usage is deprecated since PyTorch 2.0. Please use a public API " + "of PyTorch Distributed instead." + ) + # Most users create Gloo with private API for object collectives + _world.pg_default_device[group] = torch.device("cpu") + return _world.pg_default_device[group] + + """ + ``group._device_types`` is a property pybind that returns the devices + ("cpu", "cuda", etc) supported by ``group``. Can be multiple if the + ``group`` supports multiple devices. + """ + devices = group._device_types + + if len(devices) == 1: + # User fixed exactly one backend in `init_process_group` + _world.pg_default_device[group] = devices[0] + elif len(devices) == 0: + # No backend has been registered with this PG (maybe because no + # collective has been run?) We pick cpu as the default and hopefully + # this would lazily init Gloo or other available cpu backend. + _world.pg_default_device[group] = torch.device("cpu") + elif torch.device("cpu") in devices: + # There are multiple backends in this PG and cpu is among them. + # cpu is preferred as the object is in cpu memory. No need for device + # copy. + _world.pg_default_device[group] = torch.device("cpu") + else: + # No cpu in the backend list. 
Randomly pick the first backend + _world.pg_default_device[group] = devices[0] + + logger.info( + f"Using device {_world.pg_default_device[group]} for object " # noqa: G004 + "collectives." + ) + return _world.pg_default_device[group] + + +@_time_logger +def _store_based_barrier(rank, store, group_name, rendezvous_count, timeout, logging_interval=timedelta(seconds=10)) -> None: + """ + Store based barrier for synchronizing processes. + + Barrier based on store which is used for synchronizing processes after + ``init_process_group`` or ``new_group``. Intended to be used only with + those two methods and is not a generic alternative to ``barrier()``. + """ + store_key = f"{STORE_BASED_BARRIER_PREFIX}:{group_name}" + store.add(store_key, 1) + logger.info("Added key: %s to store for rank: %s", store_key, rank) + + # Now wait for all workers to check in with the store. + world_size = rendezvous_count + worker_count = store.add(store_key, 0) + + last_worker_key = f"{store_key}:last_worker" + if worker_count == world_size: + store.set(last_worker_key, "1") + + # adjust the timeout to be at least 10secs + 1sec per thousand ranks to reduce the odds of timeout + # this value was empirically found while scale testing. + logging_interval = max(logging_interval, timedelta(seconds=10 + world_size / 1000)) + + start = time.time() + while True: + try: + # This will throw an exception after the logging_interval in which we print out + # the status of the group or time out officially, throwing runtime error + store.wait([last_worker_key], logging_interval) + break + except RuntimeError as e: + worker_count = store.add(store_key, 0) + # Print status periodically to keep track. + logger.info( + "Waiting in store based barrier to initialize process group for " + "rank: %s, key: %s (world_size=%s, num_workers_joined=%s, timeout=%s)", + rank, store_key, world_size, worker_count, timeout + ) + + if timedelta(seconds=(time.time() - start)) > timeout: + raise DistStoreError( # noqa: TRY200 + "Timed out initializing process group in store based barrier on " + "rank {}, for key: {} (world_size={}, num_workers_joined={}, timeout={})".format( + rank, store_key, world_size, worker_count, timeout + ) + ) + + logger.info( + "Rank %s: Completed store-based barrier for key:%s with %s nodes.", rank, store_key, world_size + ) + + +def _rank_not_in_group(group: Optional[ProcessGroup]) -> bool: + """Check if the current process's rank is not in a given group.""" + if group is None: + return False + return group == GroupMember.NON_GROUP_MEMBER + + +def _warn_not_in_group(op_name) -> None: + global_rank = -1 if GroupMember.WORLD is None else GroupMember.WORLD.rank() + warnings.warn( + f"Running {op_name} on global rank {global_rank} which does not " + "belong to the given group." + ) + + +def get_group_rank(group: ProcessGroup, global_rank: int) -> int: + """ + Translate a global rank into a group rank. + + ``global_rank`` must be part of ``group`` otherwise this raises RuntimeError. + + Args: + group (ProcessGroup): ProcessGroup to find the relative rank. + global_rank (int): Global rank to query. + + Returns: + Group rank of ``global_rank`` relative to ``group`` + + N.B. 
calling this function on the default process group returns identity + """ + if group is GroupMember.WORLD: + return global_rank + if group not in _world.pg_group_ranks: + raise ValueError(f"Group {group} is not registered, please create group with torch.distributed.new_group API") + group_ranks = _world.pg_group_ranks[group] + if global_rank not in group_ranks: + raise ValueError(f"Global rank {global_rank} is not part of group {group}") + + return group_ranks[global_rank] + +def get_global_rank(group: ProcessGroup, group_rank: int) -> int: + """ + Translate a group rank into a global rank. + + ``group_rank`` must be part of `group` otherwise this raises RuntimeError. + + Args: + group (ProcessGroup): ProcessGroup to find the global rank from. + group_rank (int): Group rank to query. + + Returns: + Global rank of ``group_rank`` relative to ``group`` + + N.B. calling this function on the default process group returns identity + """ + if group is GroupMember.WORLD: + return group_rank + if group not in _world.pg_group_ranks: + raise ValueError(f"Group {group} is not registered, please create group with torch.distributed.new_group API") + for rank, grp_rank in _world.pg_group_ranks[group].items(): + if grp_rank == group_rank: + return rank + raise ValueError(f"Group rank {group_rank} is not part of group {group}") + +# TODO: remove this once the ecosystem moves away from it. +def _get_global_rank(group, rank) -> int: + """Use get_global_rank as this method is deprecated.""" + warnings.warn( + "torch.distributed.distributed_c10d._get_global_rank is deprecated " + "please use torch.distributed.distributed_c10d.get_global_rank instead" + ) + return get_global_rank(group, rank) + + +def get_process_group_ranks(group: ProcessGroup) -> List[int]: + """ + Get all ranks associated with ``group``. + + Args: + group (ProcessGroup): ProcessGroup to get all ranks from. + + Returns: + List of global ranks ordered by group rank. + """ + return list(_world.pg_group_ranks[group].keys()) + +def _get_group_size(group) -> int: + """Get a given group's world size.""" + if group is GroupMember.WORLD or group is None: + default_pg = _get_default_group() + return default_pg.size() + return group.size() + + +def _get_group_size_by_name(group_name: str) -> int: + group = _resolve_process_group(group_name) + return group.size() + + +def _resolve_group_name_by_ranks_and_tag(ranks: List[int], tag: str) -> str: + # TODO(yifu): remove this function once ranks + tag is not a supported + # identifier for process group for functional collectives. + group = _find_pg_by_ranks_and_tag(tag, ranks) + if group is None: + raise ValueError("") + return group.group_name + + +def _check_single_tensor(param, param_name) -> None: + """Check that the parameter ``param_name`` is a single tensor.""" + if not isinstance(param, torch.Tensor): + raise TypeError( + f"""Invalid function argument. Expected parameter `{param_name}` of type torch.Tensor + but got {type(param)} instead.""" + ) + + +def _check_tensor_list(param, param_name) -> None: + """Check that the parameter ``param_name`` is a list of tensors.""" + if not isinstance(param, list): + raise TypeError( + f"""Invalid function argument. Expected parameter `{param_name}` of type List[torch.Tensor] + but got {type(param)} instead.""" + ) + elif not all(isinstance(p, torch.Tensor) for p in param): + raise TypeError( + f"""Invalid function argument. 
Expected parameter `{param_name}` of type List[torch.Tensor] + but got {type(param)} with elements of type {[type(p) for p in param]}.""" + ) + + +def _as_iterable(obj) -> collections.abc.Iterable: + return obj if isinstance(obj, list) else (obj,) + +def _ensure_all_tensors_same_dtype(*tensors) -> None: + last_dtype = None + for tensor in itertools.chain.from_iterable(map(_as_iterable, tensors)): + tensor_dtype = tensor.dtype + # Mixing complex and its element type is allowed + if tensor_dtype.is_complex: + tensor_dtype = torch.float32 if tensor_dtype == torch.complex64 else torch.complex128 + + if last_dtype is None: + last_dtype = tensor_dtype + else: + if last_dtype != tensor_dtype: + raise ValueError( + "Invalid usage of tensors with different dtypes" + f"Found {last_dtype} and {tensor.dtype}" + ) + + +def _check_op(op) -> None: + """Check that the ``op`` is either isend or irecv.""" + if op not in [isend, irecv]: + raise ValueError( + "Invalid ``op``. Expected ``op`` " + "to be of type ``torch.distributed.isend`` or " + "``torch.distributed.irecv``." + ) + + +def _check_p2p_op_list(p2p_op_list) -> None: + """ + Check that the ``p2p_op_list`` is a list of P2POp instances. + + Also, check that all ops use the same group. + """ + if not isinstance(p2p_op_list, list) or not all( + isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list + ): + raise ValueError( + "Invalid ``p2p_op_list``. Each op is expected to " + "to be of type ``torch.distributed.P2POp``." + ) + + group = p2p_op_list[0].group + if not all(group == p2p_op.group for p2p_op in p2p_op_list): + raise ValueError("All ops need to use the same group.") + + +def is_mpi_available() -> bool: + """Check if the MPI backend is available.""" + return _MPI_AVAILABLE + + +def is_nccl_available() -> bool: + """Check if the NCCL backend is available.""" + return _NCCL_AVAILABLE + + +def is_gloo_available() -> bool: + """Check if the Gloo backend is available.""" + return _GLOO_AVAILABLE + + +def is_ucc_available() -> bool: + """Check if the UCC backend is available.""" + return _UCC_AVAILABLE + + +def is_backend_available(backend: str) -> bool: + """ + Check backend availability. + + Checks if the given backend is available and supports the built-in backends or + third-party backends through function ``Backend.register_backend``. + + Args: + backend (str): Backend name. + Returns: + bool: Returns true if the backend is available otherwise false. + """ + # If the backend has an ``is_backend_available`` function, return the result of that function directly + available_func = getattr(torch.distributed, f"is_{backend.lower()}_available", None) + if available_func: + return available_func() + + return backend.lower() in Backend.backend_list + + +def is_initialized() -> bool: + """Check if the default process group has been initialized.""" + return GroupMember.WORLD is not None + + +def is_torchelastic_launched() -> bool: + """ + Check whether this process was launched with ``torch.distributed.elastic`` (aka torchelastic). + + The existence of ``TORCHELASTIC_RUN_ID`` environment + variable is used as a proxy to determine whether the current process + was launched with torchelastic. This is a reasonable proxy since + ``TORCHELASTIC_RUN_ID`` maps to the rendezvous id which is always a + non-null value indicating the job id for peer discovery purposes.. 
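+
+    Example (sketch; the result depends on how this process was launched)::
+
+        >>> # xdoctest: +SKIP("requires a torchelastic launch")
+        >>> import torch.distributed as dist
+        >>> dist.is_torchelastic_launched()
+        True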
+ """ + return os.getenv("TORCHELASTIC_RUN_ID") is not None + + +def _is_barrier_after_init() -> int: + # Environment variable to control whether process group should perform a + # barrier after its init. Default value is 0, i.e. no barrier. If you + # experience issue with this setting, you may set + # `TORCH_DIST_INIT_BARRIER=1` to add the barrier. + return int(os.getenv("TORCH_DIST_INIT_BARRIER", "0")) + + +def _abort_in_destroy_pg() -> bool: + # Environment variable to control whether to abort the communicators when users call destroy_process_group() + env = os.getenv("TORCH_NCCL_ABORT_IN_DESTROY_PG", "0") + return env == "1" or env.lower() == "true" + + +def _get_default_group() -> ProcessGroup: + """Get the default process group created by init_process_group.""" + if not is_initialized(): + raise ValueError( + "Default process group has not been initialized, " + "please make sure to call init_process_group." + ) + return not_none(GroupMember.WORLD) + + +def _get_default_store() -> Store: + """Get the default store created by init_process_group.""" + if not is_initialized(): + raise ValueError( + "Default process group has not been initialized, " + "please make sure to call init_process_group." + ) + default_pg = _get_default_group() + _, default_store = _world.pg_map[default_pg] + return default_store + + +def _update_default_pg(pg) -> None: + _world.default_pg = pg + rank = pg.rank() if pg is not None and pg != GroupMember.NON_GROUP_MEMBER else -1 + torch._C._distributed_c10d._set_global_rank(rank) + +def get_backend_config(group: Optional[ProcessGroup] = None) -> str: + """ + Return the backend configuration of the given process group. + + Args: + group (ProcessGroup, optional): The process group to work on. The + default is the general main process group. If another specific group + is specified, the calling process must be part of :attr:`group`. + + Returns: + The backend configuration of the given process group as a lower case string. + + """ + if group is None: + pg = _get_default_group() + else: + pg = group + if _rank_not_in_group(pg): + raise ValueError("Invalid process group specified") + backend_config = _world.pg_backend_config.get(pg) + return str(not_none(backend_config)) + +def get_backend(group: Optional[ProcessGroup] = None) -> Backend: + """ + Return the backend of the given process group. + + Args: + group (ProcessGroup, optional): The process group to work on. The + default is the general main process group. If another specific group + is specified, the calling process must be part of :attr:`group`. + + Returns: + The backend of the given process group as a lower case string. + + """ + if group is None: + pg = _get_default_group() + else: + pg = group + if _rank_not_in_group(pg): + raise ValueError("Invalid process group specified") + pg_store = _world.pg_map[pg] if pg in _world.pg_map else None + return Backend(not_none(pg_store)[0]) + +def _get_process_group_uid(pg: ProcessGroup) -> int: + backend = None + try: + backend = pg._get_backend(torch.device("cuda")) + except RuntimeError: + pass + if is_nccl_available() and isinstance(backend, ProcessGroupNCCL): + return backend.uid + return -1 + +def _get_pg_config(group: Optional[ProcessGroup] = None) -> Dict[str, Any]: + """ + Return the pg configuration of the given process group. 
+ + """ + if group is None: + pg = _get_default_group() + else: + pg = group + return { + "pg_name": _get_process_group_name(pg), + "uid": _get_process_group_uid(pg), + "backend_config": get_backend_config(pg), + "pg_size": _get_group_size(pg), + "ranks": get_process_group_ranks(pg), + } + +def _get_all_pg_configs() -> List[Dict[str, Any]]: + """ + Return the pg configuration of all the process groups. + + """ + config_info: List[Dict[str, Any]] = [] + for pg in _world.pg_map.keys(): + config_info.append(_get_pg_config(pg)) + return config_info + +def get_pg_count() -> int: + """ + Return the number of process groups. + + """ + return _world.group_count + +def _set_pg_timeout(timeout: timedelta, group: Optional[ProcessGroup] = None) -> None: + """ + Set the timeout for the given process group when users want to use a different timeout instead of + default values. + + Args: + timeout (timedelta): Timeout for operations executed against the process group which + users want to set. Default value is 10 minutes for NCCL and 30 minutes for other backends. + This is the duration after which collectives will be aborted asynchronously and the process will crash. + This is done since CUDA execution is async and it is no longer safe to continue executing user code since + failed async NCCL operations might result in subsequent CUDA operations running on corrupted data. + When TORCH_NCCL_BLOCKING_WAIT is set, the process will block and wait for this timeout. + + group (ProcessGroup, optional): The process group to work on. The + default is the general main process group. If another specific group + is specified, the calling process must be part of :attr:`group`. + + Returns: + None + """ + if group is None: + group = _get_default_group() + if _rank_not_in_group(group): + raise ValueError("Invalid process group specified") + assert isinstance(group, ProcessGroup) + devices = group._device_types + backends = set() + if torch.device("cpu") in devices and is_gloo_available(): + backend = group._get_backend(torch.device("cpu")) + if isinstance(backend, ProcessGroupGloo): + backends.add(backend) + if torch.device("cuda") in devices: + backend = group._get_backend(torch.device("cuda")) + if is_nccl_available() and isinstance(backend, ProcessGroupNCCL): + backends.add(backend) # type: ignore[arg-type] + elif is_gloo_available() and isinstance(backend, ProcessGroupGloo): + backends.add(backend) # type: ignore[arg-type] + if len(backends) == 0: + warnings.warn("Set timeout is now only supported for either nccl or gloo.") + for backend in backends: + backend._set_default_timeout(timeout) + + +@_exception_logger +@_time_logger +def init_process_group( + backend: Optional[str] = None, + init_method: Optional[str] = None, + timeout: Optional[timedelta] = None, + world_size: int = -1, + rank: int = -1, + store: Optional[Store] = None, + group_name: str = "", + pg_options: Optional[Any] = None, + device_id: Optional[torch.device] = None, +) -> None: + """ + Initialize the default distributed process group. + + This will also initialize the distributed package. + + There are 2 main ways to initialize a process group: + 1. Specify ``store``, ``rank``, and ``world_size`` explicitly. + 2. Specify ``init_method`` (a URL string) which indicates where/how + to discover peers. Optionally specify ``rank`` and ``world_size``, + or encode all required parameters in the URL and omit them. + + If neither is specified, ``init_method`` is assumed to be "env://". + + + Args: + backend (str or Backend, optional): The backend to use. 
Depending on + build-time configurations, valid values include ``mpi``, ``gloo``, + ``nccl``, and ``ucc``. If the backend is not provided, then both a ``gloo`` + and ``nccl`` backend will be created, see notes below for how multiple + backends are managed. This field can be given as a lowercase string + (e.g., ``"gloo"``), which can also be accessed via + :class:`Backend` attributes (e.g., ``Backend.GLOO``). If using + multiple processes per machine with ``nccl`` backend, each process + must have exclusive access to every GPU it uses, as sharing GPUs + between processes can result in deadlocks. ``ucc`` backend is + experimental. + init_method (str, optional): URL specifying how to initialize the + process group. Default is "env://" if no + ``init_method`` or ``store`` is specified. + Mutually exclusive with ``store``. + world_size (int, optional): Number of processes participating in + the job. Required if ``store`` is specified. + rank (int, optional): Rank of the current process (it should be a + number between 0 and ``world_size``-1). + Required if ``store`` is specified. + store(Store, optional): Key/value store accessible to all workers, used + to exchange connection/address information. + Mutually exclusive with ``init_method``. + timeout (timedelta, optional): Timeout for operations executed against + the process group. Default value is 10 minutes for NCCL and 30 minutes for other backends. + This is the duration after which collectives will be aborted asynchronously and the process will crash. + This is done since CUDA execution is async and it is no longer safe to continue executing user code since + failed async NCCL operations might result in subsequent CUDA operations running on corrupted data. + When TORCH_NCCL_BLOCKING_WAIT is set, the process will block and wait for this timeout. + + group_name (str, optional, deprecated): Group name. This argument is ignored + pg_options (ProcessGroupOptions, optional): process group options + specifying what additional options need to be passed in during + the construction of specific process groups. As of now, the only + options we support is ``ProcessGroupNCCL.Options`` for the ``nccl`` + backend, ``is_high_priority_stream`` can be specified so that + the nccl backend can pick up high priority cuda streams when + there're compute kernels waiting. + device_id (torch.device, optional): a single, specific device + to "bind" this process to, allowing for backend-specific + optimizations. Currently this has two effects, only under + NCCL: the communicator is immediately formed (calling + ``ncclCommInit*`` immediately rather than the normal lazy + call) and sub-groups will use ``ncclCommSplit`` when + possible to avoid unnecessary overhead of group creation. If you + want to know NCCL initialization error early, you can also use this + field. + + .. note:: To enable ``backend == Backend.MPI``, PyTorch needs to be built from source + on a system that supports MPI. + + .. note:: Support for multiple backends is experimental. Currently when no backend is + specified, both ``gloo`` and ``nccl`` backends will be created. The ``gloo`` backend + will be used for collectives with CPU tensors and the ``nccl`` backend will be used + for collectives with CUDA tensors. A custom backend can be specified by passing in + a string with format ":,:", e.g. + "cpu:gloo,cuda:custom_backend". 
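+
+    .. note:: A minimal sketch of the two initialization styles listed above; the
+        host, port and world size are placeholders, not defaults::
+
+            >>> # xdoctest: +SKIP("requires multiple cooperating processes")
+            >>> import torch.distributed as dist
+            >>> # Style 1: explicit store / rank / world_size
+            >>> store = dist.TCPStore("10.0.0.1", 29500, world_size=2, is_master=(rank == 0))
+            >>> dist.init_process_group("gloo", store=store, rank=rank, world_size=2)
+            >>> # Style 2: URL-based init_method; "env://" reads MASTER_ADDR,
+            >>> # MASTER_PORT, RANK and WORLD_SIZE from the environment
+            >>> dist.init_process_group("nccl", init_method="env://")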
+ + """ + + global _world + + global _backend + global _default_pg_init_method + + if GroupMember.WORLD is not None: + raise ValueError("trying to initialize the default process group twice!") + + set_pytorch_distributed_envs_from_justknobs() + + assert (store is None) or ( + init_method is None + ), "Cannot specify both init_method and store." + + if store is not None: + assert world_size > 0, "world_size must be positive if using store" + assert rank >= 0, "rank must be non-negative if using store" + elif init_method is None: + init_method = "env://" + + if backend: + backend = Backend(backend) + else: + backend = Backend("undefined") + + if timeout is None: + timeout = _get_default_timeout(backend) + + _check_valid_timeout(timeout) + + """ + Group name is not visible to users unless they access + internals of c10d. This means we can ignore the value + they provide as it not exposed in a public way. + """ + group_name = _process_group_name([], use_hashed_name=False) + if backend == Backend.MPI: + if world_size != -1 or rank != -1: + warnings.warn( + f"For MPI backend, world_size ({world_size}) and rank ({rank}) " + "are ignored since they are assigned by the " + "MPI runtime." + ) + + default_pg, _ = _new_process_group_helper( + -1, -1, [], backend, None, group_name, timeout=timeout + ) + _update_default_pg(default_pg) + else: + # backward compatible API + if store is None: + rendezvous_iterator = rendezvous( + not_none(init_method), rank, world_size, timeout=timeout + ) + store, rank, world_size = next(rendezvous_iterator) + store.set_timeout(timeout) + + # Use a PrefixStore to avoid accidental overrides of keys used by + # different systems (e.g. RPC) in case the store is multi-tenant. + store = PrefixStore("default_pg", store) + + default_pg, _ = _new_process_group_helper( + world_size, + rank, + [], + backend, + store, + group_name, + pg_options=pg_options, + timeout=timeout, + device_id=device_id, + ) + _update_default_pg(default_pg) + + _world.pg_group_ranks[GroupMember.WORLD] = {i: i for i in range(GroupMember.WORLD.size())} # type: ignore[attr-defined, index] + _backend = _world.pg_map[not_none(GroupMember.WORLD)][0] + _default_pg_init_method = init_method + + old_hook = sys.excepthook + + def _distributed_excepthook(*args): + old_stderr = sys.stderr + sys.stderr = buf = io.StringIO() + try: + old_hook(*args) + finally: + sys.stderr = old_stderr + msg = buf.getvalue() + prefix = f"[rank{get_rank()}]" + msg = "\n".join(f"{prefix}: {s}" if s != "" else "" for s in msg.split("\n")) + sys.stderr.write(msg) + sys.stderr.flush() + + sys.excepthook = _distributed_excepthook + + if _is_barrier_after_init() == 1: + # barrier at the end to ensure that once we return from this method, all + # process groups including global variables (if any) are updated + # correctly on all ranks. + # Update 04/2023: for large-scale runs, this barrier (esp. store-based + # barrier) may be costly and/or unscalable. Also, in a lot of cases, + # these barriers may be unnecessary, as proven by a green CI after + # removal. An environment variable `TORCH_DIST_INIT_BARRIER` has been + # added which enables this barrier only when set to 1. + logger.info( + "Performing barrier after ProcessGroup initialization since " + "TORCH_DIST_INIT_BARRIER = 1" + ) + if backend == Backend.MPI: + # MPI backend doesn't use store. + barrier() + else: + # Use store based barrier here since barrier() used a bunch of + # default devices and messes up NCCL internal state. 
+ _store_based_barrier(rank, store, group_name, world_size, timeout) + +def _get_split_source(pg): + split_from = None + if pg.bound_device_id: + split_from = pg._get_backend(pg.bound_device_id) + elif pg is _world.default_pg: + try: + split_from = pg._get_backend(torch.device("cuda")) + except RuntimeError: + # no cuda device associated with this backend + pass + + if not split_from or not split_from.supports_splitting: + return None + + # If necessary, find a backend to split from by peeling process + # group wrappers from our potentially wrapped process group. + while isinstance(split_from, _ProcessGroupWrapper): + split_from = split_from.wrapped_pg + + return split_from + +def _shutdown_backend(pg): + """ + Try to shut down the backend of a process group. + Currently, only ProcessGroupNCCL backend is supported. + No op for other backends. + """ + backend = None + try: + backend = pg._get_backend(torch.device("cuda")) + except RuntimeError: + pass + if isinstance(backend, ProcessGroupNCCL): + # explictly call shutdown to ensure that NCCL resources are released + backend._shutdown() + +def _new_process_group_helper( + group_size, + group_rank, + global_ranks_in_group, + backend, + store, + group_name, + pg_options=None, + timeout=None, + pg_tag=None, + device_id=None, +): + """ + Create a new distributed process group. + + This function must be called by ALL processes in the global group, even if + the calling process is not part of the newly created group. In that case, + this function returns GroupMember.NON_GROUP_MEMBER. + + This function is called with ``global_ranks_in_group == []`` for the default group. + """ + global _world + + if group_name in _world.pg_names.values(): + raise ValueError( + "The specified group name has already been " + "created, please use a different group name" + ) + + if device_id is not None and (device_id.index is None or device_id.type != 'cuda'): + raise ValueError("init_process_group device_id parameter must be a cuda device with an " + "id, e.g. cuda:0, not just cuda or cpu") + + # Note: _new_process_group_helper is only called from init_process_group, which always provides a timeout value + _check_valid_timeout(timeout) + + if pg_tag not in [None, ""]: + # creating with the same tag and rank set results in the same underlying PG + existing_group = _find_pg_by_ranks_and_tag(pg_tag, global_ranks_in_group) + if existing_group: + _, prefix_store = _world.pg_map[existing_group] + return existing_group, prefix_store + + # The list of group ranks is empty if we're creating the default group. + is_default_group = len(global_ranks_in_group) == 0 + + # nccl and potentially other backends allow creation of + # communicators based on pre-existing ones, which can save + # initialization time. Due to lazy initialization of + # communicators in some backends, we have to be careful and only + # split when we *know* the backends already are connected _on all + # ranks_. We can only know this if the group we are making is the + # entire world or if we have bound a device id to the world (which + # causes early connection initialization). + if (is_initialized() and + (len(global_ranks_in_group) == _get_default_group().size() or _get_default_group().bound_device_id)): + split_from = _get_split_source(_get_default_group()) + else: + split_from = None + + # If this is a subgroup (which means group_ranks is specified), + # we check if the current process is a member of the new group. 
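+    # (Illustration, ranks invented: with a world size of 4 and
+    # global_ranks_in_group == [0, 2], ranks 1 and 3 still execute this helper;
+    # if a split source exists they join the no-color split below and then
+    # return GroupMember.NON_GROUP_MEMBER without constructing a backend.)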
+ if not is_default_group: + global_rank = _get_default_group().rank() + if global_rank not in global_ranks_in_group: + # If we are using `ncclCommSplit` (or similar split from + # other APIs) to create the communicator, we will need to + # call `ncclCommSplit` on *all* ranks in this new group's + # parent group, even those not in the new group. This is + # a requirement of the NCCL API as otherwise we would get + # out of sync. + if split_from: + split_from.perform_nocolor_split(_get_default_group().bound_device_id) + return GroupMember.NON_GROUP_MEMBER, None + + prefix_store = PrefixStore(f"{group_name}/", store) + base_pg_options = ProcessGroup.Options(backend=str(backend)) + base_pg_options._timeout = timeout + pg: ProcessGroup = ProcessGroup(prefix_store, group_rank, group_size, base_pg_options) + if device_id: + pg.bound_device_id = device_id + backend_config = BackendConfig(backend) + backend_class: torch._C._distributed_c10d.Backend + for device, backend_str in backend_config.get_device_backend_map().items(): + # Use the group name as prefix in the default store, such that + # a single store can be reused by multiple groups. + backend_prefix_store = PrefixStore(f"{device}/", prefix_store) + + if backend_str == Backend.MPI: + if not is_mpi_available(): + raise RuntimeError( + "Distributed package doesn't have MPI built in." + " MPI is only included if you build PyTorch from" + " source on a host that has MPI installed." + ) + backend_class = ProcessGroupMPI.create(global_ranks_in_group) + backend_type = ProcessGroup.BackendType.MPI + if not backend_class: + return GroupMember.NON_GROUP_MEMBER, None + # create new process group with accurate rank and size + if pg.rank() == -1 and pg.size() == -1: + pg = ProcessGroup(backend_prefix_store, backend_class.rank(), backend_class.size(), base_pg_options) + elif backend_str == Backend.GLOO: + # TODO: remove this check after lazy initialization is supported + # if pg_options is not None: + # raise RuntimeError("GLOO options not supported") + backend_class = ProcessGroupGloo(backend_prefix_store, group_rank, group_size, timeout=timeout) + backend_type = ProcessGroup.BackendType.GLOO + elif backend_str == Backend.NCCL: + if not is_nccl_available(): + raise RuntimeError("Distributed package doesn't have NCCL built in") + if pg_options is not None: + assert isinstance( + pg_options, ProcessGroupNCCL.Options + ), "Expected pg_options argument to be of type ProcessGroupNCCL.Options" + if pg_options._timeout != timeout: + warnings.warn( + "pg_options._timeout was specified, " + "but timeout kwarg has a default value that will always override it. " + ) + else: + # default pg_options for NCCL + pg_options = ProcessGroupNCCL.Options() + pg_options.is_high_priority_stream = False + pg_options._timeout = timeout + + if split_from: + pg_options.split_from = split_from + pg_options.split_color = _process_group_color(global_ranks_in_group) + pg_options.global_ranks_in_group = global_ranks_in_group + backend_class = ProcessGroupNCCL( + backend_prefix_store, group_rank, group_size, pg_options) + backend_type = ProcessGroup.BackendType.NCCL + elif backend_str == Backend.UCC and is_ucc_available(): + # TODO: once UCC plugin is fully deprecated, remove + # is_ucc_available() from above elif-condition and raise + # RuntimeError if is_ucc_available() returns false. 
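+            # (Comment added for clarity: the UCC branch mirrors the Gloo branch
+            # above: the backend is constructed directly from the prefix store,
+            # group rank/size and timeout, with no pg_options handling.)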
+ + backend_class = ProcessGroupUCC(backend_prefix_store, group_rank, group_size, timeout=timeout) + backend_type = ProcessGroup.BackendType.UCC + else: + assert backend_str.upper() in Backend._plugins, ( + f"Unknown c10d backend type {backend_str.upper()}" + ) + + backend_plugin = Backend._plugins[backend_str.upper()] + creator_fn = backend_plugin.creator_fn + extended_api = backend_plugin.extended_api + backend_type = ProcessGroup.BackendType.CUSTOM + + if not extended_api: + backend_class = creator_fn(backend_prefix_store, group_rank, group_size, timeout) + else: + dist_backend_opts = _DistributedBackendOptions() + dist_backend_opts.store = backend_prefix_store + dist_backend_opts.group_rank = group_rank + dist_backend_opts.group_size = group_size + dist_backend_opts.timeout = timeout + dist_backend_opts.group_id = group_name + dist_backend_opts.global_ranks_in_group = global_ranks_in_group + + backend_class = creator_fn(dist_backend_opts, pg_options) + + # Set sequence numbers for gloo and nccl backends. + if backend_str == Backend.GLOO: + assert isinstance(backend_class, ProcessGroupGloo) + backend_class._set_sequence_number_for_group() + elif backend_str == Backend.NCCL: + assert isinstance(backend_class, ProcessGroupNCCL) + backend_class._set_sequence_number_for_group() + + # If the type is a subclass of ProcessGroup then return this process group immediately + # TODO: This defaults to the old behavior for PythonProcessGroups which overwrites the + # ProcessGroup instance + if issubclass(type(backend_class), ProcessGroup): + pg = backend_class # type: ignore[assignment] + break + + # Process group wrapper initialization for supported PGs when TORCH_DISTRIBUTED_DEBUG is set + if backend_str in [Backend.GLOO, Backend.NCCL, Backend.UCC]: + # In debug mode and if GLOO is available, wrap in a wrapper PG that + # enables enhanced collective checking for debuggability. + if get_debug_level() == DebugLevel.DETAIL: + if not _GLOO_AVAILABLE: + logger.info( + """TORCH_DISTRIBUTED_DEBUG was set to DETAIL, but + GLOO is not available. 
Build with Gloo to + create a wrapper process group in debug mode + to aid collective desynchronization debugging.""" + ) + else: + backend_class = _create_process_group_wrapper( + wrapped_pg=backend_class, + store_prefix=group_name, + store=backend_prefix_store, + rank=group_rank, + world_size=group_size, + timeout=timeout, + ) + + # register only a single backend when all get_device_backend_map values are the same + if len(set(backend_config.get_device_backend_map().values())) == 1: + for device in backend_config.get_device_backend_map().keys(): + pg._register_backend(torch.device(device), backend_type, backend_class) + + # break out of outer loop to not create any more backends + break + + pg._register_backend(torch.device(device), backend_type, backend_class) + + if device_id and pg._get_backend(device_id).supports_splitting: + eager_backend = pg._get_backend(device_id) + eager_backend.eager_connect_single_device(device_id) + + # update global state + assert group_name is not None + _world.pg_map[pg] = (backend, prefix_store) + _world.pg_names[pg] = group_name + pg._set_group_name(group_name) + _register_process_group(group_name, pg) + + _world.pg_backend_config[pg] = str(backend_config) + # "" is the default tag for user PGs + if pg_tag in [None, ""]: + pg_tag = f"ptd:{group_name}" + _world.tags_to_pg.setdefault("", []).append(pg) + else: + pg_tag = f"user:{pg_tag}" + + _world.tags_to_pg.setdefault(pg_tag, []).append(pg) + _world.pg_to_tag[pg] = pg_tag + return pg, prefix_store + +def destroy_process_group(group: Optional[ProcessGroup] = None): + """ + Destroy a given process group, and deinitialize the distributed package. + + Args: + group (ProcessGroup, optional): The process group to be destroyed, if + group.WORLD is given, all process + groups including the default one will + be destroyed. + """ + global _world + + if group == GroupMember.NON_GROUP_MEMBER: + return + + if group is None: + pg = GroupMember.WORLD + else: + pg = group + + assert pg is not None + if _world.pg_map.get(pg, None) is None: + raise ValueError("Invalid process group specified") + + # When users register Python onCompletion hooks, those hooks will run on a + # different thread than the main thread. Today, the ProcessGroup dtor does + # wait for that thread. However, the dtor might finish after the Python + # Interpreter exits. After that grabbing the GIL for the Python hook will crash. + # We can either revive the interpreter when running hooks or keep the main one + # alive until all works and hooks are done. The current implementation does the + # latter. Therefore, we explicitly call _wait_for_pending_works() here to wait + # for the pending hooks to finish. + if pg.name().lower() == "nccl" and pg._has_hooks(): + pg._wait_for_pending_works() + + if group is None or group == GroupMember.WORLD: + if _abort_in_destroy_pg(): + # shutdown all backends in the order of pg names. shutting down in order because + # ncclCommAbort() was a 'collective' call in some versions of NCCL. 
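+            # (Note added for clarity: sorting by registered name, in reverse,
+            # gives every rank the same deterministic teardown order, which matters
+            # when the abort behaves like a collective as described above.)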
+ for pg_to_shutdown in sorted(_world.pg_names, key=lambda x: _world.pg_names[x], reverse=True): + _shutdown_backend(pg_to_shutdown) + + _update_default_pg(None) + _world.pg_map.clear() + _world.pg_names.clear() + _world.pg_group_ranks.clear() + _world.pg_backend_config.clear() + _world.pg_to_tag.clear() + _world.tags_to_pg.clear() + _world.pg_coalesce_state.clear() + _world.pg_default_device.clear() + _unregister_all_process_groups() + + # when process group doesn't have an explicit name (only WORLD (default) + # process group can have an explicit name), we use global _world.group_count + # to generate the name. We need to reset the counter on destruction to + # allow consistent value to be generated when we re-create process + # groups after some trainers recover from failure + # + # We only reset this when WORLD is being destroyed because if this + # process group is in good state, we aren't dealing with failures. + _world.group_count = 0 + else: + if _abort_in_destroy_pg(): + _shutdown_backend(pg) + del _world.pg_map[pg] + del _world.pg_names[pg] + del _world.pg_group_ranks[pg] + del _world.pg_backend_config[pg] + if pg in _world.pg_default_device: + del _world.pg_default_device[pg] + if pg in _world.pg_coalesce_state.keys(): + warnings.warn( + "Some coalesced collectives haven't been launched when " + "ProcessGroup is destroyed. They will be cleaned." + ) + del _world.pg_coalesce_state[pg] + + tag = _world.pg_to_tag.get(pg) + del _world.pg_to_tag[pg] + if tag is not None: + try: + _world.tags_to_pg[tag].remove(pg) + if tag.startswith("ptd:"): + _world.tags_to_pg[""].remove(pg) + except Exception: + pass + _unregister_process_group(pg.group_name) + + +def get_rank(group: Optional[ProcessGroup] = None) -> int: + """ + Return the rank of the current process in the provided ``group``, default otherwise. + + Rank is a unique identifier assigned to each process within a distributed + process group. They are always consecutive integers ranging from 0 to + ``world_size``. + + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + Returns: + The rank of the process group + -1, if not part of the group + + """ + if _rank_not_in_group(group): + return -1 + + default_pg = _get_default_group() + if group is None or group is GroupMember.WORLD: + return default_pg.rank() + + return get_group_rank(group, default_pg.rank()) + + +def get_world_size(group: Optional[ProcessGroup] = None) -> int: + """ + Return the number of processes in the current process group. + + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + Returns: + The world size of the process group + -1, if not part of the group + + """ + if _rank_not_in_group(group): + return -1 + + return _get_group_size(group) + + +def isend(tensor: torch.Tensor, dst: int, group: Optional[ProcessGroup] = None, tag: int = 0) -> Optional[Work]: + """ + Send a tensor asynchronously. + + .. warning:: + Modifying ``tensor`` before the request completes causes undefined + behavior. + + .. warning:: + ``tag`` is not supported with the NCCL backend. + + Args: + tensor (Tensor): Tensor to send. + dst (int): Destination rank on global process group (regardless of ``group`` argument) + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + tag (int, optional): Tag to match send with remote recv + + Returns: + A distributed request object. 
+ None, if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("isend") + return None + + if tensor.is_complex(): + tensor = torch.view_as_real(tensor) + + if group is None or group is GroupMember.WORLD: + pg = _get_default_group() + else: + pg = group + dst = get_group_rank(pg, dst) + + return pg.send([tensor], dst, tag) + +def irecv(tensor: torch.Tensor, src: Optional[int] = None, group: Optional[ProcessGroup] = None, tag: int = 0) -> Optional[Work]: + """ + Receives a tensor asynchronously. + + .. warning:: + ``tag`` is not supported with the NCCL backend. + + Args: + tensor (Tensor): Tensor to fill with received data. + src (int, optional): Source rank on global process group (regardless of ``group`` argument). + Will receive from any process if unspecified. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + tag (int, optional): Tag to match recv with remote send + + Returns: + A distributed request object. + None, if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("irecv") + return None + + if tensor.is_complex(): + tensor = torch.view_as_real(tensor) + + if group is None or group is GroupMember.WORLD: + pg = _get_default_group() + else: + pg = group + + if src is None: + return pg.recv_anysource([tensor], tag) + else: + if pg is GroupMember.WORLD: + return pg.recv([tensor], src, tag) + else: + group_src_rank = get_group_rank(pg, src) + return pg.recv([tensor], group_src_rank, tag) + +@_exception_logger +def send(tensor: torch.Tensor, dst: int, group: Optional[ProcessGroup] = None, tag: int = 0) -> None: + """ + Send a tensor synchronously. + + Args: + tensor (Tensor): Tensor to send. + dst (int): Destination rank on global process group (regardless of ``group`` argument). + Destination rank should not be the same as the rank of the current process. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + tag (int, optional): Tag to match send with remote recv + + """ + if get_rank() == dst: + raise ValueError( + "Invalid destination rank: destination rank should not be the same as " + "the rank of the current process." + ) + + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("send") + return None + + if tensor.is_complex(): + tensor = torch.view_as_real(tensor) + + if group is None or group is GroupMember.WORLD: + default_pg = _get_default_group() + default_pg.send([tensor], dst, tag).wait() + else: + group_dst_rank = get_group_rank(group, dst) + group.send([tensor], group_dst_rank, tag).wait() + +@_exception_logger +def recv(tensor: torch.Tensor, src: Optional[int] = None, group: Optional[ProcessGroup] = None, tag: int = 0) -> int: + """ + Receives a tensor synchronously. + + Args: + tensor (Tensor): Tensor to fill with received data. + src (int, optional): Source rank on global process group (regardless of ``group`` argument). + Will receive from any process if unspecified. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. 
+ tag (int, optional): Tag to match recv with remote send + + Returns: + Sender rank + -1, if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("recv") + return -1 + + if tensor.is_complex(): + tensor = torch.view_as_real(tensor) + + if group is None: + pg = _get_default_group() + else: + pg = group + + if src is None: + work = pg.recv_anysource([tensor], tag) + work.wait() + src_rank = work._source_rank() + if group is None or group is GroupMember.WORLD: + return src_rank + else: + return get_global_rank(pg, src_rank) + else: + if group is None or group is GroupMember.WORLD: + pg.recv([tensor], src, tag).wait() + else: + group_src_rank = get_group_rank(pg, src) + pg.recv([tensor], group_src_rank, tag).wait() + return src + + +class _IllegalWork(Work): + def __getattribute__(self, name): + if name in ["is_success", "exception", "wait", "source_rank", "_source_rank", "result", "synchronize"]: + raise ValueError(f"Illegal to call {name} on IllegalWork object") + + +class _CoalescingManager: + def __init__(self): + self.works: List[Work] = [] + + def append(self, work: Work): + if work: + self.works.append(work) + + def wait(self): + for work in self.works: + work.wait() + + +@contextlib.contextmanager +def _coalescing_manager( + group: Optional[ProcessGroup] = None, + device: Optional[torch.device] = None, + async_ops: Optional[bool] = False, +): + """ + Context manager used to coalesce collectives or P2P operations when possible. + + Args: + group (`ProcessGroup`, optional): The process group to work on. If None, + the default process group will be used. + device (`torch.device`, optional): Default is None, set to a device if + there isn't a `**_coalesced` implementation by the backend. + async_ops (`bool`, optional): whether the coalesced ops are async ops. + + Examples: + >>> # xdoctest: +SKIP("no rank") + >>> # Synchronous ops + >>> with _coalescing_manager(): + >>> for i in range(num_colls): + >>> dist.all_reduce(tensors[i]) + >>> # Asynchronous ops + >>> with _coalescing_manager(async_ops=True) as cm: + >>> for i in range(num_colls): + >>> dist.all_reduce(tensors[i]) + >>> cm.wait() + + .. warning:: + :func:`_coalescing_manager` currently do not support coalescing + all-reduces with different reduce operators, e.g. `ReduceOp.SUM` mixed + with `ReduceOp.PRODUCT`. + """ + group = group or _get_default_group() + op_list = _world.pg_coalesce_state.setdefault(group, []) + if op_list: + raise ValueError("ProcessGroup has non-empty op list at the start of coalescing") + if device: + group._start_coalescing(device) + cm = _CoalescingManager() + yield cm + op_list = _world.pg_coalesce_state.pop(group) + if op_list: + # Collectives supporting "Fast Path" coalescing are captured. + # See implementation in corresponding collective APIs. 
+ # Currently supported: + # - coalesced `all_reduce` + # - coalesced `all_gather_into_tensor` + # - coalesced `reduce_scatter_tensor` + op0 = op_list[0].op + if op0 == all_reduce: + tensors = [] + for op in op_list: + tensors.append(op.tensor) + all_reduce_opts = AllreduceCoalescedOptions() + all_reduce_opts.reduceOp = not_none(op_list[0].redop) + work = group.allreduce_coalesced(tensors, all_reduce_opts) + elif op0 == all_gather_into_tensor: + inputs = [] + outputs = [] + for op in op_list: + inputs.append(op.tensor) + outputs.append(not_none(op.dst_tensor)) + work = group.allgather_into_tensor_coalesced(outputs, inputs) + elif op0 == reduce_scatter_tensor: + inputs = [] + outputs = [] + for op in op_list: + inputs.append(op.tensor) + outputs.append(not_none(op.dst_tensor)) + reduce_opts = ReduceScatterOptions() + reduce_opts.reduceOp = not_none(op_list[0].redop) + work = group.reduce_scatter_tensor_coalesced(outputs, inputs, reduce_opts) + else: + raise AssertionError( + f"Coalescing manager does not support fast-path coalescing of {op0}, " + f"yet {op0} is still recorded in op list. This is an internal error of c10d." + ) + + if device: + # Old style of letting each coll inside the context manager to call into C++ counterpart via python binding + work = group._end_coalescing(device) + + if async_ops: + cm.append(work) # type: ignore[possibly-undefined] + else: + work.wait() # type: ignore[possibly-undefined] + + +def batch_isend_irecv(p2p_op_list): + """ + Send or Receive a batch of tensors asynchronously and return a list of requests. + + Process each of the operations in ``p2p_op_list`` and return the corresponding + requests. NCCL, Gloo, and UCC backend are currently supported. + + Args: + p2p_op_list: A list of point-to-point operations(type of each operator is + ``torch.distributed.P2POp``). The order of the isend/irecv in the list + matters and it needs to match with corresponding isend/irecv on the + remote end. + + Returns: + A list of distributed request objects returned by calling the corresponding + op in the op_list. + + Examples: + >>> # xdoctest: +SKIP("no rank") + >>> send_tensor = torch.arange(2, dtype=torch.float32) + 2 * rank + >>> recv_tensor = torch.randn(2, dtype=torch.float32) + >>> send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1)%world_size) + >>> recv_op = dist.P2POp(dist.irecv, recv_tensor, (rank - 1 + world_size)%world_size) + >>> reqs = batch_isend_irecv([send_op, recv_op]) + >>> for req in reqs: + >>> req.wait() + >>> recv_tensor + tensor([2, 3]) # Rank 0 + tensor([0, 1]) # Rank 1 + + .. note:: Note that when this API is used with the NCCL PG backend, users must set + the current GPU device with `torch.cuda.set_device`, otherwise it will + lead to unexpected hang issues. + + In addition, if this API is the first collective call in the ``group`` + passed to ``dist.P2POp``, all ranks of the ``group`` must participate in + this API call; otherwise, the behavior is undefined. If this API call is + not the first collective call in the ``group``, batched P2P operations + involving only a subset of ranks of the ``group`` are allowed. 
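+
+    Example (GPU variant of the example above; ``local_rank`` is assumed to be
+    provided by the launcher and is not defined by this module)::
+
+        >>> # xdoctest: +SKIP("no rank")
+        >>> torch.cuda.set_device(local_rank)
+        >>> send_tensor = torch.arange(2, dtype=torch.float32, device="cuda") + 2 * rank
+        >>> recv_tensor = torch.randn(2, dtype=torch.float32, device="cuda")
+        >>> send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size)
+        >>> recv_op = dist.P2POp(dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size)
+        >>> reqs = batch_isend_irecv([send_op, recv_op])
+        >>> for req in reqs:
+        >>>     req.wait()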
+ """ + _check_p2p_op_list(p2p_op_list) + group = p2p_op_list[0].group + device = p2p_op_list[0].tensor.device + if device.type == "cuda": + # NCCL style coalescing + with _coalescing_manager(group, device, async_ops=True) as cm: + for p2p_op in p2p_op_list: + p2p_op.op(p2p_op.tensor, p2p_op.peer, p2p_op.group, p2p_op.tag) + return cm.works + else: + # Backward support for Gloo + reqs = [] + for p2p_op in p2p_op_list: + work = p2p_op.op(p2p_op.tensor, p2p_op.peer, p2p_op.group, p2p_op.tag) + if work: + reqs.append(work) + return reqs + + +@_exception_logger +def broadcast(tensor, src, group=None, async_op=False): + """ + Broadcasts the tensor to the whole group. + + ``tensor`` must have the same number of elements in all processes + participating in the collective. + + Args: + tensor (Tensor): Data to be sent if ``src`` is the rank of current + process, and tensor to be used to save received data otherwise. + src (int): Source rank on global process group (regardless of ``group`` argument). + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("broadcast") + return + + opts = BroadcastOptions() + opts.rootRank = src + opts.rootTensor = 0 + opts.asyncOp = async_op + + if group is None or group is GroupMember.WORLD: + default_pg = _get_default_group() + work = default_pg.broadcast([tensor], opts) + else: + group_src_rank = get_group_rank(group, src) + opts.rootRank = group_src_rank + work = group.broadcast([tensor], opts) + if async_op: + return work + else: + work.wait() + +@_exception_logger +def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False): + """ + Reduces the tensor data across all machines in a way that all get the final result. + + After the call ``tensor`` is going to be bitwise identical in all processes. + + Complex tensors are supported. + + Args: + tensor (Tensor): Input and output of the collective. The function + operates in-place. + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + Examples: + >>> # xdoctest: +SKIP("no rank") + >>> # All tensors below are of torch.int64 type. + >>> # We have 2 process groups, 2 ranks. + >>> device = torch.device(f'cuda:{rank}') + >>> tensor = torch.arange(2, dtype=torch.int64, device=device) + 1 + 2 * rank + >>> tensor + tensor([1, 2], device='cuda:0') # Rank 0 + tensor([3, 4], device='cuda:1') # Rank 1 + >>> dist.all_reduce(tensor, op=ReduceOp.SUM) + >>> tensor + tensor([4, 6], device='cuda:0') # Rank 0 + tensor([4, 6], device='cuda:1') # Rank 1 + + >>> # All tensors below are of torch.cfloat type. + >>> # We have 2 process groups, 2 ranks. 
+ >>> tensor = torch.tensor([1+1j, 2+2j], dtype=torch.cfloat, device=device) + 2 * rank * (1+1j) + >>> tensor + tensor([1.+1.j, 2.+2.j], device='cuda:0') # Rank 0 + tensor([3.+3.j, 4.+4.j], device='cuda:1') # Rank 1 + >>> dist.all_reduce(tensor, op=ReduceOp.SUM) + >>> tensor + tensor([4.+4.j, 6.+6.j], device='cuda:0') # Rank 0 + tensor([4.+4.j, 6.+6.j], device='cuda:1') # Rank 1 + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("all_reduce") + return + + if tensor.is_complex(): + if not supports_complex(op): + raise ValueError(f"all_reduce does not support {op} on complex tensors") + tensor = torch.view_as_real(tensor) + + opts = AllreduceOptions() + opts.reduceOp = op + if group is None: + group = _get_default_group() + + if group in _world.pg_coalesce_state.keys(): + # We are in coalescing context, do not issue single operation, just append a collective representation + coll = _CollOp(all_reduce, tensor, None, op, None) + _world.pg_coalesce_state[group].append(coll) + if async_op: + return _IllegalWork() + else: + return None + + work = group.allreduce([tensor], opts) + + if async_op: + return work + else: + work.wait() + +@_exception_logger +def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False): + """ + WARNING: at this time individual shape checking is not implemented across nodes. + + For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the + rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the allreduce + operation will proceed without complaint and return erroneous outputs. This lack + of shape checking results in significant performance improvements but users of this + function should take extra care to ensure that each node passes in tensors whose + shapes match across nodes. + + Reduces each tensor in tensors (residing on the same device) across all machines + in such a way that all get the final result. + + After the call each tensor in tensors is going to bitwise identical + in all processes. + + Complex tensors are supported. + + Args: + tensors (Union[List[Tensor], Tensor]): Input and output of the collective. + The function operates in-place. + op (Optional[ReduceOp]): One of the values from + ``torch.distributed.ReduceOp`` enum. Specifies an operation used for + element-wise reductions. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (Optional[bool]): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + """ + warnings.warn( + "torch.distributed.all_reduce_coalesced will be deprecated. 
If you must " + "use it, please revisit our documentation later at " + "https://pytorch.org/docs/master/distributed.html#collective-functions" + ) + if isinstance(tensors, torch.Tensor): + tensors = [tensors] + _check_tensor_list(tensors, "tensor") + _ensure_all_tensors_same_dtype(tensors) + if _rank_not_in_group(group): + _warn_not_in_group("all_reduce_coalesced") + return + + if any(t.is_complex() for t in tensors) and not supports_complex(op): + raise ValueError(f"all_reduce does not support {op} on complex tensors") + + tensors = [t if not t.is_complex() else torch.view_as_real(t) for t in tensors] + + opts = AllreduceCoalescedOptions() + opts.reduceOp = op + if group is None: + default_pg = _get_default_group() + work = default_pg.allreduce_coalesced(tensors, opts) + else: + work = group.allreduce_coalesced(tensors, opts) + + if async_op: + return work.get_future() + else: + work.wait() + +@_exception_logger +def reduce(tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): + """ + Reduces the tensor data across all machines. + + Only the process with rank ``dst`` is going to receive the final result. + + Args: + tensor (Tensor): Input and output of the collective. The function + operates in-place. + dst (int): Destination rank on global process group (regardless of ``group`` argument) + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + if _rank_not_in_group(group): + _warn_not_in_group("reduce") + return + + opts = ReduceOptions() + opts.reduceOp = op + opts.rootRank = dst + + if group is None or group is GroupMember.WORLD: + default_pg = _get_default_group() + work = default_pg.reduce([tensor], opts) + else: + group_dst_rank = get_group_rank(group, dst) + opts.rootRank = group_dst_rank + work = group.reduce([tensor], opts) + + if async_op: + return work + else: + work.wait() + +def _object_to_tensor(obj, device, group): + f = io.BytesIO() + _pickler(f).dump(obj) + byte_storage = torch.ByteStorage._from_buffer(f.getvalue()) # type: ignore[attr-defined] + # Do not replace `torch.ByteTensor` or `torch.LongTensor` with torch.tensor and specifying dtype. + # Otherwise, it will casue 100X slowdown. 
+ # See: https://github.com/pytorch/pytorch/issues/65696 + byte_tensor = torch.ByteTensor(byte_storage).to(device) + if get_debug_level() == DebugLevel.DETAIL and is_nccl_available(): + backend = get_backend(group) + if backend == Backend.NCCL: + hash = torch._C._distributed_c10d._hash_tensors([byte_tensor]) + logger.warning(f"_object_to_tensor size: {byte_tensor.numel()} hash value: {hash}") # noqa: G004 + local_size = torch.LongTensor([byte_tensor.numel()]).to(device) + return byte_tensor, local_size + + +def _tensor_to_object(tensor, tensor_size, group): + if get_debug_level() == DebugLevel.DETAIL and is_nccl_available(): + backend = get_backend(group) + if backend == Backend.NCCL: + hash = torch._C._distributed_c10d._hash_tensors([tensor]) + logger.warning(f"_tensor_to_object size: {tensor.numel()} hash value: {hash}") # noqa: G004 + tensor = tensor.cpu() + buf = tensor.numpy().tobytes()[:tensor_size] + return _unpickler(io.BytesIO(buf)).load() + + +@_exception_logger +def all_gather_object(object_list, obj, group=None): + """ + Gathers picklable objects from the whole group into a list. + + Similar to :func:`all_gather`, but Python objects can be passed in. + Note that the object must be picklable in order to be gathered. + + Args: + object_list (list[Any]): Output list. It should be correctly sized as the + size of the group for this collective and will contain the output. + obj (Any): Pickable Python object to be broadcast from current process. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. Default is ``None``. + + Returns: + None. If the calling rank is part of this group, the output of the + collective will be populated into the input ``object_list``. If the + calling rank is not part of the group, the passed in ``object_list`` will + be unmodified. + + .. note:: Note that this API differs slightly from the :func:`all_gather` + collective since it does not provide an ``async_op`` handle and thus + will be a blocking call. + + .. note:: For NCCL-based processed groups, internal tensor representations + of objects must be moved to the GPU device before communication takes + place. In this case, the device used is given by + ``torch.cuda.current_device()`` and it is the user's responsiblity to + ensure that this is set so that each rank has an individual GPU, via + ``torch.cuda.set_device()``. + + .. warning:: + :func:`all_gather_object` uses ``pickle`` module implicitly, which is + known to be insecure. It is possible to construct malicious pickle data + which will execute arbitrary code during unpickling. Only call this + function with data you trust. + + .. warning:: + Calling :func:`all_gather_object` with GPU tensors is not well supported + and inefficient as it incurs GPU -> CPU transfer since tensors would be + pickled. Please consider using :func:`all_gather` instead. + + Example:: + >>> # xdoctest: +SKIP("need process group init") + >>> # Note: Process group initialization omitted on each rank. + >>> import torch.distributed as dist + >>> # Assumes world_size of 3. 
+ >>> gather_objects = ["foo", 12, {1: 2}] # any picklable object + >>> output = [None for _ in gather_objects] + >>> dist.all_gather_object(output, gather_objects[dist.get_rank()]) + >>> output + ['foo', 12, {1: 2}] + """ + if _rank_not_in_group(group): + _warn_not_in_group("all_gather_object") + return + + current_device = _get_pg_default_device(group) + input_tensor, local_size = _object_to_tensor(obj, current_device, group) + + # Gather all local sizes. This is so that we can find the max size, and index + # until the correct size when deserializing the tensors. + group_size = get_world_size(group=group) + object_sizes_tensor = torch.zeros( + group_size, dtype=torch.long, device=current_device + ) + object_size_list = [ + object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size) + ] + # Allgather tensor sizes + all_gather(object_size_list, local_size, group=group) + max_object_size = int(max(object_size_list).item()) # type: ignore[type-var] + # Resize tensor to max size across all ranks. + input_tensor.resize_(max_object_size) + coalesced_output_tensor = torch.empty( + max_object_size * group_size, dtype=torch.uint8, device=current_device + ) + # Output tensors are nonoverlapping views of coalesced_output_tensor + output_tensors = [ + coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)] + for i in range(group_size) + ] + all_gather(output_tensors, input_tensor, group=group) + # Deserialize outputs back to object. + for i, tensor in enumerate(output_tensors): + tensor = tensor.type(torch.uint8) + tensor_size = object_size_list[i] + object_list[i] = _tensor_to_object(tensor, tensor_size, group) + + +@_exception_logger +def gather_object(obj, object_gather_list=None, dst=0, group=None): + """ + Gathers picklable objects from the whole group in a single process. + + Similar to :func:`gather`, but Python objects can be passed in. Note that the + object must be picklable in order to be gathered. + + Args: + obj (Any): Input object. Must be picklable. + object_gather_list (list[Any]): Output list. On the ``dst`` rank, it + should be correctly sized as the size of the group for this + collective and will contain the output. Must be ``None`` on non-dst + ranks. (default is ``None``) + dst (int, optional): Destination rank on global process group (regardless of ``group`` argument). (default is 0) + group: (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. Default is ``None``. + + Returns: + None. On the ``dst`` rank, ``object_gather_list`` will contain the + output of the collective. + + .. note:: Note that this API differs slightly from the gather collective + since it does not provide an async_op handle and thus will be a blocking + call. + + .. note:: For NCCL-based processed groups, internal tensor representations + of objects must be moved to the GPU device before communication takes + place. In this case, the device used is given by + ``torch.cuda.current_device()`` and it is the user's responsiblity to + ensure that this is set so that each rank has an individual GPU, via + ``torch.cuda.set_device()``. + + .. warning:: + :func:`gather_object` uses ``pickle`` module implicitly, which is + known to be insecure. It is possible to construct malicious pickle data + which will execute arbitrary code during unpickling. Only call this + function with data you trust. + + .. 
warning:: + Calling :func:`gather_object` with GPU tensors is not well supported + and inefficient as it incurs GPU -> CPU transfer since tensors would be + pickled. Please consider using :func:`gather` instead. + + Example:: + >>> # xdoctest: +SKIP("need process group init") + >>> # Note: Process group initialization omitted on each rank. + >>> import torch.distributed as dist + >>> # Assumes world_size of 3. + >>> gather_objects = ["foo", 12, {1: 2}] # any picklable object + >>> output = [None for _ in gather_objects] + >>> dist.gather_object( + ... gather_objects[dist.get_rank()], + ... output if dist.get_rank() == 0 else None, + ... dst=0 + ... ) + >>> # On rank 0 + >>> output + ['foo', 12, {1: 2}] + """ + if _rank_not_in_group(group): + _warn_not_in_group("gather_object") + return + + # Ensure object_gather_list is specified appropriately. + my_rank = get_rank() + _validate_output_list_for_rank(my_rank, dst, object_gather_list) + current_device = _get_pg_default_device(group) + input_tensor, local_size = _object_to_tensor(obj, current_device, group) + + # Gather all local sizes. This is so that we can find the max size, and index + # until the correct size when deserializing the tensors. + group_size = get_world_size(group=group) + object_sizes_tensor = torch.zeros( + group_size, dtype=torch.long, device=current_device + ) + object_size_list = [ + object_sizes_tensor[i].unsqueeze(dim=0) for i in range(group_size) + ] + # Allgather tensor sizes. An all-gather is needed here despite this being a + # gather, since each rank needs to broadcast a tensor of the same (maximal) + # size. + all_gather(object_size_list, local_size, group=group) + max_object_size = int(max(object_size_list).item()) # type: ignore[type-var] + # Resize tensor to max size across all ranks. + input_tensor.resize_(max_object_size) + # Avoid populating output tensors if the result won't be gathered on this rank. + if my_rank == dst: + coalesced_output_tensor = torch.empty( + max_object_size * group_size, dtype=torch.uint8, device=current_device + ) + # Output tensors are nonoverlapping views of coalesced_output_tensor + output_tensors = [ + coalesced_output_tensor[max_object_size * i : max_object_size * (i + 1)] + for i in range(group_size) + ] + # All ranks call gather with equal-sized tensors. + gather( + input_tensor, + gather_list=output_tensors if my_rank == dst else None, # type: ignore[possibly-undefined] + dst=dst, + group=group, + ) + if my_rank != dst: + return + for i, tensor in enumerate(output_tensors): + tensor = tensor.type(torch.uint8) + tensor_size = object_size_list[i] + object_gather_list[i] = _tensor_to_object(tensor, tensor_size, group) + + +@_exception_logger +def broadcast_object_list(object_list, src=0, group=None, device=None): + """ + Broadcasts picklable objects in ``object_list`` to the whole group. + + Similar to :func:`broadcast`, but Python objects can be passed in. + Note that all objects in ``object_list`` must be picklable in order to be + broadcasted. + + Args: + object_list (List[Any]): List of input objects to broadcast. + Each object must be picklable. Only objects on the ``src`` rank will + be broadcast, but each rank must provide lists of equal sizes. + src (int): Source rank from which to broadcast ``object_list``. + Source rank is based on global process group (regardless of ``group`` argument) + group: (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. Default is ``None``. 
+ device (``torch.device``, optional): If not None, the objects are + serialized and converted to tensors which are moved to the + ``device`` before broadcasting. Default is ``None``. + + Returns: + ``None``. If rank is part of the group, ``object_list`` will contain the + broadcasted objects from ``src`` rank. + + .. note:: For NCCL-based process groups, internal tensor representations + of objects must be moved to the GPU device before communication takes + place. In this case, the device used is given by + ``torch.cuda.current_device()`` and it is the user's responsibility to + ensure that this is set so that each rank has an individual GPU, via + ``torch.cuda.set_device()``. + + .. note:: Note that this API differs slightly from the :func:`all_gather` + collective since it does not provide an ``async_op`` handle and thus + will be a blocking call. + + .. warning:: + :func:`broadcast_object_list` uses ``pickle`` module implicitly, which + is known to be insecure. It is possible to construct malicious pickle + data which will execute arbitrary code during unpickling. Only call this + function with data you trust. + + .. warning:: + Calling :func:`broadcast_object_list` with GPU tensors is not well supported + and inefficient as it incurs GPU -> CPU transfer since tensors would be + pickled. Please consider using :func:`broadcast` instead. + + Example:: + >>> # xdoctest: +SKIP("need process group init") + >>> # Note: Process group initialization omitted on each rank. + >>> import torch.distributed as dist + >>> if dist.get_rank() == 0: + >>> # Assumes world_size of 3. + >>> objects = ["foo", 12, {1: 2}] # any picklable object + >>> else: + >>> objects = [None, None, None] + >>> # Assumes backend is not NCCL + >>> device = torch.device("cpu") + >>> dist.broadcast_object_list(objects, src=0, device=device) + >>> objects + ['foo', 12, {1: 2}] + """ + if _rank_not_in_group(group): + _warn_not_in_group("broadcast_object_list") + return + + # Current device selection. + # To preserve backwards compatibility, ``device`` is default to ``None`` + # in which case we run current logic of device selection, i.e. + # ``current_device`` is CUDA if backend is NCCL otherwise CPU device. In the + # case it is not ``None`` we move the size and object tensors to be + # broadcasted to this device. + current_device = device or _get_pg_default_device(group) + my_rank = get_rank() + # Serialize object_list elements to tensors on src rank. + if my_rank == src: + tensor_list, size_list = zip(*[_object_to_tensor(obj, current_device, group) for obj in object_list]) + object_sizes_tensor = torch.cat(size_list) + else: + object_sizes_tensor = torch.empty(len(object_list), dtype=torch.long, device=current_device) + + # Broadcast object sizes + broadcast(object_sizes_tensor, src=src, group=group) + + # Concatenate and broadcast serialized object tensors + # Note: torch.cat will do an extra memory copy to the current device, if the tensor_list + # has only one element, we can skip the copy. + if my_rank == src: + if len(tensor_list) == 1: # type: ignore[possibly-undefined] + object_tensor = tensor_list[0] + else: + object_tensor = torch.cat(tensor_list) + else: + object_tensor = torch.empty( # type: ignore[call-overload] + torch.sum(object_sizes_tensor).item(), # type: ignore[arg-type] + dtype=torch.uint8, + device=current_device + ) + + broadcast(object_tensor, src=src, group=group) + # Deserialize objects using their stored sizes. 
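+    # Note: only non-src ranks need to deserialize below, since the src rank
+    # already holds the original Python objects in ``object_list``. They walk
+    # the concatenated byte tensor with a running offset, slicing out exactly
+    # ``object_sizes_tensor[i]`` bytes per object before unpickling it.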
+ offset = 0 + if my_rank != src: + for i, obj_size in enumerate(object_sizes_tensor): + obj_view = object_tensor[offset : offset + obj_size] + obj_view = obj_view.type(torch.uint8) + offset += obj_size + object_list[i] = _tensor_to_object(obj_view, obj_size, group) + + +@_exception_logger +def scatter_object_list( + scatter_object_output_list, scatter_object_input_list, src=0, group=None +): + """ + Scatters picklable objects in ``scatter_object_input_list`` to the whole group. + + Similar to :func:`scatter`, but Python objects can be passed in. On + each rank, the scattered object will be stored as the first element of + ``scatter_object_output_list``. Note that all objects in + ``scatter_object_input_list`` must be picklable in order to be scattered. + + Args: + scatter_object_output_list (List[Any]): Non-empty list whose first + element will store the object scattered to this rank. + scatter_object_input_list (List[Any]): List of input objects to scatter. + Each object must be picklable. Only objects on the ``src`` rank will + be scattered, and the argument can be ``None`` for non-src ranks. + src (int): Source rank from which to scatter ``scatter_object_input_list``. + Source rank is based on global process group (regardless of ``group`` argument). + group: (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. Default is ``None``. + + Returns: + ``None``. If rank is part of the group, ``scatter_object_output_list`` + will have its first element set to the scattered object for this rank. + + .. note:: Note that this API differs slightly from the scatter collective + since it does not provide an ``async_op`` handle and thus will be a + blocking call. + + .. warning:: + :func:`scatter_object_list` uses ``pickle`` module implicitly, which + is known to be insecure. It is possible to construct malicious pickle + data which will execute arbitrary code during unpickling. Only call this + function with data you trust. + + .. warning:: + Calling :func:`scatter_object_list` with GPU tensors is not well supported + and inefficient as it incurs GPU -> CPU transfer since tensors would be + pickled. Please consider using :func:`scatter` instead. + + Example:: + >>> # xdoctest: +SKIP("need process group init") + >>> # Note: Process group initialization omitted on each rank. + >>> import torch.distributed as dist + >>> if dist.get_rank() == 0: + >>> # Assumes world_size of 3. + >>> objects = ["foo", 12, {1: 2}] # any picklable object + >>> else: + >>> # Can be any list on non-src ranks, elements are not used. + >>> objects = [None, None, None] + >>> output_list = [None] + >>> dist.scatter_object_list(output_list, objects, src=0) + >>> # Rank i gets objects[i]. For example, on rank 2: + >>> output_list + [{1: 2}] + """ + if _rank_not_in_group(group): + _warn_not_in_group("scatter_object_list") + return + + if ( + not isinstance(scatter_object_output_list, list) + or len(scatter_object_output_list) < 1 + ): + raise ValueError( + "Expected argument scatter_object_output_list to be a list of size at least 1." + ) + + my_rank = get_rank() + pg_device = _get_pg_default_device(group) + if my_rank == src: + tensor_list, tensor_sizes = zip( + *[_object_to_tensor(obj, pg_device, group) for obj in scatter_object_input_list] + ) + tensor_list, tensor_sizes = list(tensor_list), list(tensor_sizes) + + # Src rank broadcasts the maximum tensor size. This is because all ranks are + # expected to call into scatter() with equal-sized tensors. 
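+    # Non-src ranks allocate a placeholder long tensor instead; the broadcast
+    # below fills it in, so every rank learns the common (padded) message size
+    # before the serialized objects are scattered.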
+ if my_rank == src: + max_tensor_size = max(tensor_sizes) # type: ignore[possibly-undefined] + for tensor in tensor_list: # type: ignore[possibly-undefined] + tensor.resize_(max_tensor_size) + else: + max_tensor_size = torch.tensor([0], dtype=torch.long, device=pg_device) + broadcast(max_tensor_size, src=src, group=group) + + # Scatter actual serialized objects + output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8, device=pg_device) + scatter( + output_tensor, + scatter_list=None if my_rank != src else tensor_list, # type: ignore[possibly-undefined] + src=src, + group=group, + ) + + # Scatter per-object sizes to trim tensors when deserializing back to object + obj_tensor_size = torch.tensor([0], dtype=torch.long, device=pg_device) + scatter( + obj_tensor_size, + scatter_list=None if my_rank != src else tensor_sizes, # type: ignore[possibly-undefined] + src=src, + group=group, + ) + + # Deserialize back to object + scatter_object_output_list[0] = _tensor_to_object(output_tensor, obj_tensor_size, group) + + +@_exception_logger +def all_gather(tensor_list, tensor, group=None, async_op=False): + """ + Gathers tensors from the whole group in a list. + + Complex tensors are supported. + + Args: + tensor_list (list[Tensor]): Output list. It should contain + correctly-sized tensors to be used for output of the collective. + tensor (Tensor): Tensor to be broadcast from current process. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + Examples: + >>> # xdoctest: +SKIP("need process group init") + >>> # All tensors below are of torch.int64 dtype. + >>> # We have 2 process groups, 2 ranks. + >>> device = torch.device(f'cuda:{rank}') + >>> tensor_list = [torch.zeros(2, dtype=torch.int64, device=device) for _ in range(2)] + >>> tensor_list + [tensor([0, 0], device='cuda:0'), tensor([0, 0], device='cuda:0')] # Rank 0 + [tensor([0, 0], device='cuda:0'), tensor([0, 0], device='cuda:1')] # Rank 1 + >>> tensor = torch.arange(2, dtype=torch.int64, device=device) + 1 + 2 * rank + >>> tensor + tensor([1, 2], device='cuda:0') # Rank 0 + tensor([3, 4], device='cuda:1') # Rank 1 + >>> dist.all_gather(tensor_list, tensor) + >>> tensor_list + [tensor([1, 2], device='cuda:0'), tensor([3, 4], device='cuda:0')] # Rank 0 + [tensor([1, 2], device='cuda:1'), tensor([3, 4], device='cuda:1')] # Rank 1 + + >>> # All tensors below are of torch.cfloat dtype. + >>> # We have 2 process groups, 2 ranks. 
+ >>> tensor_list = [torch.zeros(2, dtype=torch.cfloat, device=device) for _ in range(2)] + >>> tensor_list + [tensor([0.+0.j, 0.+0.j], device='cuda:0'), tensor([0.+0.j, 0.+0.j], device='cuda:0')] # Rank 0 + [tensor([0.+0.j, 0.+0.j], device='cuda:1'), tensor([0.+0.j, 0.+0.j], device='cuda:1')] # Rank 1 + >>> tensor = torch.tensor([1+1j, 2+2j], dtype=torch.cfloat, device=device) + 2 * rank * (1+1j) + >>> tensor + tensor([1.+1.j, 2.+2.j], device='cuda:0') # Rank 0 + tensor([3.+3.j, 4.+4.j], device='cuda:1') # Rank 1 + >>> dist.all_gather(tensor_list, tensor) + >>> tensor_list + [tensor([1.+1.j, 2.+2.j], device='cuda:0'), tensor([3.+3.j, 4.+4.j], device='cuda:0')] # Rank 0 + [tensor([1.+1.j, 2.+2.j], device='cuda:1'), tensor([3.+3.j, 4.+4.j], device='cuda:1')] # Rank 1 + + """ + _check_tensor_list(tensor_list, "tensor_list") + _check_single_tensor(tensor, "tensor") + _ensure_all_tensors_same_dtype(tensor_list, tensor) + if _rank_not_in_group(group): + _warn_not_in_group("all_gather") + return + + tensor_list = [ + t if not t.is_complex() else torch.view_as_real(t) for t in tensor_list + ] + tensor = tensor if not tensor.is_complex() else torch.view_as_real(tensor) + + if group is None: + default_pg = _get_default_group() + work = default_pg.allgather([tensor_list], [tensor]) + else: + work = group.allgather([tensor_list], [tensor]) + + if async_op: + return work + else: + work.wait() + + +@_exception_logger +def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=False): + """ + Gather tensors from all ranks and put them in a single output tensor. + + Args: + output_tensor (Tensor): Output tensor to accommodate tensor elements + from all ranks. It must be correctly sized to have one of the + following forms: + (i) a concatenation of all the input tensors along the primary + dimension; for definition of "concatenation", see ``torch.cat()``; + (ii) a stack of all the input tensors along the primary dimension; + for definition of "stack", see ``torch.stack()``. + Examples below may better explain the supported output forms. + input_tensor (Tensor): Tensor to be gathered from current rank. + Different from the ``all_gather`` API, the input tensors in this + API must have the same size across all ranks. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + Examples: + >>> # xdoctest: +SKIP("need process group init") + >>> # All tensors below are of torch.int64 dtype and on CUDA devices. + >>> # We have two ranks. + >>> device = torch.device(f'cuda:{rank}') + >>> tensor_in = torch.arange(2, dtype=torch.int64, device=device) + 1 + 2 * rank + >>> tensor_in + tensor([1, 2], device='cuda:0') # Rank 0 + tensor([3, 4], device='cuda:1') # Rank 1 + >>> # Output in concatenation form + >>> tensor_out = torch.zeros(world_size * 2, dtype=torch.int64, device=device) + >>> dist.all_gather_into_tensor(tensor_out, tensor_in) + >>> tensor_out + tensor([1, 2, 3, 4], device='cuda:0') # Rank 0 + tensor([1, 2, 3, 4], device='cuda:1') # Rank 1 + >>> # Output in stack form + >>> tensor_out2 = torch.zeros(world_size, 2, dtype=torch.int64, device=device) + >>> dist.all_gather_into_tensor(tensor_out2, tensor_in) + >>> tensor_out2 + tensor([[1, 2], + [3, 4]], device='cuda:0') # Rank 0 + tensor([[1, 2], + [3, 4]], device='cuda:1') # Rank 1 + + .. 
warning:: + The Gloo backend does not support this API. + + """ + _check_single_tensor(input_tensor, "input_tensor") + _check_single_tensor(output_tensor, "output_tensor") + if _rank_not_in_group(group): + _warn_not_in_group("all_gather_into_tensor") + return + + output_tensor = ( + output_tensor + if not output_tensor.is_complex() + else torch.view_as_real(output_tensor) + ) + input_tensor = ( + input_tensor + if not input_tensor.is_complex() + else torch.view_as_real(input_tensor) + ) + + opts = AllgatherOptions() + opts.asyncOp = async_op + + group = group or _get_default_group() + + if group in _world.pg_coalesce_state.keys(): + # We are in coalescing context, do not issue single operation, just append a collective representation + coll = _CollOp(all_gather_into_tensor, input_tensor, output_tensor) + _world.pg_coalesce_state[group].append(coll) + if async_op: + return _IllegalWork() + else: + return None + + work = group._allgather_base(output_tensor, input_tensor, opts) + + if async_op: + return work + else: + work.wait() + + +@_exception_logger +def _all_gather_base(output_tensor, input_tensor, group=None, async_op=False): + """ + Single tensor all gather. Gathers a single tensor from all ranks, and puts them in a single output tensor. + + Args: + output_tensor (Tensor): Output tensor. It should contain + correctly-sized tensors to be used for output of the collective. + input_tensor (Tensor): Tensor to be broadcast from current process. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + .. warning:: + `_all_gather_base` is a private function. Users should use + `all_gather_into_tensor` instead. + + """ + warnings.warn( + "torch.distributed._all_gather_base is a private function and will be " + "deprecated. Please use torch.distributed.all_gather_into_tensor " + "instead." + ) + return all_gather_into_tensor(output_tensor, input_tensor, group, async_op) + + +@_exception_logger +def all_gather_coalesced( + output_tensor_lists, input_tensor_list, group=None, async_op=False +): + """ + Gathers input tensors from the whole group in a list in a coalesced manner. + + Complex tensors are supported. + + Args: + output_tensor_lists (list[list[Tensor]]): Output list. It should contain + correctly-sized tensors to be used for output of the collective. + input_tensor_list (list[Tensor]): Tensors to be broadcast from + current process. At least one tensor has to be non empty. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + Example: + we have 2 process groups, 2 ranks. + rank 0 passes: + input_tensor_list = [[[1, 1], [1, 1]], [2], [3, 3]] + output_tensor_lists = + [[[[-1, -1], [-1, -1]], [-1], [-1, -1]], + [[[-1, -1], [-1, -1]], [-1], [-1, -1]]] + rank 1 passes: + input_tensor_list = [[[3, 3], [3, 3]], [5], [1, 1]] + output_tensor_lists = + [[[[-1, -1], [-1, -1]], [-1], [-1, -1]], + [[[-1, -1], [-1, -1]], [-1], [-1, -1]]] + both rank 0 and 1 get: + output_tensor_lists = + [[[1, 1], [1, 1]], [2], [3, 3]], + [[3, 3], [3, 3]], [5], [1, 1]]]. 
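+
+        That is, after the call, ``output_tensor_lists[i]`` on every rank ends
+        up holding the tensors that rank ``i`` passed in as ``input_tensor_list``.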
+ + WARNING: at this time individual shape checking is not implemented across nodes. + For example, if the rank 0 node passes [torch.rand(4), torch.rand(2)] and the + rank 1 node passes [torch.rand(2), torch.rand(2), torch.rand(2)], the + all_gather_coalesced operation will proceed without complaint and return + erroneous outputs. This lack of shape checking results in significant + performance improvements but users of this function should take extra care + to ensure that each node passes in tensors whose shapes match across nodes. + """ + warnings.warn( + "torch.distributed.all_gather_coalesced will be deprecated. If you must " + "use it, please revisit our documentation later at " + "https://pytorch.org/docs/master/distributed.html#collective-functions" + ) + # We only check basic compatibility with C++ params here, C++ code will + # do shape and type checking. + if _rank_not_in_group(group): + _warn_not_in_group("all_gather_coalesced") + return + _check_tensor_list(input_tensor_list, "input_tensor_list") + _ensure_all_tensors_same_dtype(input_tensor_list) + if not isinstance(output_tensor_lists, list): + raise TypeError( + "Invalid function argument: output_tensor_lists should be a list" + ) + for output_tensor_list in output_tensor_lists: + _check_tensor_list(output_tensor_list, "output_tensor_lists") + _ensure_all_tensors_same_dtype(output_tensor_list) + + output_tensor_lists = [ + [t if not t.is_complex() else torch.view_as_real(t) for t in l] + for l in output_tensor_lists + ] + input_tensor_list = [ + t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list + ] + + if group is None: + default_pg = _get_default_group() + work = default_pg.allgather_coalesced(output_tensor_lists, input_tensor_list) + else: + work = group.allgather_coalesced(output_tensor_lists, input_tensor_list) + + if async_op: + return work.get_future() + else: + work.wait() + + +def _validate_output_list_for_rank(my_rank, dst, gather_list): + if dst == my_rank: + if not gather_list: + raise ValueError( + "Argument ``gather_list`` must be specified on destination rank." + ) + elif gather_list: + raise ValueError( + "Argument ``gather_list`` must NOT be specified " + "on non-destination ranks." + ) + + +@_exception_logger +def gather(tensor, gather_list=None, dst=0, group=None, async_op=False): + """ + Gathers a list of tensors in a single process. + + Args: + tensor (Tensor): Input tensor. + gather_list (list[Tensor], optional): List of appropriately-sized + tensors to use for gathered data (default is None, must be specified + on the destination rank) + dst (int, optional): Destination rank on global process group (regardless of ``group`` argument). (default is 0) + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + """ + _check_single_tensor(tensor, "tensor") + + # Parameter ``gather_list`` may be left unspecified on non-dst ranks. 
+ if gather_list: + _check_tensor_list(gather_list, "gather_list") + else: + gather_list = [] + _ensure_all_tensors_same_dtype(tensor, gather_list) + + if _rank_not_in_group(group): + _warn_not_in_group("gather") + return + + my_rank = get_rank() + _validate_output_list_for_rank(my_rank, dst, gather_list) + output_tensors = [gather_list] if dst == my_rank else [] + input_tensors = [tensor] + + opts = GatherOptions() + opts.rootRank = dst + + if group is None or group is GroupMember.WORLD: + default_pg = _get_default_group() + work = default_pg.gather(output_tensors, input_tensors, opts) + else: + group_dst_rank = get_group_rank(group, dst) + opts.rootRank = group_dst_rank + work = group.gather(output_tensors, input_tensors, opts) + + if async_op: + return work + else: + work.wait() + + +@_exception_logger +def scatter(tensor, scatter_list=None, src=0, group=None, async_op=False): + """ + Scatters a list of tensors to all processes in a group. + + Each process will receive exactly one tensor and store its data in the + ``tensor`` argument. + + Complex tensors are supported. + + Args: + tensor (Tensor): Output tensor. + scatter_list (list[Tensor]): List of tensors to scatter (default is + None, must be specified on the source rank) + src (int): Source rank on global process group (regardless of ``group`` argument). + Default is 0 + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + .. note:: Note that all Tensors in scatter_list must have the same size. + + Example:: + >>> # xdoctest: +SKIP("need process group init") + >>> # Note: Process group initialization omitted on each rank. + >>> import torch.distributed as dist + >>> tensor_size = 2 + >>> t_ones = torch.ones(tensor_size) + >>> t_fives = torch.ones(tensor_size) * 5 + >>> output_tensor = torch.zeros(tensor_size) + >>> if dist.get_rank() == 0: + >>> # Assumes world_size of 2. + >>> # Only tensors, all of which must be the same size. + >>> scatter_list = [t_ones, t_fives] + >>> else: + >>> scatter_list = None + >>> dist.scatter(output_tensor, scatter_list, src=0) + >>> # Rank i gets scatter_list[i]. For example, on rank 1: + >>> output_tensor + tensor([5., 5.]) + + """ + _check_single_tensor(tensor, "tensor") + + # Parameter ``scatter_list`` may be left unspecified on non-src ranks. + if scatter_list: + _check_tensor_list(scatter_list, "scatter_list") + else: + scatter_list = [] + _ensure_all_tensors_same_dtype(tensor, scatter_list) + + if _rank_not_in_group(group): + _warn_not_in_group("scatter") + return + scatter_list = [ + t if not t.is_complex() else torch.view_as_real(t) for t in scatter_list + ] + tensor = tensor if not tensor.is_complex() else torch.view_as_real(tensor) + + my_rank = get_rank() + if src == my_rank: + if not scatter_list: + raise ValueError( + "Argument ``scatter_list`` must be specified on source rank." + ) + input_tensors = [scatter_list] + output_tensors = [tensor] + else: + if scatter_list: + raise ValueError( + "Argument ``scatter_list`` must NOT be specified " + "on non-source ranks." 
+ ) + input_tensors = [] + output_tensors = [tensor] + + opts = ScatterOptions() + opts.rootRank = src + opts.asyncOp = async_op + + if group is None or group is GroupMember.WORLD: + default_pg = _get_default_group() + work = default_pg.scatter(output_tensors, input_tensors, opts) + else: + group_src_rank = get_group_rank(group, src) + opts.rootRank = group_src_rank + work = group.scatter(output_tensors, input_tensors, opts) + + if async_op: + return work + else: + work.wait() + + +@_exception_logger +def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False): + """ + Reduces, then scatters a list of tensors to all processes in a group. + + Args: + output (Tensor): Output tensor. + input_list (list[Tensor]): List of tensors to reduce and scatter. + op (optional): One of the values from + ``torch.distributed.ReduceOp`` + enum. Specifies an operation used for element-wise reductions. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + """ + _check_single_tensor(output, "output") + _check_tensor_list(input_list, "input_list") + _ensure_all_tensors_same_dtype(output, input_list) + if _rank_not_in_group(group): + _warn_not_in_group("reduce_scatter") + return + + opts = ReduceScatterOptions() + opts.reduceOp = op + + if group is None: + default_pg = _get_default_group() + work = default_pg.reduce_scatter([output], [input_list], opts) + else: + work = group.reduce_scatter([output], [input_list], opts) + + if async_op: + return work + else: + work.wait() + + +@_exception_logger +def reduce_scatter_tensor(output, input, op=ReduceOp.SUM, group=None, async_op=False): + """ + Reduces, then scatters a tensor to all ranks in a group. + + Args: + output (Tensor): Output tensor. It should have the same size across all + ranks. + input (Tensor): Input tensor to be reduced and scattered. Its size + should be output tensor size times the world size. The input tensor + can have one of the following shapes: + (i) a concatenation of the output tensors along the primary + dimension, or + (ii) a stack of the output tensors along the primary dimension. + For definition of "concatenation", see ``torch.cat()``. + For definition of "stack", see ``torch.stack()``. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + Examples: + >>> # xdoctest: +SKIP("need process group init") + >>> # All tensors below are of torch.int64 dtype and on CUDA devices. + >>> # We have two ranks. 
+ >>> device = torch.device(f'cuda:{rank}') + >>> tensor_out = torch.zeros(2, dtype=torch.int64, device=device) + >>> # Input in concatenation form + >>> tensor_in = torch.arange(world_size * 2, dtype=torch.int64, device=device) + >>> tensor_in + tensor([0, 1, 2, 3], device='cuda:0') # Rank 0 + tensor([0, 1, 2, 3], device='cuda:1') # Rank 1 + >>> dist.reduce_scatter_tensor(tensor_out, tensor_in) + >>> tensor_out + tensor([0, 2], device='cuda:0') # Rank 0 + tensor([4, 6], device='cuda:1') # Rank 1 + >>> # Input in stack form + >>> tensor_in = torch.reshape(tensor_in, (world_size, 2)) + >>> tensor_in + tensor([[0, 1], + [2, 3]], device='cuda:0') # Rank 0 + tensor([[0, 1], + [2, 3]], device='cuda:1') # Rank 1 + >>> dist.reduce_scatter_tensor(tensor_out, tensor_in) + >>> tensor_out + tensor([0, 2], device='cuda:0') # Rank 0 + tensor([4, 6], device='cuda:1') # Rank 1 + + .. warning:: + The Gloo backend does not support this API. + + """ + _check_single_tensor(output, "output") + _check_single_tensor(input, "input") + + if _rank_not_in_group(group): + _warn_not_in_group("reduce_scatter_tensor") + return + + opts = ReduceScatterOptions() + opts.reduceOp = op + opts.asyncOp = async_op + + group = group or _get_default_group() + + # Check if we are in coalescing context + # If we are, do not issue single operation, just append a collective representation + if group in _world.pg_coalesce_state.keys(): + coll = _CollOp(reduce_scatter_tensor, input, output, op, None) + _world.pg_coalesce_state[group].append(coll) + if async_op: + return _IllegalWork() + else: + return None + + work = group._reduce_scatter_base(output, input, opts) + + if async_op: + return work + else: + work.wait() + + +def _reduce_scatter_base(output, input, op=ReduceOp.SUM, group=None, async_op=False): + """ + Reduces, then scatters a flattened tensor to all processes in a group. + + Args: + output (Tensor): Output tensor. + input (Tensor): Input tensor that is of size output tensor size times world size + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + .. warning:: + `_reduce_scatter_base` is a private function. Users should use + `reduce_scatter_tensor` instead. + + """ + warnings.warn( + "torch.distributed._reduce_scatter_base is a private function and will " + "be deprecated. Please use torch.distributed.reduce_scatter_tensor " + "instead." + ) + return reduce_scatter_tensor(output, input, op, group, async_op) + + +@_exception_logger +def all_to_all_single( + output, + input, + output_split_sizes=None, + input_split_sizes=None, + group=None, + async_op=False, +): + """ + Split input tensor and then scatter the split list to all processes in a group. + + Later the received tensors are concatenated from all the processes in the group + and returned as a single output tensor. + + Complex tensors are supported. + + Args: + output (Tensor): Gathered concatenated output tensor. + input (Tensor): Input tensor to scatter. + output_split_sizes: (list[Int], optional): Output split sizes for dim 0 + if specified None or empty, dim 0 of ``output`` tensor must divide + equally by ``world_size``. + input_split_sizes: (list[Int], optional): Input split sizes for dim 0 + if specified None or empty, dim 0 of ``input`` tensor must divide + equally by ``world_size``. 
+ group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + .. warning:: + `all_to_all_single` is experimental and subject to change. + + Examples: + >>> # xdoctest: +SKIP("Undefined rank") + >>> input = torch.arange(4) + rank * 4 + >>> input + tensor([0, 1, 2, 3]) # Rank 0 + tensor([4, 5, 6, 7]) # Rank 1 + tensor([8, 9, 10, 11]) # Rank 2 + tensor([12, 13, 14, 15]) # Rank 3 + >>> output = torch.empty([4], dtype=torch.int64) + >>> dist.all_to_all_single(output, input) + >>> output + tensor([0, 4, 8, 12]) # Rank 0 + tensor([1, 5, 9, 13]) # Rank 1 + tensor([2, 6, 10, 14]) # Rank 2 + tensor([3, 7, 11, 15]) # Rank 3 + + >>> # Essentially, it is similar to following operation: + >>> scatter_list = list(input.chunk(world_size)) + >>> gather_list = list(output.chunk(world_size)) + >>> for i in range(world_size): + >>> dist.scatter(gather_list[i], scatter_list if i == rank else [], src = i) + + >>> # Another example with uneven split + >>> input + tensor([0, 1, 2, 3, 4, 5]) # Rank 0 + tensor([10, 11, 12, 13, 14, 15, 16, 17, 18]) # Rank 1 + tensor([20, 21, 22, 23, 24]) # Rank 2 + tensor([30, 31, 32, 33, 34, 35, 36]) # Rank 3 + >>> input_splits + [2, 2, 1, 1] # Rank 0 + [3, 2, 2, 2] # Rank 1 + [2, 1, 1, 1] # Rank 2 + [2, 2, 2, 1] # Rank 3 + >>> output_splits + [2, 3, 2, 2] # Rank 0 + [2, 2, 1, 2] # Rank 1 + [1, 2, 1, 2] # Rank 2 + [1, 2, 1, 1] # Rank 3 + >>> output = ... + >>> dist.all_to_all_single(output, input, output_splits, input_splits) + >>> output + tensor([ 0, 1, 10, 11, 12, 20, 21, 30, 31]) # Rank 0 + tensor([ 2, 3, 13, 14, 22, 32, 33]) # Rank 1 + tensor([ 4, 15, 16, 23, 34, 35]) # Rank 2 + tensor([ 5, 17, 18, 24, 36]) # Rank 3 + + + >>> # Another example with tensors of torch.cfloat type. 
+ >>> input = torch.tensor([1+1j, 2+2j, 3+3j, 4+4j], dtype=torch.cfloat) + 4 * rank * (1+1j) + >>> input + tensor([1+1j, 2+2j, 3+3j, 4+4j]) # Rank 0 + tensor([5+5j, 6+6j, 7+7j, 8+8j]) # Rank 1 + tensor([9+9j, 10+10j, 11+11j, 12+12j]) # Rank 2 + tensor([13+13j, 14+14j, 15+15j, 16+16j]) # Rank 3 + >>> output = torch.empty([4], dtype=torch.int64) + >>> dist.all_to_all_single(output, input) + >>> output + tensor([1+1j, 5+5j, 9+9j, 13+13j]) # Rank 0 + tensor([2+2j, 6+6j, 10+10j, 14+14j]) # Rank 1 + tensor([3+3j, 7+7j, 11+11j, 15+15j]) # Rank 2 + tensor([4+4j, 8+8j, 12+12j, 16+16j]) # Rank 3 + """ + if _rank_not_in_group(group): + _warn_not_in_group("all_to_all_single") + return + + opts = AllToAllOptions() + _check_single_tensor(output, "output") + _check_single_tensor(input, "input") + _ensure_all_tensors_same_dtype(output, input) + + if input.is_complex(): + input = torch.view_as_real(input) + if output.is_complex(): + output = torch.view_as_real(output) + + output_split_sizes = [] if output_split_sizes is None else output_split_sizes + input_split_sizes = [] if input_split_sizes is None else input_split_sizes + + if group is None: + default_pg = _get_default_group() + work = default_pg.alltoall_base( + output, input, output_split_sizes, input_split_sizes, opts + ) + else: + work = group.alltoall_base( + output, input, output_split_sizes, input_split_sizes, opts + ) + + if async_op: + return work + else: + work.wait() + + +@_exception_logger +def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False): + """ + Scatters list of input tensors to all processes in a group and return gathered list of tensors in output list. + + Complex tensors are supported. + + Args: + output_tensor_list (list[Tensor]): List of tensors to be gathered one + per rank. + input_tensor_list (list[Tensor]): List of tensors to scatter one per rank. + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group. + + .. warning:: + `all_to_all` is experimental and subject to change. 
+ + Examples: + >>> # xdoctest: +SKIP("Undefined rank") + >>> input = torch.arange(4) + rank * 4 + >>> input = list(input.chunk(4)) + >>> input + [tensor([0]), tensor([1]), tensor([2]), tensor([3])] # Rank 0 + [tensor([4]), tensor([5]), tensor([6]), tensor([7])] # Rank 1 + [tensor([8]), tensor([9]), tensor([10]), tensor([11])] # Rank 2 + [tensor([12]), tensor([13]), tensor([14]), tensor([15])] # Rank 3 + >>> output = list(torch.empty([4], dtype=torch.int64).chunk(4)) + >>> dist.all_to_all(output, input) + >>> output + [tensor([0]), tensor([4]), tensor([8]), tensor([12])] # Rank 0 + [tensor([1]), tensor([5]), tensor([9]), tensor([13])] # Rank 1 + [tensor([2]), tensor([6]), tensor([10]), tensor([14])] # Rank 2 + [tensor([3]), tensor([7]), tensor([11]), tensor([15])] # Rank 3 + + >>> # Essentially, it is similar to following operation: + >>> scatter_list = input + >>> gather_list = output + >>> for i in range(world_size): + >>> dist.scatter(gather_list[i], scatter_list if i == rank else [], src=i) + + >>> input + tensor([0, 1, 2, 3, 4, 5]) # Rank 0 + tensor([10, 11, 12, 13, 14, 15, 16, 17, 18]) # Rank 1 + tensor([20, 21, 22, 23, 24]) # Rank 2 + tensor([30, 31, 32, 33, 34, 35, 36]) # Rank 3 + >>> input_splits + [2, 2, 1, 1] # Rank 0 + [3, 2, 2, 2] # Rank 1 + [2, 1, 1, 1] # Rank 2 + [2, 2, 2, 1] # Rank 3 + >>> output_splits + [2, 3, 2, 2] # Rank 0 + [2, 2, 1, 2] # Rank 1 + [1, 2, 1, 2] # Rank 2 + [1, 2, 1, 1] # Rank 3 + >>> input = list(input.split(input_splits)) + >>> input + [tensor([0, 1]), tensor([2, 3]), tensor([4]), tensor([5])] # Rank 0 + [tensor([10, 11, 12]), tensor([13, 14]), tensor([15, 16]), tensor([17, 18])] # Rank 1 + [tensor([20, 21]), tensor([22]), tensor([23]), tensor([24])] # Rank 2 + [tensor([30, 31]), tensor([32, 33]), tensor([34, 35]), tensor([36])] # Rank 3 + >>> output = ... + >>> dist.all_to_all(output, input) + >>> output + [tensor([0, 1]), tensor([10, 11, 12]), tensor([20, 21]), tensor([30, 31])] # Rank 0 + [tensor([2, 3]), tensor([13, 14]), tensor([22]), tensor([32, 33])] # Rank 1 + [tensor([4]), tensor([15, 16]), tensor([23]), tensor([34, 35])] # Rank 2 + [tensor([5]), tensor([17, 18]), tensor([24]), tensor([36])] # Rank 3 + + >>> # Another example with tensors of torch.cfloat type. 
+ >>> input = torch.tensor([1+1j, 2+2j, 3+3j, 4+4j], dtype=torch.cfloat) + 4 * rank * (1+1j) + >>> input = list(input.chunk(4)) + >>> input + [tensor([1+1j]), tensor([2+2j]), tensor([3+3j]), tensor([4+4j])] # Rank 0 + [tensor([5+5j]), tensor([6+6j]), tensor([7+7j]), tensor([8+8j])] # Rank 1 + [tensor([9+9j]), tensor([10+10j]), tensor([11+11j]), tensor([12+12j])] # Rank 2 + [tensor([13+13j]), tensor([14+14j]), tensor([15+15j]), tensor([16+16j])] # Rank 3 + >>> output = list(torch.empty([4], dtype=torch.int64).chunk(4)) + >>> dist.all_to_all(output, input) + >>> output + [tensor([1+1j]), tensor([5+5j]), tensor([9+9j]), tensor([13+13j])] # Rank 0 + [tensor([2+2j]), tensor([6+6j]), tensor([10+10j]), tensor([14+14j])] # Rank 1 + [tensor([3+3j]), tensor([7+7j]), tensor([11+11j]), tensor([15+15j])] # Rank 2 + [tensor([4+4j]), tensor([8+8j]), tensor([12+12j]), tensor([16+16j])] # Rank 3 + + """ + if _rank_not_in_group(group): + _warn_not_in_group("all_to_all") + return + + opts = AllToAllOptions() + _check_tensor_list(output_tensor_list, "output_tensor_list") + _check_tensor_list(input_tensor_list, "input_tensor_list") + _ensure_all_tensors_same_dtype(output_tensor_list, input_tensor_list) + + input_tensor_list = [ + t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list + ] + output_tensor_list = [ + t if not t.is_complex() else torch.view_as_real(t) for t in output_tensor_list + ] + + if group is None: + default_pg = _get_default_group() + work = default_pg.alltoall(output_tensor_list, input_tensor_list, opts) + else: + work = group.alltoall(output_tensor_list, input_tensor_list, opts) + + if async_op: + return work + else: + work.wait() + +@_exception_logger +def barrier(group=GroupMember.WORLD, async_op=False, device_ids=None): + """ + Synchronize all processes. + + This collective blocks processes until the whole group enters this function, + if async_op is False, or if async work handle is called on wait(). + + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + async_op (bool, optional): Whether this op should be an async op + device_ids ([int], optional): List of device/GPU ids. + + Returns: + Async work handle, if async_op is set to True. + None, if not async_op or if not part of the group + + .. note:: `ProcessGroupNCCL` now relies on stream synchronization instead of + device synchronization to block the CPU. Thus, please do not assume that + `barrier()` would perform a device synchronization. + """ + if _rank_not_in_group(group): + _warn_not_in_group("barrier") + return + + opts = BarrierOptions() + opts.device = _get_pg_default_device(group) + if device_ids is not None: + if isinstance(device_ids, list): + opts.device_ids = device_ids + else: + raise TypeError( + "Invalid function argument: device_ids type should be List[int]" + ) + + if group is None: + default_pg = _get_default_group() + work = default_pg.barrier(opts=opts) + else: + work = group.barrier(opts=opts) + + if async_op: + return work + else: + work.wait() + + +def monitored_barrier(group=GroupMember.WORLD, timeout=None, wait_all_ranks=False): + """ + Synchronize processes similar to ``torch.distributed.barrier``, but consider a configurable timeout. + + It is able to report ranks that did not pass this barrier within the provided timeout. + Specifically, for non-zero ranks, will block until a send/recv is processed from rank 0. 
+ Rank 0 will block until all send /recv from other ranks are processed, and will report + failures for ranks that failed to respond in time. Note that if one rank does not reach the + monitored_barrier (for example due to a hang), all other ranks would fail in monitored_barrier. + + This collective will block all processes/ranks in the group, until the + whole group exits the function successfully, making it useful for debugging + and synchronizing. However, it can have a performance impact and should only + be used for debugging or scenarios that require full synchronization points + on the host-side. For debugging purposes, this barrier can be inserted + before the application's collective calls to check if any ranks are + desynchronized. + + .. note:: Note that this collective is only supported with the GLOO backend. + + Args: + group (ProcessGroup, optional): The process group to work on. If + ``None``, the default process group will be used. + timeout (datetime.timedelta, optional): Timeout for monitored_barrier. + If ``None``, the default process group timeout will be used. + wait_all_ranks (bool, optional): Whether to collect all failed ranks or + not. By default, this is ``False`` and ``monitored_barrier`` on rank 0 + will throw on the first failed rank it encounters in order to fail + fast. By setting ``wait_all_ranks=True`` ``monitored_barrier`` will + collect all failed ranks and throw an error containing information + about all failed ranks. + + Returns: + ``None``. + + Example:: + >>> # xdoctest: +SKIP("need process group init") + >>> # Note: Process group initialization omitted on each rank. + >>> import torch.distributed as dist + >>> if dist.get_rank() != 1: + >>> dist.monitored_barrier() # Raises exception indicating that + >>> # rank 1 did not call into monitored_barrier. + >>> # Example with wait_all_ranks=True + >>> if dist.get_rank() == 0: + >>> dist.monitored_barrier(wait_all_ranks=True) # Raises exception + >>> # indicating that ranks 1, 2, ... world_size - 1 did not call into + >>> # monitored_barrier. + """ + # Need to call rank not in group before using the group, otherwise + # "Invalid process group" error is raised. + if _rank_not_in_group(group): + _warn_not_in_group("monitored_barrier") + return + + if get_backend(group) != Backend.GLOO: + raise ValueError("monitored_barrier is only implemented for GLOO backend.") + + if timeout is None: + timeout = _get_default_timeout(get_backend(group)) + elif isinstance(timeout, float): + # TODO(whc) aparently some existing test case for monitored_barrier passes in a timeout in float format? + warnings.warn( + "Please specify timeout arg as a timedelta. " + f"Converting current value of {timeout} assuming it represents seconds", + ) + timeout = timedelta(seconds=timeout) + + _check_valid_timeout(timeout) + + group_to_use = _get_default_group() if group is None else group + return group_to_use.monitored_barrier(timeout, wait_all_ranks=wait_all_ranks) + + +def _create_process_group_wrapper( + wrapped_pg: torch._C._distributed_c10d.Backend, + store_prefix: str, + store: Store, + rank: int, + world_size: int, + timeout: timedelta = default_pg_timeout, +): + # (whc) this appears to be just for the gloo backend? if so, `default_pg_timeout` is appropriate... + + # Create a separate prefix store for the helper process group. 
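+    # The ``PG_WRAPPER_STORE_PREFIX`` namespace keeps the helper group's store
+    # keys separate from those of the wrapped process group. The Gloo helper
+    # group created below is handed to ``_ProcessGroupWrapper`` together with
+    # the wrapped backend and is used for the wrapper's consistency checks.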
+ prefix = f"{PG_WRAPPER_STORE_PREFIX}:{store_prefix}" + store = PrefixStore(prefix, store) + helper_pg = ProcessGroupGloo(store, rank, world_size, timeout=timeout) + # Wrap the underlying pg with ProcessGroupWrapper. + wrapped_pg = _ProcessGroupWrapper(wrapped_pg, helper_pg) + return wrapped_pg + +# helper function for deterministically hashing a list of ranks +def _hash_ranks(ranks: List[int]): + return hashlib.sha1(bytes("_".join(map(str, ranks)), "utf-8")).hexdigest() + +# Takes a list of ranks and computes an integer color +def _process_group_color(ranks: List[int]) -> int: + # Convert our hash to an int, but avoid negative numbers by shifting a bit. + return int(_hash_ranks(ranks), 16) % (sys.maxsize >> 1) + +def _process_group_name(ranks, use_hashed_name): + global _world + if use_hashed_name: + pg_name = _hash_ranks(ranks) + while pg_name in _world.pg_names.values(): + pg_name = hashlib.sha1(bytes(pg_name + "_", "utf-8")).hexdigest() + else: + pg_name = str(_world.group_count) + _world.group_count += 1 + return pg_name + +def _get_backend_from_str(backend: Optional[str] = None) -> Backend: + # Default to the same backend as the global process group + # if backend is not specified. + if not backend: + backend = get_backend(_get_default_group()) + return Backend(backend) + + +@_time_logger +def new_group(ranks=None, timeout=None, backend=None, pg_options=None, use_local_synchronization=False): + """ + Create a new distributed group. + + This function requires that all processes in the main group (i.e. all + processes that are part of the distributed job) enter this function, even + if they are not going to be members of the group. Additionally, groups + should be created in the same order in all processes. + + .. warning:: + Using multiple process groups with the ``NCCL`` backend concurrently + is not safe and the user should perform explicit synchronization in + their application to ensure only one process group is used at a time. + This means collectives from one process group should have completed + execution on the device (not just enqueued since CUDA execution is + async) before collectives from another process group are enqueued. + See `Using multiple NCCL communicators concurrently `_ for more details. + + Args: + ranks (list[int]): List of ranks of group members. If ``None``, will be + set to all ranks. Default is ``None``. + timeout (timedelta, optional): see `init_process_group` for details and default value. + backend (str or Backend, optional): The backend to use. Depending on + build-time configurations, valid values are ``gloo`` and ``nccl``. + By default uses the same backend as the global group. This field + should be given as a lowercase string (e.g., ``"gloo"``), which can + also be accessed via :class:`Backend` attributes (e.g., + ``Backend.GLOO``). If ``None`` is passed in, the backend + corresponding to the default process group will be used. Default is + ``None``. + pg_options (ProcessGroupOptions, optional): process group options + specifying what additional options need to be passed in during + the construction of specific process groups. i.e. for the ``nccl`` + backend, ``is_high_priority_stream`` can be specified so that + process group can pick up high priority cuda streams. + use_local_synchronization (bool, optional): perform a group-local + barrier at the end of the process group creation. This is different + in that non-member ranks don't need to call into API and don't + join the barrier. 
+ + Returns: + A handle of distributed group that can be given to collective calls or None if the rank is not part of ``ranks``. + + N.B. use_local_synchronization doesn't work with MPI. + + N.B. While use_local_synchronization=True can be significantly faster with larger + clusters and small process groups, care must be taken since it changes cluster behavior + as non-member ranks don't join the group barrier(). + + N.B. use_local_synchronization=True can lead to deadlocks when each rank creates + multiple overlaping process groups. To avoid that, make sure all ranks follow the + same global creation order. + """ + return _new_group_with_tag(ranks, timeout, backend, pg_options, None, use_local_synchronization=use_local_synchronization) + +def _new_group_with_tag( + ranks=None, + timeout=None, + backend=None, + pg_options=None, + pg_tag=None, + use_local_synchronization=False +): + """ + Variant of ``new_group`` that exposes tag creation. + + :: N.B. The mechanism is experimental and tied to the functional collectives effort, see + ``torch.distributed._functional_collectives`` for reference on how to use it. + """ + global _world + + default_pg = _get_default_group() + default_backend, default_store = _world.pg_map[default_pg] + global_rank = default_pg.rank() + global_world_size = default_pg.size() + + + # Default to the same backend as the global process group + # if the backend is not specified. + if not backend: + backend = default_backend + backend = Backend(backend) + + # this timeout defaulting/validation is used for all the new_groups/new_subgroups variants, + # which may just pass their timeout value (or None) + if timeout is None: + timeout = _get_default_timeout(backend) + _check_valid_timeout(timeout) + + if use_local_synchronization: + # MPI backend doesn't have have a way for us to perform a partial sync + if backend == Backend.MPI: + raise ValueError("MPI backend doesn't support use_local_synchronization=True") + if ranks is not None and get_rank() not in ranks: + return None + + # checks the input ranks + if ranks is not None: + ranks = sorted(ranks) + group_world_size = len(ranks) + if group_world_size > global_world_size: + raise ValueError( + "the new group's world size should be less or " + "equal to the world size set by " + "init_process_group" + ) + # check ranks' sanity + for rank in ranks: + if rank < 0 or rank >= global_world_size: + raise ValueError( + "The new group's rank should be within " + "the world_size set by init_process_group" + ) + if global_rank in ranks: + group_rank = ranks.index(global_rank) + else: + group_rank = None + else: + ranks = list(range(global_world_size)) + group_world_size = global_world_size + group_rank = global_rank + + group_name = _process_group_name(ranks, use_hashed_name=use_local_synchronization) + + pg, pg_store = _new_process_group_helper( + group_world_size, + group_rank, + ranks, + backend, + default_store, + group_name, + pg_options=pg_options, + timeout=timeout, + pg_tag=pg_tag + ) + + # Create the global rank to group rank mapping + _world.pg_group_ranks[pg] = { + global_rank: group_rank for group_rank, global_rank in enumerate(ranks) + } + + if _is_barrier_after_init() == 1: + # barrier at the end to ensure that once we return from this method, all + # process groups including global variables (if any) are updated + # correctly on all ranks. + # Update 04/2023: for large-scale runs, this barrier (esp. store-based + # barrier) may be costly and/or unscalable. 
Also, in a lot of cases, + # these barriers may be unnecessary, as proven by a green CI after + # removal. An environment variable `TORCH_DIST_INIT_BARRIER` has been + # added which enables this barrier only when set to 1. + logger.info( + "Performing barrier after ProcessGroup initialization since " + "TORCH_DIST_INIT_BARRIER = 1" + ) + if backend == Backend.MPI: + # MPI doesn't have store. + barrier() + else: + barrier_store = pg_store if use_local_synchronization else default_store + world_size = len(ranks) if use_local_synchronization else get_world_size() + # Use store based barrier here since barrier() used a bunch of + # default devices and messes up NCCL internal state. + _store_based_barrier(global_rank, barrier_store, group_name, world_size, timeout) + + return pg + + +def new_subgroups( + group_size=None, + group=None, + timeout=None, + backend=None, + pg_options=None, +): + """ + Create subgroups of equal size. + + By default, it creates intra-machine subgroups, + where each of which contains all the ranks of a machine, based on the assumption + that each machine has the same number of devices. + + This is a convenience API that calls ``new_group`` to generate multiple subgroups. + It requires that all processes in the main group (i.e. all + processes that are part of the distributed job) enter this function, even + if they are not going to be members of the group. + + .. warning:: + If ``group_size`` is passed in, the world size must be divisible by ``group_size``. + If no ``group_size`` is passed in, it believe that you are creating a group based + on CUDA and determining the group size by number of CUDA devices, and if not all + the machines have the same number of devices, the subgroup division will be + different across nodes and can cause unexpected behaviors. Therefore, if you are + creating a subgroup that does not depend on CUDA (such as Gloo on CPU), please + pass in ``group_size`` correctly. + + .. warning:: + Using multiple process groups with the ``NCCL`` backend concurrently + is not safe and the user should perform explicit synchronization in + their application to ensure only one process group is used at a time. + This means collectives from one process group should have completed + execution on the device (not just enqueued since CUDA execution is + async) before collectives from another process group are enqueued. + See `Using multiple NCCL communicators concurrently `_ for more details. + + Args: + group_size (int, optional): The size of each subgroup. If ``None``, + the default subgroup size is equal to the number of devices on each machine, + based on the assumption that each machine has exactly the same + number of devices. Default is ``None``. + timeout (timedelta, optional): see `init_process_group` for details and default value. + backend (str or Backend, optional): The backend to use. Depending on + build-time configurations, valid values are ``gloo`` and ``nccl``. + By default uses the same backend as the global group. This field + should be given as a lowercase string (e.g., ``"gloo"``), which can + also be accessed via :class:`Backend` attributes (e.g., + ``Backend.GLOO``). If ``None`` is passed in, the backend + corresponding to the default process group will be used. Default is + ``None``. + pg_options (ProcessGroupOptions, optional): process group options + specifying what additional options need to be passed in during + the construction of specific process groups. i.e. 
for the ``nccl`` + backend, ``is_high_priority_stream`` can be specified so that + process group can pick up high priority cuda streams. + + Returns: + The subgroup containing the current rank, and all the subgroups used for cleanup. + + Examples: + >>> # Create intra-machine subgroups. + >>> # xdoctest: +SKIP("need process group init") + >>> cur_subgroup, subgroups = dist.new_subgroups() + >>> # Allreduce within the machine. + >>> rank = dist.get_rank() + >>> tensor = torch.ones(1, device=rank) * rank + >>> dist.all_reduce(tensor, group=cur_subgroup) + >>> tensor + tensor([8]) # Assume 8 is the number of CUDA devices per machine. + >>> # Cleanup. + >>> for subgroup in subgroups: + >>> dist.destroy_process_group(subgroup) + """ + if group_size is None: + if not torch.cuda.is_available(): + raise ValueError("Default group size only takes effect when CUDA is available." + "If your subgroup using a backend that does not depend on CUDA," + "please pass in 'group_size' correctly.") + group_size = torch.cuda.device_count() + if group_size <= 0: + raise ValueError(f"The arg 'group_size' ({group_size}) must be positive") + + world_size = get_world_size() + if world_size < group_size: + raise ValueError(f"The arg 'group_size' ({group_size}) must not exceed the world size ({world_size})") + if world_size % group_size != 0: + raise ValueError("The world size must be divisible by 'group_size'") + + subgroups = [] + cur_subgroup = None + + for subgroup_id in range(world_size // group_size): + start_rank = subgroup_id * group_size + end_rank = start_rank + group_size + ranks_in_subgroup = list(range(start_rank, end_rank)) + subgroup = new_group( + ranks=ranks_in_subgroup, + timeout=timeout, + backend=backend, + pg_options=pg_options, + ) + subgroups.append(subgroup) + + rank = get_rank() + if rank in ranks_in_subgroup: + cur_subgroup = subgroup + logger.info( + "Rank %s is assigned to subgroup %s", + rank, ranks_in_subgroup + ) + + return cur_subgroup, subgroups + + +def new_subgroups_by_enumeration( + ranks_per_subgroup_list, + timeout=None, + backend=None, + pg_options=None, +): + """ + Create subgroups by dividing the global world. + + The division is specified by a nested list of ranks. The subgroups cannot have + overlap, and some ranks may not have to be in any subgroup. + + This is a convenience API that calls ``new_group`` to generate multiple subgroups. + It requires that all processes in the main group (i.e. all + processes that are part of the distributed job) enter this function, even + if they are not going to be members of the group. + + .. warning:: + Using multiple process groups with the ``NCCL`` backend concurrently + is not safe and the user should perform explicit synchronization in + their application to ensure only one process group is used at a time. + This means collectives from one process group should have completed + execution on the device (not just enqueued since CUDA execution is + async) before collectives from another process group are enqueued. + See `Using multiple NCCL communicators concurrently `_ for more details. + + Args: + ranks_per_subgroup_list (list[list[int]]): A nested list of ranks of + group members. + timeout (timedelta, optional): see `init_process_group` for details and default value. + backend (str or Backend, optional): The backend to use. Depending on + build-time configurations, valid values are ``gloo`` and ``nccl``. + By default uses the same backend as the global group. 
This field + should be given as a lowercase string (e.g., ``"gloo"``), which can + also be accessed via :class:`Backend` attributes (e.g., + ``Backend.GLOO``). If ``None`` is passed in, the backend + corresponding to the default process group will be used. Default is + ``None``. + pg_options (ProcessGroupOptions, optional): process group options + specifying what additional options need to be passed in during + the construction of specific process groups. i.e. for the ``nccl`` + backend, ``is_high_priority_stream`` can be specified so that + process group can pick up high priority cuda streams. + + Returns: + The subgroup containing the current rank, and all the subgroups used for cleanup. + + Examples: + >>> # Create two subgroups, where each has 2 processes. + >>> # xdoctest: +SKIP("need process group init") + >>> cur_subgroup, subgroups = dist.new_subgroups(ranks=[[0, 2], [1, 3]]) + >>> rank = dist.get_rank() + >>> tensor = torch.ones(1, device=rank) * rank + >>> dist.all_reduce(tensor, group=cur_subgroup) + >>> tensor + tensor([2]) # Subgroup 0: ranks 0 and 2 + tensor([4]) # Subgroup 1: ranks 1 and 3 + """ + if ranks_per_subgroup_list is None or len(ranks_per_subgroup_list) == 0: + raise ValueError("The arg 'ranks_per_subgroup_list' cannot be empty") + + subgroups = [] + cur_subgroup = None + # Create a mapping from rank to subgroup to check if there is any subgroup overlap. + rank_to_ranks_dict = {} # type: ignore[var-annotated] + for ranks in ranks_per_subgroup_list: + subgroup = new_group( + ranks=ranks, + timeout=timeout, + backend=backend, + pg_options=pg_options, + ) + subgroups.append(subgroup) + my_rank = get_rank() + for rank in ranks: + if rank in rank_to_ranks_dict: + raise ValueError( + f"Rank {rank} has appeared in both subgroup {rank_to_ranks_dict[rank]} and {ranks}" + ) + rank_to_ranks_dict[rank] = ranks + if my_rank == rank: + cur_subgroup = subgroup + logger.info("Rank %s is assigned to subgroup %s", rank, ranks) + + return cur_subgroup, subgroups + + +def _find_pg_by_ranks_and_tag(tag: str, ranks: List[int]) -> Optional[ProcessGroup]: + if len(tag) > 0 and not tag.startswith("ptd:") and not tag.startswith("user:"): + tag = f"user:{tag}" + + for group in _world.tags_to_pg.get(tag, []): + if group.size() != len(ranks): + continue + + group_ranks = get_process_group_ranks(group) + good = all(r in group_ranks for r in ranks) + if good: + return group + return None + +def _find_or_create_pg_by_ranks_and_tag(tag: str, ranks: List[int], stride: int) -> ProcessGroup: + assert len(ranks) % stride == 0, f"Ranks length ({len(ranks)}) must be divisible by stride ({stride})" + + my_rank = get_rank() + my_ranks = None + + if stride == len(ranks): + my_ranks = ranks.copy() + assert my_rank in my_ranks, "rankset doesn't include the current node" + else: + for i in range(0, len(ranks), stride): + rank_set = ranks[i : i + stride] + if my_rank in rank_set: + my_ranks = rank_set + assert my_ranks is not None, "rankset doesn't include the current node" + + my_ranks.sort() + + pg = _find_pg_by_ranks_and_tag(tag, my_ranks) + if pg is not None: + return pg + if tag == "": + raise ValueError("Cannot automatically create PG with empty tag") + # TODO copy settings and timeout from default PG + return _new_group_with_tag(my_ranks, pg_tag=tag) + +def _get_group_tag(pg: ProcessGroup) -> str: + """Return the tag associated with ``pg``.""" + tag = _world.pg_to_tag[pg] + if tag.startswith("user:"): + tag = tag[5:] + return tag + +def _get_process_group_name(pg: ProcessGroup) -> str: + return 
_world.pg_names.get(pg, "None") + +def _get_process_group_store(pg: ProcessGroup) -> Store: + return _world.pg_map[pg][1] + +# This ops are not friendly to TorchDynamo. So, we decide to disallow these ops +# in FX graph, allowing them to run them on eager, with torch.compile. +dynamo_unsupported_distributed_c10d_ops = [ + recv, + all_gather_object, + all_gather_coalesced, + all_to_all_single, + all_reduce, + gather_object, + all_to_all, + all_reduce_coalesced, + gather, + broadcast_object_list, + barrier, + scatter, + scatter_object_list, + reduce, + all_gather, + reduce_scatter, + all_gather_into_tensor, + broadcast, + reduce_scatter_tensor, + send, +] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/launch.py b/venv/lib/python3.10/site-packages/torch/distributed/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f7979308a296bc60eff2ef07b5e5933e812d65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/launch.py @@ -0,0 +1,198 @@ +r""" +Module ``torch.distributed.launch``. + +``torch.distributed.launch`` is a module that spawns up multiple distributed +training processes on each of the training nodes. + +.. warning:: + + This module is going to be deprecated in favor of :ref:`torchrun `. + +The utility can be used for single-node distributed training, in which one or +more processes per node will be spawned. The utility can be used for either +CPU training or GPU training. If the utility is used for GPU training, +each distributed process will be operating on a single GPU. This can achieve +well-improved single-node training performance. It can also be used in +multi-node distributed training, by spawning up multiple processes on each node +for well-improved multi-node distributed training performance as well. +This will especially be beneficial for systems with multiple Infiniband +interfaces that have direct-GPU support, since all of them can be utilized for +aggregated communication bandwidth. + +In both cases of single-node distributed training or multi-node distributed +training, this utility will launch the given number of processes per node +(``--nproc-per-node``). If used for GPU training, this number needs to be less +or equal to the number of GPUs on the current system (``nproc_per_node``), +and each process will be operating on a single GPU from *GPU 0 to +GPU (nproc_per_node - 1)*. + +**How to use this module:** + +1. Single-Node multi-process distributed training + +:: + + python -m torch.distributed.launch --nproc-per-node=NUM_GPUS_YOU_HAVE + YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other + arguments of your training script) + +2. Multi-Node multi-process distributed training: (e.g. two nodes) + + +Node 1: *(IP: 192.168.1.1, and has a free port: 1234)* + +:: + + python -m torch.distributed.launch --nproc-per-node=NUM_GPUS_YOU_HAVE + --nnodes=2 --node-rank=0 --master-addr="192.168.1.1" + --master-port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 + and all other arguments of your training script) + +Node 2: + +:: + + python -m torch.distributed.launch --nproc-per-node=NUM_GPUS_YOU_HAVE + --nnodes=2 --node-rank=1 --master-addr="192.168.1.1" + --master-port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 + and all other arguments of your training script) + +3. To look up what optional arguments this module offers: + +:: + + python -m torch.distributed.launch --help + + +**Important Notices:** + +1. 
This utility and multi-process distributed (single-node or +multi-node) GPU training currently only achieves the best performance using +the NCCL distributed backend. Thus NCCL backend is the recommended backend to +use for GPU training. + +2. In your training program, you must parse the command-line argument: +``--local-rank=LOCAL_PROCESS_RANK``, which will be provided by this module. +If your training program uses GPUs, you should ensure that your code only +runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by: + +Parsing the local_rank argument + +:: + + >>> # xdoctest: +SKIP + >>> import argparse + >>> parser = argparse.ArgumentParser() + >>> parser.add_argument("--local-rank", type=int) + >>> args = parser.parse_args() + +Set your device to local rank using either + +:: + + >>> torch.cuda.set_device(args.local_rank) # before your code runs + +or + +:: + + >>> with torch.cuda.device(args.local_rank): + >>> # your code to run + >>> ... + +3. In your training program, you are supposed to call the following function +at the beginning to start the distributed backend. It is strongly recommended +that ``init_method=env://``. Other init methods (e.g. ``tcp://``) may work, +but ``env://`` is the one that is officially supported by this module. + +:: + + >>> torch.distributed.init_process_group(backend='YOUR BACKEND', + >>> init_method='env://') + +4. In your training program, you can either use regular distributed functions +or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your +training program uses GPUs for training and you would like to use +:func:`torch.nn.parallel.DistributedDataParallel` module, +here is how to configure it. + +:: + + >>> model = torch.nn.parallel.DistributedDataParallel(model, + >>> device_ids=[args.local_rank], + >>> output_device=args.local_rank) + +Please ensure that ``device_ids`` argument is set to be the only GPU device id +that your code will be operating on. This is generally the local rank of the +process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``, +and ``output_device`` needs to be ``args.local_rank`` in order to use this +utility + +5. Another way to pass ``local_rank`` to the subprocesses via environment variable +``LOCAL_RANK``. This behavior is enabled when you launch the script with +``--use-env=True``. You must adjust the subprocess example above to replace +``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher +will not pass ``--local-rank`` when you specify this flag. + +.. warning:: + + ``local_rank`` is NOT globally unique: it is only unique per process + on a machine. Thus, don't use it to decide if you should, e.g., + write to a networked filesystem. See + https://github.com/pytorch/pytorch/issues/12042 for an example of + how things can go wrong if you don't do this correctly. + + + +""" + +import logging +import warnings + +from torch.distributed.run import get_args_parser, run + + +logger = logging.getLogger(__name__) + + +def parse_args(args): + parser = get_args_parser() + parser.add_argument( + "--use-env", + "--use_env", + default=False, + action="store_true", + help="Use environment variable to pass " + "'local rank'. For legacy reasons, the default value is False. 
" + "If set to True, the script will not pass " + "--local-rank as argument, and will instead set LOCAL_RANK.", + ) + return parser.parse_args(args) + + +def launch(args): + if args.no_python and not args.use_env: + raise ValueError( + "When using the '--no-python' flag," + " you must also set the '--use-env' flag." + ) + run(args) + + +def main(args=None): + warnings.warn( + "The module torch.distributed.launch is deprecated\n" + "and will be removed in future. Use torchrun.\n" + "Note that --use-env is set by default in torchrun.\n" + "If your script expects `--local-rank` argument to be set, please\n" + "change it to read from `os.environ['LOCAL_RANK']` instead. See \n" + "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n" + "further instructions\n", + FutureWarning, + ) + args = parse_args(args) + launch(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d25f8080c26916486bfc567ee4206a3b8c8da6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +from torch.distributed.launcher.api import ( # noqa: F401 + LaunchConfig, + elastic_launch, + launch_agent, +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b592d442069a183d5a97ec26cce6e6d9489d466 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92dd2a77a536ff214bb0aa84970fbf80d2d5ac47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py b/venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b4aca644f84384d791dbedff8cb7d17ecb7994 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/launcher/api.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+import sys +import uuid +from dataclasses import dataclass, field +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch.distributed.elastic.rendezvous.registry as rdzv_registry +from torch.distributed.elastic import events, metrics +from torch.distributed.elastic.agent.server.api import WorkerSpec +from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent +from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, LogsSpecs, SignalException +from torch.distributed.elastic.multiprocessing.errors import ChildFailedError +from torch.distributed.elastic.rendezvous import RendezvousParameters +from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint +from torch.distributed.elastic.utils.logging import get_logger + +__all__ = ['LaunchConfig', 'elastic_launch', 'launch_agent'] + +logger = get_logger(__name__) + + +@dataclass +class LaunchConfig: + """ + Creates a rendezvous config. + + Args: + min_nodes: Minimum amount of nodes that the user function will + be launched on. Elastic agent ensures that the user + function start only when the min_nodes amount enters + the rendezvous. + max_nodes: Maximum amount of nodes that the user function + will be launched on. + nproc_per_node: On each node the elastic agent will launch + this amount of workers that will execute user + defined function. + rdzv_backend: rdzv_backend to use in the rendezvous (zeus-adapter, etcd). + rdzv_endpoint: The endpoint of the rdzv sync. storage. + rdzv_configs: Key, value pair that specifies rendezvous specific configuration. + rdzv_timeout: Legacy argument that specifies timeout for the rendezvous. It is going + to be removed in future versions, see the note below. The default timeout is 900 seconds. + run_id: The unique run id of the job (if not passed a unique one will be + deduced from run environment - flow workflow id in flow - or auto generated). + role: User defined role of the worker (defaults to "trainer"). + max_restarts: The maximum amount of restarts that elastic agent will conduct + on workers before failure. + monitor_interval: The interval in seconds that is used by the elastic_agent + as a period of monitoring workers. + start_method: The method is used by the elastic agent to start the + workers (spawn, fork, forkserver). + metrics_cfg: configuration to initialize metrics. + local_addr: address of the local node if any. If not set, a lookup on the local + machine's FQDN will be performed. + local_ranks_filter: ranks for which to show logs in console. If not set, show from all. + ..note: + `rdzv_timeout` is a legacy argument that will be removed in future. 
+ Set the timeout via `rdzv_configs['timeout']` + + """ + + min_nodes: int + max_nodes: int + nproc_per_node: int + logs_specs: Optional[LogsSpecs] = None + run_id: str = "" + role: str = "default_role" + rdzv_endpoint: str = "" + rdzv_backend: str = "etcd" + rdzv_configs: Dict[str, Any] = field(default_factory=dict) + rdzv_timeout: int = -1 + max_restarts: int = 3 + monitor_interval: float = 30 + start_method: str = "spawn" + log_line_prefix_template: Optional[str] = None + metrics_cfg: Dict[str, str] = field(default_factory=dict) + local_addr: Optional[str] = None + + def __post_init__(self): + default_timeout = 900 + if self.rdzv_timeout != -1: + self.rdzv_configs["timeout"] = self.rdzv_timeout + elif "timeout" not in self.rdzv_configs: + self.rdzv_configs["timeout"] = default_timeout + + # Post-processing to enable refactoring to introduce logs_specs due to non-torchrun API usage + if self.logs_specs is None: + self.logs_specs = DefaultLogsSpecs() + + +class elastic_launch: + """ + Launches an torchelastic agent on the container that invoked the entrypoint. + + 1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters)/ + ``entrypoint`` can be a function or a command. + 2. The return value is a map of each worker's output mapped + by their respective global rank. + + Usage + + :: + + def worker_fn(foo): + # ... + + def main(): + # entrypoint is a function. + outputs = elastic_launch(LaunchConfig, worker_fn)(foo) + # return rank 0's output + return outputs[0] + + # entrypoint is a command and ``script.py`` is the python module. + outputs = elastic_launch(LaunchConfig, "script.py")(args) + outputs = elastic_launch(LaunchConfig, "python")("script.py") + """ + + def __init__( + self, + config: LaunchConfig, + entrypoint: Union[Callable, str, None], + ): + self._config = config + self._entrypoint = entrypoint + + def __call__(self, *args): + return launch_agent(self._config, self._entrypoint, list(args)) + + +def _get_entrypoint_name( + entrypoint: Union[Callable, str, None], args: List[Any] +) -> str: + """Retrieve entrypoint name with the rule: + 1. If entrypoint is a function, use ``entrypoint.__qualname__``. + 2. If entrypoint is a string, check its value: + 2.1 if entrypoint equals to ``sys.executable`` (like "python"), use the first element from ``args`` + which does not start with hifen letter (for example, "-u" will be skipped). + 2.2 otherwise, use ``entrypoint`` value. + 3. Otherwise, return empty string. + """ + if isinstance(entrypoint, Callable): # type: ignore[arg-type] + return entrypoint.__name__ # type: ignore[union-attr] + elif isinstance(entrypoint, str): + if entrypoint == sys.executable: + return next((arg for arg in args if arg[0] != "-"), "") + else: + return entrypoint + else: + return "" + + +def _get_addr_and_port( + rdzv_parameters: RendezvousParameters, +) -> Tuple[Optional[str], Optional[int]]: + if rdzv_parameters.backend != "static": + return (None, None) + endpoint = rdzv_parameters.endpoint + endpoint = endpoint.strip() + if not endpoint: + raise ValueError( + "Endpoint is missing in endpoint. Try to add --master-addr and --master-port" + ) + master_addr, master_port = parse_rendezvous_endpoint(endpoint, default_port=-1) + if master_port == -1: + raise ValueError( + f"port is missing in endpoint: {endpoint}. 
Try to specify --master-port" + ) + return (master_addr, master_port) + + +def launch_agent( + config: LaunchConfig, + entrypoint: Union[Callable, str, None], + args: List[Any], +) -> Dict[int, Any]: + if not config.run_id: + run_id = str(uuid.uuid4().int) + logger.warning("config has no run_id, generated a random run_id: %s", run_id) + config.run_id = run_id + + entrypoint_name = _get_entrypoint_name(entrypoint, args) + + logger.info( + "Starting elastic_operator with launch configs:\n" + " entrypoint : %(entrypoint)s\n" + " min_nodes : %(min_nodes)s\n" + " max_nodes : %(max_nodes)s\n" + " nproc_per_node : %(nproc_per_node)s\n" + " run_id : %(run_id)s\n" + " rdzv_backend : %(rdzv_backend)s\n" + " rdzv_endpoint : %(rdzv_endpoint)s\n" + " rdzv_configs : %(rdzv_configs)s\n" + " max_restarts : %(max_restarts)s\n" + " monitor_interval : %(monitor_interval)s\n" + " log_dir : %(log_dir)s\n" + " metrics_cfg : %(metrics_cfg)s\n", + { + "entrypoint": entrypoint_name, + "min_nodes": config.min_nodes, + "max_nodes": config.max_nodes, + "nproc_per_node": config.nproc_per_node, + "run_id": config.run_id, + "rdzv_backend": config.rdzv_backend, + "rdzv_endpoint": config.rdzv_endpoint, + "rdzv_configs": config.rdzv_configs, + "max_restarts": config.max_restarts, + "monitor_interval": config.monitor_interval, + "log_dir": config.logs_specs.root_log_dir, # type: ignore[union-attr] + "metrics_cfg": config.metrics_cfg + } + ) + + rdzv_parameters = RendezvousParameters( + backend=config.rdzv_backend, + endpoint=config.rdzv_endpoint, + run_id=config.run_id, + min_nodes=config.min_nodes, + max_nodes=config.max_nodes, + local_addr=config.local_addr, + **config.rdzv_configs, + ) + + master_addr, master_port = _get_addr_and_port(rdzv_parameters) + + spec = WorkerSpec( + role=config.role, + local_world_size=config.nproc_per_node, + entrypoint=entrypoint, + args=tuple(args), + rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters), + max_restarts=config.max_restarts, + monitor_interval=config.monitor_interval, + master_addr=master_addr, + master_port=master_port, + local_addr=config.local_addr, + ) + + agent = LocalElasticAgent( + spec=spec, + logs_specs=config.logs_specs, # type: ignore[arg-type] + start_method=config.start_method, + log_line_prefix_template=config.log_line_prefix_template, + ) + + shutdown_rdzv = True + try: + metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg)) + + result = agent.run() + # records that agent.run() has succeeded NOT that workers have succeeded + events.record(agent.get_event_succeeded()) + + if result.is_failed(): + # ChildFailedError is treated specially by @record + # if the error files for the failed children exist + # @record will copy the first error (root cause) + # to the error file of the launcher process. 
+ raise ChildFailedError( + name=entrypoint_name, + failures=result.failures, + ) + + return result.return_values + except ChildFailedError: + raise + except SignalException: + # when the agent dies with a signal do NOT shutdown the rdzv_handler + # since this closes the rendezvous on this rdzv_id permanently and + # prevents any additional scaling events + shutdown_rdzv = False + events.record(agent.get_event_failed()) + raise + except Exception: + events.record(agent.get_event_failed()) + raise + finally: + if shutdown_rdzv: + spec.rdzv_handler.shutdown() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/logging_handlers.py b/venv/lib/python3.10/site-packages/torch/distributed/logging_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..3c607fe45da7713dc52ca01ce70abb53cdebb42f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/logging_handlers.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import Dict, List + +__all__: List[str] = [] + +_log_handlers: Dict[str, logging.Handler] = { + "default": logging.NullHandler(), +} diff --git a/venv/lib/python3.10/site-packages/torch/distributed/remote_device.py b/venv/lib/python3.10/site-packages/torch/distributed/remote_device.py new file mode 100644 index 0000000000000000000000000000000000000000..e26d398bf78654def5f5391eac7f67ef55579cb2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/remote_device.py @@ -0,0 +1,128 @@ +from typing import Optional, Union + +import torch + + +class _remote_device: + """ + Represents a device on a remote worker. + + Args: + remote_device (str or torch.device): Represents a device on a remote worker. + The string format should be one of the following: + + 1. "/", where the device field can be parsed as torch.device type. + E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0". + In addition, the device field can be optional and the default value is "cpu". + 2. "rank:/", where is the rank of the + process and device can be parsed as torch.device type. + E.g., "rank:0/cpu", "rank:0", "rank:0/cuda:0" + 3. and are optional and formats like "cpu" + and "cuda:1", just represent local devices. + """ + + def __init__(self, remote_device: Union[str, torch.device]): + PARSE_ERROR = ( + f"Could not parse remote_device: {remote_device}. The valid format is " + "'/' or 'rank:/' or ''" + ) + self._worker_name = None + self._rank = None + self._device: Optional[Union[str, int, torch.device]] = None + + if isinstance(remote_device, torch.device): + self._device = remote_device + elif isinstance(remote_device, str): + fields = remote_device.split("/") + if len(fields) == 2: + self._worker_name, self._device = fields + elif len(fields) == 1: + # Check if this is a valid device. + if _remote_device._is_valid_local_device(fields[0]): + self._device = fields[0] + else: + self._worker_name = fields[0] + self._device = "cpu" + else: + raise ValueError(PARSE_ERROR) + else: + raise TypeError(f'Invalid type for remote_device: {type(remote_device)}') + + # Do some basic sanity check (no empty string) + if self._worker_name is not None and not self._worker_name: + raise ValueError(PARSE_ERROR) + + # Validate the device. + self._device = torch.device(self._device) + + # Check for rank based format. 
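+        # A string such as "rank:0/cuda:0" was split on "/" above, so the
+        # remaining worker-name field may actually be "rank:<id>"; extract the
+        # numeric rank and clear the worker name in that case.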
+ if self._worker_name is not None: + fields = self._worker_name.split(":") + if len(fields) == 2: + # rank:/device format, extract rank + if fields[0] == "rank" and fields[1].isdigit(): + self._rank = int(fields[1]) # type: ignore[assignment] + self._worker_name = None + else: + raise ValueError(PARSE_ERROR) + elif len(fields) > 2: + raise ValueError(PARSE_ERROR) + + @staticmethod + def _is_valid_local_device(device): + # Check for torch.device + try: + torch.device(device) + return True + except Exception: + return False + + def worker_name(self) -> Optional[str]: + """Return the name of remote worker representing the remote device and ``None`` if no worker name is available.""" + return self._worker_name + + def rank(self) -> Optional[int]: + """ + Returns the rank of remote worker representing the remote device. + Returns ``None`` if no rank is available. + """ + return self._rank + + def device(self) -> torch.device: + """Return the local device on the remote worker.""" + return self._device # type: ignore[return-value] + + def __repr__(self): + if self._device is not None: + if self._worker_name is not None: + return f'{self._worker_name}/{self._device}' + elif self._rank is not None: + return f'rank:{self._rank}/{self._device}' + else: + return str(self._device) + else: + if self._worker_name is not None: + return f'{self._worker_name}' + elif self._rank is not None: + return f'{self._rank}' + else: + raise RuntimeError('Invalid state!') + + def __eq__(self, other): + if not isinstance(other, _remote_device): + return False + + if ( + self._worker_name == other._worker_name + and self._device == other._device + and self._rank == other._rank + ): + return True + + return False + + + def __hash__(self): + return hash(self._worker_name) ^ \ + hash(self._device) ^ \ + hash(self._rank) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rendezvous.py b/venv/lib/python3.10/site-packages/torch/distributed/rendezvous.py new file mode 100644 index 0000000000000000000000000000000000000000..9e701067d8807a73baf8f63daa9aea3f8a72b72d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rendezvous.py @@ -0,0 +1,256 @@ +try: + from urllib.parse import urlparse, urlunparse +except ImportError as e: + raise ImportError( + "urllib cannot be found, urlparse from python2 is no longer supported." + ) from e + +import numbers +import os +import sys +from datetime import timedelta +from typing import Dict, Optional, Callable, Iterator, Tuple + +from torch.distributed import FileStore, PrefixStore, Store, TCPStore + +from .constants import default_pg_timeout + + +_rendezvous_handlers: Dict[str, Callable[..., Iterator[Tuple[Store, int, int]]]] = {} + + +def register_rendezvous_handler(scheme, handler): + """ + Register a new rendezvous handler. + + Before we can run collective algorithms, participating processes + need to find each other and exchange information to be able to + communicate. We call this process rendezvous. + + The outcome of the rendezvous process is a triplet containing a + shared key/value store, the rank of the process, and the total + number of participating processes. + + If none of the bundled rendezvous methods apply to your execution + environment you can opt to register your own rendezvous handler. + Pick a unique name and use the URL scheme to identify it when + calling the `rendezvous()` function. + + Args: + scheme (str): URL scheme to identify your rendezvous handler. 
+ handler (function): Handler that is invoked when the + `rendezvous()` function is called with a URL that uses + the corresponding scheme. It must be a generator function + that yields the triplet. + """ + global _rendezvous_handlers + if scheme in _rendezvous_handlers: + raise RuntimeError( + f"Rendezvous handler for {scheme}:// already registered" + ) + _rendezvous_handlers[scheme] = handler + + +# Query will have format "rank=0&world_size=1" and is +# converted into {"rank": 0, "world_size": 1} +def _query_to_dict(query: str) -> Dict[str, str]: + return {pair[0]: pair[1] for pair in (pair.split("=") for pair in filter(None, query.split("&")))} + + +def _rendezvous_helper(url: str, rank: int, world_size_opt: Optional[int], **kwargs): + result = urlparse(url) + if world_size_opt is None: + world_size = -1 + if result.scheme == "env": + rank = int(os.environ.get("RANK", rank)) + # If the world_size env variable is not present then it is a dynamic group + world_size = int(os.environ.get("WORLD_SIZE", world_size)) + else: + world_size = world_size_opt + if rank != -1 or world_size != -1 or world_size_opt is None: + query_dict = _query_to_dict(result.query) + assert ( + "rank" not in query_dict and "world_size" not in query_dict + ), f"The url: {url} has node-specific arguments(rank, world_size) already." + if rank != -1: + query_dict["rank"] = str(rank) + if world_size != -1 or world_size_opt is None: + query_dict["world_size"] = str(world_size) + result = result._replace( + query=f"{'&'.join([f'{k}={v}' for k, v in query_dict.items()])}" + ) + url = urlunparse(result) + + if result.scheme not in _rendezvous_handlers: + raise RuntimeError(f"No rendezvous handler for {result.scheme}://") + return _rendezvous_handlers[result.scheme](url, **kwargs) + + +def rendezvous(url: str, rank: int = -1, world_size: int = -1, **kwargs): + if not isinstance(url, (str, bytes)): + raise RuntimeError(f"`url` must be a string. {type(url)}: {url}") + + if not isinstance(rank, numbers.Integral): + raise RuntimeError(f"`rank` must be an integer. {rank}") + + if not isinstance(world_size, numbers.Integral): + raise RuntimeError(f"`world_size` must be an integer. {world_size}") + + return _rendezvous_helper(url, rank, world_size, **kwargs) + + +def _create_store_from_options(backend_options, rank): + store, _, _ = next(_rendezvous_helper(backend_options.init_method, rank, None)) + return store + + +def _rendezvous_error(msg): + return ValueError("Error initializing torch.distributed using " + msg) + + +def _file_rendezvous_handler(url: str, **kwargs): + def _error(msg): + return _rendezvous_error("file:// rendezvous: " + msg) + + result = urlparse(url) + path = result.path + if sys.platform == "win32": + import urllib.request + + full_path = result.netloc + result.path + path = urllib.request.url2pathname(full_path) + if path: + # Normalizing an empty string produces ".", which is not expected. 
+ path = os.path.normpath(path) + + if not path: + raise _error("path missing") + query_dict = _query_to_dict(result.query) + if "rank" not in query_dict: + raise _error("rank parameter missing") + if "world_size" not in query_dict: + raise _error("world size parameter missing") + + rank = int(query_dict["rank"]) + world_size = int(query_dict["world_size"]) + store = FileStore(path, world_size) + yield (store, rank, world_size) + + # If this configuration is invalidated, there is nothing we can do about it + raise RuntimeError("Unable to perform rerendezvous using file:// method") + + +def _torchelastic_use_agent_store() -> bool: + return os.environ.get("TORCHELASTIC_USE_AGENT_STORE", None) == str(True) + + +def _create_c10d_store(hostname, port, rank, world_size, timeout, use_libuv=False) -> Store: + """ + Smartly creates a c10d Store object on ``rank`` based on whether we need to re-use agent store. + + The TCPStore server is assumed to be hosted + on ``hostname:port``. + + If ``torchelastic_use_agent_store()`` is ``True``, then it is assumed that + the agent leader (node rank 0) hosts the TCPStore server (for which the + endpoint is specified by the given ``hostname:port``). Hence + ALL ranks will create and return a TCPStore client (e.g. ``start_daemon=False``). + + If ``torchelastic_use_agent_store()`` is ``False``, then rank 0 will host + the TCPStore (with multi-tenancy) and it is assumed that rank 0's hostname + and port are correctly passed via ``hostname`` and ``port``. All + non-zero ranks will create and return a TCPStore client. + """ + # check if port is uint16_t + if not 0 <= port < 2**16: + raise ValueError(f"port must have value from 0 to 65535 but was {port}.") + + if _torchelastic_use_agent_store(): + attempt = os.environ["TORCHELASTIC_RESTART_COUNT"] + tcp_store = TCPStore(hostname, port, world_size, False, timeout) + return PrefixStore(f"/worker/attempt_{attempt}", tcp_store) + else: + start_daemon = rank == 0 + return TCPStore( + hostname, port, world_size, start_daemon, timeout, multi_tenant=True, use_libuv=use_libuv + ) + + +def _tcp_rendezvous_handler( + url: str, timeout: timedelta = default_pg_timeout, **kwargs +): + def _error(msg): + return _rendezvous_error("tcp:// rendezvous: " + msg) + + result = urlparse(url) + if not result.port: + raise _error("port number missing") + query_dict = _query_to_dict(result.query) + if "rank" not in query_dict: + raise _error("rank parameter missing") + if "world_size" not in query_dict: + raise _error("world size parameter missing") + + rank = int(query_dict["rank"]) + world_size = int(query_dict["world_size"]) + use_libuv = query_dict.get("use_libuv", "0") == "1" + assert result.hostname is not None + + store = _create_c10d_store(result.hostname, result.port, rank, world_size, timeout, use_libuv) + + yield (store, rank, world_size) + + # If this configuration is invalidated, there is nothing we can do about it + raise RuntimeError("Unable to perform re-rendezvous using tcp:// method") + + +def _env_rendezvous_handler( + url: str, timeout: timedelta = default_pg_timeout, **kwargs +): + def _error(msg): + return _rendezvous_error("env:// rendezvous: " + msg) + + def _env_error(var): + return _error(f"environment variable {var} expected, but not set") + + def _get_env_or_raise(env_var: str) -> str: + env_val = os.environ.get(env_var, None) + if not env_val: + raise _env_error(env_var) + else: + return env_val + + result = urlparse(url) + query_dict = _query_to_dict(result.query) + + rank: int + world_size: int + master_port: 
int + master_addr: str + + if "rank" in query_dict: + rank = int(query_dict["rank"]) + else: + rank = int(_get_env_or_raise("RANK")) + + if "world_size" in query_dict: + world_size = int(query_dict["world_size"]) + else: + world_size = int(_get_env_or_raise("WORLD_SIZE")) + + + master_addr = _get_env_or_raise("MASTER_ADDR") + master_port = int(_get_env_or_raise("MASTER_PORT")) + use_libuv = query_dict.get("use_libuv", os.environ.get("USE_LIBUV", "0")) == "1" + + store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout, use_libuv) + + yield (store, rank, world_size) + + # If this configuration is invalidated, there is nothing we can do about it + raise RuntimeError("Unable to perform re-rendezvous using env:// method") + + +register_rendezvous_handler("tcp", _tcp_rendezvous_handler) +register_rendezvous_handler("env", _env_rendezvous_handler) +register_rendezvous_handler("file", _file_rendezvous_handler) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..de8153e19c01f1a5fad4d3af807e1fe9eb346f17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py @@ -0,0 +1,249 @@ +from datetime import timedelta +import logging +import os +import threading +import warnings +from typing import Generator, Tuple +from urllib.parse import urlparse + +import torch +import torch.distributed as dist + +logger = logging.getLogger(__name__) + + +_init_counter = 0 +_init_counter_lock = threading.Lock() + +__all__ = ["is_available"] + +def is_available() -> bool: + return hasattr(torch._C, "_rpc_init") + + +if is_available() and not torch._C._rpc_init(): + raise RuntimeError("Failed to initialize torch.distributed.rpc") + + +if is_available(): + from torch._C._distributed_c10d import Store + from torch._C._distributed_rpc import ( + _disable_jit_rref_pickle, + _enable_jit_rref_pickle, + _disable_server_process_global_profiler, + _enable_server_process_global_profiler, + _set_and_start_rpc_agent, + _reset_current_rpc_agent, + _delete_all_user_and_unforked_owner_rrefs, + _destroy_rref_context, + _set_profiler_node_id, + _is_current_rpc_agent_set, + _rref_context_get_debug_info, + _cleanup_python_rpc_handler, + _invoke_rpc_builtin, + _invoke_rpc_python_udf, + _invoke_rpc_torchscript, + _invoke_remote_builtin, + _invoke_remote_python_udf, + _invoke_remote_torchscript, + _set_rpc_timeout, + _get_current_rpc_agent, + get_rpc_timeout, + enable_gil_profiling, + RpcBackendOptions, + _TensorPipeRpcBackendOptionsBase, + RpcAgent, + PyRRef, + TensorPipeAgent, + RemoteProfilerManager, + WorkerInfo, + _DEFAULT_INIT_METHOD, + _DEFAULT_NUM_WORKER_THREADS, + _UNSET_RPC_TIMEOUT, + _DEFAULT_RPC_TIMEOUT_SEC, + ) # noqa: F401 + + from . 
import api, backend_registry, functions + from .api import * # noqa: F401,F403 + import numbers + + import torch.distributed.autograd as dist_autograd + + from .backend_registry import BackendType + from .options import TensorPipeRpcBackendOptions # noqa: F401 + from .server_process_global_profiler import ( + _server_process_global_profile, + ) + + rendezvous_iterator: Generator[Tuple[Store, int, int], None, None] + + __all__ += ["init_rpc", "BackendType", "TensorPipeRpcBackendOptions"] + __all__ = __all__ + api.__all__ + backend_registry.__all__ # noqa: PLE0605 + + def init_rpc( + name, + backend=None, + rank=-1, + world_size=None, + rpc_backend_options=None, + ): + r""" + Initializes RPC primitives such as the local RPC agent + and distributed autograd, which immediately makes the current + process ready to send and receive RPCs. + + Args: + name (str): a globally unique name of this node. (e.g., + ``Trainer3``, ``ParameterServer2``, ``Master``, ``Worker1``) + Name can only contain number, alphabet, underscore, colon, + and/or dash, and must be shorter than 128 characters. + backend (BackendType, optional): The type of RPC backend + implementation. Supported values is + ``BackendType.TENSORPIPE`` (the default). + See :ref:`rpc-backends` for more information. + rank (int): a globally unique id/rank of this node. + world_size (int): The number of workers in the group. + rpc_backend_options (RpcBackendOptions, optional): The options + passed to the RpcAgent constructor. It must be an agent-specific + subclass of :class:`~torch.distributed.rpc.RpcBackendOptions` + and contains agent-specific initialization configurations. By + default, for all agents, it sets the default timeout to 60 + seconds and performs the rendezvous with an underlying process + group initialized using ``init_method = "env://"``, + meaning that environment variables ``MASTER_ADDR`` and + ``MASTER_PORT`` need to be set properly. See + :ref:`rpc-backends` for more information and find which options + are available. + """ + torch._C._log_api_usage_once("torch.distributed.init_rpc") + if backend is not None and not isinstance( + backend, backend_registry.BackendType + ): + raise TypeError("Argument backend must be a member of BackendType") + + if rpc_backend_options is not None and not isinstance( + rpc_backend_options, RpcBackendOptions + ): + raise TypeError( + "Argument rpc_backend_options must be an instance of RpcBackendOptions" + ) + + # Try to detect the backend from the options + if backend is None and rpc_backend_options is not None: + for candidate_backend in BackendType: + if isinstance( + rpc_backend_options, + type( + backend_registry.construct_rpc_backend_options( + candidate_backend + ) + ), + ): + backend = candidate_backend + break + else: + raise TypeError( + f"Could not infer backend for options {rpc_backend_options}" + ) + # Ignore type error because mypy doesn't handle dynamically generated type objects (#4865) + if backend != BackendType.TENSORPIPE: # type: ignore[attr-defined] + logger.warning( + "RPC was initialized with no explicit backend but with options " # type: ignore[attr-defined] + "corresponding to %(backend)s, hence that backend will be used " + "instead of the default BackendType.TENSORPIPE. To silence this " + "warning pass `backend=%(backend)s` explicitly.", + {'backend': backend} + ) + + if backend is None: + backend = BackendType.TENSORPIPE # type: ignore[attr-defined] + + if rpc_backend_options is None: + # default construct a set of RPC backend options. 
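+            # For the default TensorPipe backend this yields options with a
+            # 60-second RPC timeout and ``init_method="env://"``, so
+            # MASTER_ADDR and MASTER_PORT must be set in the environment
+            # (see the docstring above).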
+ rpc_backend_options = backend_registry.construct_rpc_backend_options( + backend + ) + + # Create store, performs rendezvous for static RPC group. + if not world_size: + # If world_size is not set in construction and also not set in environment variables + # The store will be created for the dynamic group setting + store = dist._create_store_from_options(rpc_backend_options, rank) + else: + # This rendezvous state sometimes is destroyed before all processes + # finishing handshaking. To avoid that issue, we make it global to + # keep it alive. + global rendezvous_iterator + rendezvous_iterator = dist.rendezvous( + rpc_backend_options.init_method, rank=rank, world_size=world_size + ) + store, _, _ = next(rendezvous_iterator) + # Use same timeout as RPC. + store.set_timeout(timedelta(seconds=rpc_backend_options.rpc_timeout)) + + # Use a PrefixStore to distinguish multiple invocations. + with _init_counter_lock: + global _init_counter + store = dist.PrefixStore(str(f"rpc_prefix_{_init_counter}"), store) + _init_counter += 1 + + # Initialize autograd before RPC since _init_rpc_backend guarantees all + # processes sync via the store. If we initialize autograd after RPC, + # there could be a race where some nodes might have initialized autograd + # and others might not have. As a result, a node calling + # torch.distributed.autograd.backward() would run into errors since + # other nodes might not have been initialized. + dist_autograd._init(rank) + + _set_profiler_node_id(rank) + # Initialize RPC. + _init_rpc_backend(backend, store, name, rank, world_size, rpc_backend_options) + + def _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options): + type_mapping = { + backend: backend_registry.BackendType, + store: dist.Store, + name: str, + rank: numbers.Integral, + # world_size can be None for a dynamic group + world_size: (numbers.Integral, type(None)), + rpc_backend_options: RpcBackendOptions, + } + for arg, arg_type in type_mapping.items(): + if not isinstance(arg, arg_type): # type: ignore[arg-type] + raise RuntimeError( + f"Argument {arg} must be of type {arg_type} but got type {type(arg)}" + ) + + def _init_rpc_backend( + backend=BackendType.TENSORPIPE, # type: ignore[attr-defined] + store=None, + name=None, + rank=-1, + world_size=None, + rpc_backend_options=None, + ): + + _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options) + + if _is_current_rpc_agent_set(): + raise RuntimeError("RPC is already initialized") + + # Initialize RPC. 
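+        # The handler registered for ``backend`` constructs the RPC agent
+        # (a TensorPipeAgent for the default backend) from the store and
+        # options; _init_rpc_states() below records it as the current agent
+        # if the backend handler has not already done so.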
+ rpc_agent = backend_registry.init_backend( + backend, + store=store, + name=name, + rank=rank, + world_size=world_size, + rpc_backend_options=rpc_backend_options, + ) + + api._init_rpc_states(rpc_agent) + + @api._require_initialized + def _get_debug_info(): + info = _rref_context_get_debug_info() + info.update(api._get_current_rpc_agent().get_debug_info()) + info.update(dist_autograd._get_debug_info()) + return info diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5755b99c7571bc4decbcd8671141dc65fddee1b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py @@ -0,0 +1,18 @@ + +import torch + + +def is_available(): + return hasattr(torch._C, "_faulty_agent_init") + + +if is_available() and not torch._C._faulty_agent_init(): + raise RuntimeError("Failed to initialize torch.distributed.rpc._testing") + +if is_available(): + # Registers FAULTY_TENSORPIPE RPC backend. + from . import faulty_agent_backend_registry + from torch._C._distributed_rpc_testing import ( + FaultyTensorPipeRpcBackendOptions, + FaultyTensorPipeAgent, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5f3a428b247f3255031033361065c7f09bc3094 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/faulty_agent_backend_registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/faulty_agent_backend_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db31e3d2f166771cb79b61a98de3fa81e36c2fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/faulty_agent_backend_registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/faulty_agent_backend_registry.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/faulty_agent_backend_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..b02a6a2ff8ac30d198c7846f772579dd496ee289 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_testing/faulty_agent_backend_registry.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +import torch.distributed as dist +import torch.distributed.rpc as rpc + +def _faulty_tensorpipe_construct_rpc_backend_options_handler( + rpc_timeout, + init_method, + num_worker_threads, + messages_to_fail, + messages_to_delay, + num_fail_sends, + **kwargs +): + from . import FaultyTensorPipeRpcBackendOptions + + return FaultyTensorPipeRpcBackendOptions( + num_worker_threads=num_worker_threads, + rpc_timeout=rpc_timeout, + init_method=init_method, + messages_to_fail=messages_to_fail, + messages_to_delay=messages_to_delay, + num_fail_sends=num_fail_sends, + ) + + +def _faulty_tensorpipe_init_backend_handler( + store, name, rank, world_size, rpc_backend_options +): + from . import FaultyTensorPipeAgent + from . 
import FaultyTensorPipeRpcBackendOptions + from torch.distributed.rpc import api + + if not isinstance(store, dist.Store): + raise TypeError(f"`store` must be a c10d::Store. {store}") + + if not isinstance( + rpc_backend_options, FaultyTensorPipeRpcBackendOptions + ): + raise TypeError( + f"`rpc_backend_options` must be a `FaultyTensorPipeRpcBackendOptions`. {rpc_backend_options}" + ) + + agent = FaultyTensorPipeAgent( + store, + name, + rank, + world_size, + rpc_backend_options, + {}, # reverse_device_map + [], # devices + ) + api._init_rpc_states(agent) + + return agent + + +rpc.backend_registry.register_backend( + "FAULTY_TENSORPIPE", + _faulty_tensorpipe_construct_rpc_backend_options_handler, + _faulty_tensorpipe_init_backend_handler, +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a532897969d40d8ac2e0306a3bb172108c6c5b42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/_utils.py @@ -0,0 +1,37 @@ +from contextlib import contextmanager +from typing import cast +import logging +from . import api +from . import TensorPipeAgent + +logger = logging.getLogger(__name__) + +@contextmanager +def _group_membership_management(store, name, is_join): + token_key = "RpcGroupManagementToken" + join_or_leave = "join" if is_join else "leave" + my_token = f"Token_for_{name}_{join_or_leave}" + while True: + # Retrieve token from store to signal start of rank join/leave critical section + returned = store.compare_set(token_key, "", my_token).decode() + if returned == my_token: + # Yield to the function this context manager wraps + yield + # Finished, now exit and release token + # Update from store to signal end of rank join/leave critical section + store.set(token_key, "") + # Other will wait for this token to be set before they execute + store.set(my_token, "Done") + break + else: + # Store will wait for the token to be released + try: + store.wait([returned]) + except RuntimeError: + logger.error("Group membership token %s timed out waiting for %s to be released.", my_token, returned) + raise + +def _update_group_membership(worker_info, my_devices, reverse_device_map, is_join): + agent = cast(TensorPipeAgent, api._get_current_rpc_agent()) + ret = agent._update_group_membership(worker_info, my_devices, reverse_device_map, is_join) + return ret diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/api.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/api.py new file mode 100644 index 0000000000000000000000000000000000000000..0f317829b207cf8094ef66090bb5366022e2491e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/api.py @@ -0,0 +1,947 @@ +__all__ = ["shutdown", "get_worker_info", "remote", "rpc_sync", + "rpc_async", "RRef", "AllGatherStates", "method_factory", "new_method"] + +import collections +import contextlib +import functools +import inspect +import logging +import threading +from typing import Dict, Generic, TypeVar, Set, Any, TYPE_CHECKING + +import torch +from torch.futures import Future + +from torch._C._distributed_rpc import ( + PyRRef, + RemoteProfilerManager, + WorkerInfo, + TensorPipeAgent, + get_rpc_timeout, + _cleanup_python_rpc_handler, + _delete_all_user_and_unforked_owner_rrefs, + _destroy_rref_context, + _get_current_rpc_agent, + _invoke_remote_builtin, + _invoke_remote_python_udf, + _invoke_remote_torchscript, + _invoke_rpc_builtin, + 
_invoke_rpc_python_udf, + _invoke_rpc_torchscript, + _is_current_rpc_agent_set, + _reset_current_rpc_agent, + _set_and_start_rpc_agent, +) + +from .internal import ( + PythonUDF, + RPCExecMode, + _internal_rpc_pickler, + _build_rpc_profiling_key, +) + +from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT + +from ._utils import _group_membership_management, _update_group_membership + +logger = logging.getLogger(__name__) + +# NB: Ignoring RRef leaks during shutdown. Without this, applications have to +# make sure there is no references to any RRef in the application code and +# Python GC has done its job to delete those RRefs. This is could result in bad +# debugging experiences especially when for large applications. Therefore, by +# default, we are going to ignore RRef leaks during shutdown. This is usually +# fine as shutdown means applications have done training and no longer care +# about states. +# +# To enable RRef leak checking, set this _ignore_rref_leak to False +_ignore_rref_leak = True +_default_pickler = _internal_rpc_pickler + +@contextlib.contextmanager +def _use_rpc_pickler(rpc_pickler): + r""" + rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler + """ + global _default_pickler + _default_pickler = rpc_pickler + try: + yield + finally: + _default_pickler = _internal_rpc_pickler + + +def _require_initialized(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not _is_current_rpc_agent_set(): + raise RuntimeError( + "RPC has not been initialized. Call " + "torch.distributed.rpc.init_rpc first." + ) + return func(*args, **kwargs) + + return wrapper + + +class AllGatherStates: + def __init__(self): + # Each `gathered_objects` is an empty dict at beginning. + # The leader worker is elected as the first worker in a sorted worker + # name list. Whenever there is a worker entering `_all_gather()`, it + # runs `_gather_to_leader()` on the leader to add its own name and + # data obj to this dict. The leader also adds itself's name to the dict + # on calling `_all_gather()`. + # Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader + # will broadcast the gathered dict to all follower workers and set their + # `gathered_objects` field and the `proceed_signal` field. + self.gathered_objects = {} + # All workers wait on this signal until it receives all gathered + # objects. + self.proceed_signal = threading.Event() + + +# States used by `def _all_gather()`. +# `_ALL_WORKER_NAMES` is initialized on initializing RPC layer. +_ALL_WORKER_NAMES: Set[Any] = set() +_all_gather_dict_lock = threading.RLock() +_all_gather_sequence_id: Dict[str, int] = {} +_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict(AllGatherStates) + + +def _init_rpc_states(agent): + worker_infos = agent.get_worker_infos() + global _ALL_WORKER_NAMES + _ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos} + + # NB: backend implementation might have already set the rpc_agent. + if not _is_current_rpc_agent_set(): + _set_and_start_rpc_agent(agent) + + +def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None): + with _all_gather_dict_lock: + if not worker_names: + worker_names = _ALL_WORKER_NAMES + assert ( + worker_name in worker_names + ), f"{worker_name} is not expected by leader." + states = _all_gather_sequence_id_to_states[sequence_id] + assert ( + worker_name not in states.gathered_objects + ), f"{worker_name} reported intent sequence id {sequence_id} twice. 
" + states.gathered_objects[worker_name] = obj + if worker_names == set(states.gathered_objects.keys()): + states.proceed_signal.set() + + +def _broadcast_to_followers(sequence_id, objects_map): + with _all_gather_dict_lock: + states = _all_gather_sequence_id_to_states[sequence_id] + + assert ( + not states.proceed_signal.is_set() + ), f"Termination signal sequence id {sequence_id} got set twice." + states.gathered_objects = objects_map + states.proceed_signal.set() + +_thread_local_var = threading.local() + + +@contextlib.contextmanager +def _wait_all(): + r""" + A context manager that collects all futures returned by ``rpc_async`` and + waits them on the context manager's exit; relieving the user of needing + to explicitly call wait. + + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> with rpc._wait_all(): + >>> fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + >>> fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + >>> #fut_1 and fut_2 are waited on + """ + _thread_local_var.future_list = [] + try: + yield + finally: + try: + torch.futures.wait_all(_thread_local_var.future_list) + finally: + del _thread_local_var.future_list + + +@_require_initialized +def _all_gather(obj, worker_names=None, timeout: float = UNSET_RPC_TIMEOUT): + r""" + This is similar to torch.distributed.all_gather(), but is using RPC. It + picks the worker with the smallest name (alphabetic order) as the leader. + Then all followers send their data ``obj`` to the leader. After the leader + has received all, it will broadcast the results back to all followers. This + function blocks until all workers have received the gathered results. + """ + if not worker_names: + assert ( + _ALL_WORKER_NAMES is not None + ), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`." + worker_names = _ALL_WORKER_NAMES + leader_name = min(worker_names) + + self_name = _get_current_rpc_agent().get_worker_info().name + + with _all_gather_dict_lock: + concat_names = "".join(sorted(worker_names)) + sequence_num = _all_gather_sequence_id.get(concat_names, 0) + _all_gather_sequence_id[concat_names] = sequence_num + 1 + sequence_id = concat_names + str(sequence_num) + + is_leader = leader_name == self_name + + if timeout == UNSET_RPC_TIMEOUT: + # Timeout is specified by agent for RPC calls + rpc_timeout = get_rpc_timeout() + # No timeout for signal + signal_timeout = None + elif timeout == DEFAULT_SHUTDOWN_TIMEOUT: + # No timeout for RPC + rpc_timeout = timeout + # No timeout for signal + signal_timeout = None + else: + # Signal and RPC timeout use the same timeout + signal_timeout = rpc_timeout = timeout + + # Phase 1: Followers send it's object to the leader + if is_leader: + _gather_to_leader(sequence_id, self_name, obj, worker_names) + else: + rpc_sync( + leader_name, + _gather_to_leader, + args=(sequence_id, self_name, obj, worker_names), + timeout=rpc_timeout, + ) + + with _all_gather_dict_lock: + states = _all_gather_sequence_id_to_states[sequence_id] + + # Timeout is either set by function parameter or None (which is indefinite) + states.proceed_signal.wait(timeout=signal_timeout) + + # Phase 2: Leader broadcast gathered results to all followers + # Leader's signal is the first to be unblocked, after receiving all + # followers' data objects. 
+ if is_leader: + worker_name_to_response_future_dict = {} + for follower_name in worker_names - {leader_name}: + fut = rpc_async( + follower_name, + _broadcast_to_followers, + args=(sequence_id, states.gathered_objects), + timeout=rpc_timeout + ) + worker_name_to_response_future_dict[follower_name] = fut + + errors = [] + for follower_name, fut in worker_name_to_response_future_dict.items(): + try: + fut.wait() + except RuntimeError as ex: + errors.append((follower_name, ex)) + + if errors: + raise RuntimeError( + f"Followers {[e[0] for e in errors]} timed out in _all_gather " + f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}" + ) + + # Clean up for the states using the sequence_id + with _all_gather_dict_lock: + states = _all_gather_sequence_id_to_states.pop(sequence_id) + return states.gathered_objects + + +@_require_initialized +def _barrier(worker_names): + r""" + Synchronizes local and remote RPC processes. + + This will block until all local and remote RPC processes specified under worker_names + reach this method to wait for all outstanding work to complete. + + Args: + worker_names (List[str]): The set of workers to synchronize. + + """ + try: + _all_gather(None, set(worker_names)) + except RuntimeError as ex: + logger.error( + "Failed to complete barrier, got error %s", ex + ) + + +@_require_initialized +def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT): + r""" + Block until all local and remote RPC processes reach this method and wait + for all outstanding work to complete. Every RPC process must call this + method before exit to perform a graceful shutdown. This should be used to + terminate the RPC framework, and there is no guarantee that the RPC + framework will work after this method returns. + """ + try: + _all_gather(None, timeout=timeout) + except RuntimeError as ex: + logger.error( + "Failed to respond to 'Shutdown Proceed' in time, got error %s", ex + ) + raise ex + + +@_require_initialized +def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT): + r""" + Perform a shutdown of the RPC agent, and then destroy the RPC agent. This + stops the local agent from accepting outstanding requests, and shuts + down the RPC framework by terminating all RPC threads. If ``graceful=True``, + this will block until all local and remote RPC processes reach this method + and wait for all outstanding work to complete. Otherwise, if + ``graceful=False``, this is a local shutdown, and it does not wait for other + RPC processes to reach this method. + + .. warning:: + For :class:`~torch.futures.Future` objects returned by + :meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not + be called after ``shutdown()``. + + Args: + graceful (bool): Whether to do a graceful shutdown or not. If True, + this will 1) wait until there is no pending system + messages for ``UserRRefs`` and delete them; 2) block + until all local and remote RPC processes have reached + this method and wait for all outstanding work to + complete. + + Example:: + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. 
For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> # do some work + >>> result = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(1), 1)) + >>> # ready to shutdown + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> # wait for worker 0 to finish work, and then shutdown. + >>> rpc.shutdown() + """ + if graceful: + try: + agent = _get_current_rpc_agent() + if not isinstance(agent, TensorPipeAgent) or agent.is_static_group: + _wait_all_workers(timeout) + _delete_all_user_and_unforked_owner_rrefs() + agent.join(shutdown=True, timeout=timeout) + else: + # This is a dynamic group so we need to grab the token for the operation + my_worker_info = agent.get_worker_info() + my_name = my_worker_info.name + with _group_membership_management(agent.store, my_name, False): + all_worker_infos = agent.get_worker_infos() + for worker in all_worker_infos: + if worker.name != my_name: + rpc_sync(worker.name, _update_group_membership, args=(my_worker_info, [], {}, False)) + agent.join(shutdown=True, timeout=timeout) + finally: + # In case of errors, continue to complete the local shutdown. + _finalize_shutdown() + else: + _finalize_shutdown() + + +def _finalize_shutdown(): + try: + # This raises a `TORCH_CHECK()` exception on RRef leak detected. + _destroy_rref_context(_ignore_rref_leak) + finally: + _get_current_rpc_agent().shutdown() + # clean up python rpc handler in shutdown(), see comments in + # PythonRpcHandler::cleanup(), call it in python API because the + # cleanup() function has python dependency, it assumes python + # interpreter exists. + # No matter if RRef leak exception is raised, this clean-up code + # must run to avoid destruction segfault in Python 3.5. + # + # future.wait() should not be called after shutdown(). + # pythonRpcHandler is cleaned up in shutdown(), after + # shutdown(), python objects returned from rpc python call can not be + # resolved. + _cleanup_python_rpc_handler() + _reset_current_rpc_agent() + + +@_require_initialized +def get_worker_info(worker_name=None): + r""" + Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name. + Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an + expensive string on every invocation. + + Args: + worker_name (str): the string name of a worker. If ``None``, return the + the id of the current worker. (default ``None``) + + Returns: + :class:`~torch.distributed.rpc.WorkerInfo` instance for the given + ``worker_name`` or :class:`~torch.distributed.rpc.WorkerInfo` of the + current worker if ``worker_name`` is ``None``. + """ + if worker_name is not None: + return _get_current_rpc_agent().get_worker_info(worker_name) + else: + return _get_current_rpc_agent().get_worker_info() + + +def _to_worker_info(to): + if isinstance(to, WorkerInfo): + return to + elif isinstance(to, (str, int)): + return get_worker_info(to) + else: + raise ValueError(f"Cannot get WorkerInfo from name {to}") + + +def _rref_typeof_on_owner(rref, blocking: bool = True): + rref_type = type(rref.local_value()) + if blocking: + return rref_type + else: + # Wrap result into a completed Future. 
This is so that if blocking=`False` + # is specified, we return a future regardless of if this call is on user + # or owner. + future = Future[type]() + future.set_result(rref_type) + return future + + +def _rref_typeof_on_user(rref, timeout: float = UNSET_RPC_TIMEOUT, blocking: bool = True): + fut = rpc_async( + rref.owner(), + _rref_typeof_on_owner, + args=(rref,), + timeout=timeout + ) + if blocking: + return fut.wait() + else: + return fut + + +T = TypeVar("T") +GenericWithOneTypeVar = Generic[T] + + +if TYPE_CHECKING: + class RRef(PyRRef[T], Generic[T]): + pass +else: + try: + # Combine the implementation class and the type class. + class RRef(PyRRef, Generic[T]): + pass + except TypeError: + # TypeError: metaclass conflict: the metaclass of a derived class + # must be a (non-strict) subclass of the metaclasses of all its bases + # Mypy doesn't understand __class__ (mypy bug #4177) + class RRefMeta(PyRRef.__class__, GenericWithOneTypeVar.__class__): # type: ignore[name-defined, misc, valid-type] + pass + + # Combine the implementation class and the type class. + # Types for classes expecting a certain generic parameter (mypy bug #7791) + class RRef(PyRRef, GenericWithOneTypeVar, metaclass=RRefMeta): # type: ignore[misc, no-redef, valid-type] + pass + + +# Install docstrings from `PyRRef` to `RRef`. +# +# This is for the fact that pybind11 generates the parameter +# `self` as type `rpc.PyRRef`, so a `:inherited-members:` +# under `.. autoclass:: RRef` does not work. +# we have to do the following process to replace `rpc.PyRRef` with `rpc.RRef`. +# +def method_factory(method_name, docstring): + def method(self, *args, **kwargs): + return getattr(super(RRef, self), method_name)(*args, **kwargs) + + if method.__doc__: + method.__doc__ = docstring + return method + + +for method_name, method in inspect.getmembers(PyRRef): + # Ignore magic methods, except "__str__". + if method_name.startswith("_") and method_name != "__str__": + continue + + # Get pybind11 generated docstring. + # It's like, + """ + to_here(self: torch.distributed.rpc.PyRRef, timeout: float=-1.0) -> object + + Blocking call that copies the value of the RRef from the owner + to the local node and returns it. If the current node is the + owner, returns a reference to the local value. + """ + docstring = getattr(method, "__doc__", None) + assert docstring is not None, "RRef user-facing methods should all have docstrings." + + # Do surgery on pybind11 generated docstrings. + docstring = docstring.replace("torch.distributed.rpc.PyRRef", "torch.distributed.rpc.RRef") + + # Attach user-facing RRef method with modified docstring. + new_method = method_factory(method_name, docstring) + setattr(RRef, method_name, new_method) + + +@_require_initialized +def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT): + r""" + Make a remote call to run ``func`` on worker ``to`` and return an + :class:`~torch.distributed.rpc.RRef` to the result value immediately. + Worker ``to`` will be the owner of the returned + :class:`~torch.distributed.rpc.RRef`, and the worker calling ``remote`` is + a user. The owner manages the global reference count of its + :class:`~torch.distributed.rpc.RRef`, and the owner + :class:`~torch.distributed.rpc.RRef` is only destructed when globally there + are no living references to it. + + Args: + to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker. + func (Callable): a callable function, such as Python callables, builtin + operators (e.g. 
:meth:`~torch.add`) and annotated + TorchScript functions. + args (tuple): the argument tuple for the ``func`` invocation. + kwargs (dict): is a dictionary of keyword arguments for the ``func`` + invocation. + + timeout (float, optional): timeout in seconds for this remote call. If the + creation of this + :class:`~torch.distributed.rpc.RRef` on worker + ``to`` is not successfully processed on this + worker within this timeout, then the next time + there is an attempt to use the RRef (such as + ``to_here()``), a timeout will be raised + indicating this failure. A value of 0 indicates + an infinite timeout, i.e. a timeout error will + never be raised. If not provided, the default + value set during initialization or with + ``_set_rpc_timeout`` is used. + + Returns: + A user :class:`~torch.distributed.rpc.RRef` instance to the result + value. Use the blocking API :meth:`torch.distributed.rpc.RRef.to_here` + to retrieve the result value locally. + + .. warning :: + The ``remote`` API does not copy storages of argument tensors until + sending them over the wire, which could be done by a different thread + depending on the RPC backend type. The caller should make sure that the + contents of those tensors stay intact until the returned RRef is + confirmed by the owner, which can be checked using the + :meth:`torch.distributed.rpc.RRef.confirmed_by_owner` API. + + .. warning :: + Errors such as timeouts for the ``remote`` API are handled on a + best-effort basis. This means that when remote calls initiated by + ``remote`` fail, such as with a timeout error, we take a best-effort + approach to error handling. This means that errors are handled and set + on the resulting RRef on an asynchronous basis. If the RRef has not been + used by the application before this handling (such as ``to_here`` or + fork call), then future uses of the ``RRef`` will appropriately raise + errors. However, it is possible that the user application will use the + ``RRef`` before the errors are handled. In this case, errors may not be + raised as they have not yet been handled. + + Example:: + + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) + >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) + >>> x = rref1.to_here() + rref2.to_here() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Below is an example of running a TorchScript function using RPC. 
+ + >>> # On both workers: + >>> @torch.jit.script + >>> def my_script_add(tensor: torch.Tensor, scalar: int): + >>> return torch.add(tensor, scalar) + + >>> # On worker 0: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> rref = rpc.remote("worker1", my_script_add, args=(torch.ones(2), 3)) + >>> rref.to_here() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + """ + torch._C._log_api_usage_once("torch.distributed.rpc_remote") + qualified_name = torch.jit._builtins._find_builtin(func) + dst_worker_info = _to_worker_info(to) + should_profile = _get_should_profile() + + ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, RPCExecMode.REMOTE, dst_worker_info) + + with ctx_manager as rf: + args = args if args else () + kwargs = kwargs if kwargs else {} + + is_async_exec = hasattr(func, "_wrapped_async_rpc_function") + + if is_async_exec: + wrapped = func._wrapped_async_rpc_function + if isinstance(wrapped, torch.jit.ScriptFunction): + func = wrapped + + if qualified_name is not None: + rref = _invoke_remote_builtin(dst_worker_info, qualified_name, timeout, *args, **kwargs) + elif isinstance(func, torch.jit.ScriptFunction): + rref = _invoke_remote_torchscript( + dst_worker_info.name, + torch._jit_internal._qualified_name(func), + timeout, + is_async_exec, + *args, + **kwargs, + ) + else: + (pickled_python_udf, tensors) = _default_pickler.serialize( + PythonUDF(func, args, kwargs) + ) + rref = _invoke_remote_python_udf( + dst_worker_info, + pickled_python_udf, + tensors, + timeout, + is_async_exec + ) + # attach profiling information + if should_profile: + assert torch.autograd._profiler_enabled() + assert rf is not None + fut = rf._call_end_callbacks_on_future(rref._get_future()) + rref._set_profiling_future(fut) + + return rref + + +def _invoke_rpc(to, func, rpc_type, args=None, kwargs=None, rpc_timeout: float = UNSET_RPC_TIMEOUT): + if not callable(func): + raise TypeError("function should be callable.") + + qualified_name = torch.jit._builtins._find_builtin(func) + dst_worker_info = _to_worker_info(to) + + should_profile = _get_should_profile() + + ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info) + + with ctx_manager as rf: + args = args if args else () + kwargs = kwargs if kwargs else {} + + is_async_exec = hasattr(func, "_wrapped_async_rpc_function") + + if is_async_exec: + wrapped = func._wrapped_async_rpc_function + if isinstance(wrapped, torch.jit.ScriptFunction): + func = wrapped + + if qualified_name is not None: + fut = _invoke_rpc_builtin( + dst_worker_info, + qualified_name, + rpc_timeout, + *args, + **kwargs + ) + elif isinstance(func, torch.jit.ScriptFunction): + fut = _invoke_rpc_torchscript( + dst_worker_info.name, + torch._jit_internal._qualified_name(func), + args, + kwargs, + rpc_timeout, + is_async_exec + ) + else: + (pickled_python_udf, tensors) = _default_pickler.serialize( + PythonUDF(func, args, kwargs) + ) + fut = _invoke_rpc_python_udf( + dst_worker_info, + pickled_python_udf, + tensors, + rpc_timeout, + is_async_exec + ) + if should_profile: + assert torch.autograd._profiler_enabled() + assert rf is not None + # Schedule profiling callbacks to run when the future completes. 
+ # This returns a future that is completed when the original future + # completes and the profiling callbacks have been completed as well, + # to guarantee that fut.wait() completes the profiling. This new + # future will contain the same value as the original future. + fut = rf._call_end_callbacks_on_future(fut) + return fut + + +@_require_initialized +def rpc_sync(to, func, args=None, kwargs=None, timeout: float = UNSET_RPC_TIMEOUT): + r""" + Make a blocking RPC call to run function ``func`` on worker ``to``. RPC + messages are sent and received in parallel to execution of Python code. This + method is thread-safe. + + Args: + to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker. + func (Callable): a callable function, such as Python callables, builtin + operators (e.g. :meth:`~torch.add`) and annotated + TorchScript functions. + args (tuple): the argument tuple for the ``func`` invocation. + kwargs (dict): is a dictionary of keyword arguments for the ``func`` + invocation. + timeout (float, optional): timeout in seconds to use for this RPC. If + the RPC does not complete in this amount of + time, an exception indicating it has + timed out will be raised. A value of 0 + indicates an infinite timeout, i.e. a timeout + error will never be raised. If not provided, + the default value set during initialization + or with ``_set_rpc_timeout`` is used. + + Returns: + Returns the result of running ``func`` with ``args`` and ``kwargs``. + + Example:: + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3)) + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Below is an example of running a TorchScript function using RPC. + + >>> # On both workers: + >>> @torch.jit.script + >>> def my_script_add(tensor: torch.Tensor, scalar: int): + >>> return torch.add(tensor, scalar) + + >>> # On worker 0: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> ret = rpc.rpc_sync("worker1", my_script_add, args=(torch.ones(2), 3)) + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + """ + torch._C._log_api_usage_once("torch.distributed.rpc_sync") + fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout) + return fut.wait() + + +@_require_initialized +def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT): + r""" + Make a non-blocking RPC call to run function ``func`` on worker ``to``. RPC + messages are sent and received in parallel to execution of Python code. This + method is thread-safe. This method will immediately return a + :class:`~torch.futures.Future` that can be awaited on. + + Args: + to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker. + func (Callable): a callable function, such as Python callables, builtin + operators (e.g. :meth:`~torch.add`) and annotated + TorchScript functions. 
+ args (tuple): the argument tuple for the ``func`` invocation. + kwargs (dict): is a dictionary of keyword arguments for the ``func`` + invocation. + timeout (float, optional): timeout in seconds to use for this RPC. If + the RPC does not complete in this amount of + time, an exception indicating it has + timed out will be raised. A value of 0 + indicates an infinite timeout, i.e. a timeout + error will never be raised. If not provided, + the default value set during initialization + or with ``_set_rpc_timeout`` is used. + + + Returns: + Returns a :class:`~torch.futures.Future` object that can be waited + on. When completed, the return value of ``func`` on ``args`` and + ``kwargs`` can be retrieved from the :class:`~torch.futures.Future` + object. + + .. warning :: + Using GPU tensors as arguments or return values of ``func`` is not + supported since we don't support sending GPU tensors over the wire. You + need to explicitly copy GPU tensors to CPU before using them as + arguments or return values of ``func``. + + .. warning :: + The ``rpc_async`` API does not copy storages of argument tensors until + sending them over the wire, which could be done by a different thread + depending on the RPC backend type. The caller should make sure that the + contents of those tensors stay intact until the returned + :class:`~torch.futures.Future` completes. + + Example:: + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> fut1 = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 3)) + >>> fut2 = rpc.rpc_async("worker1", min, args=(1, 2)) + >>> result = fut1.wait() + fut2.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Below is an example of running a TorchScript function using RPC. + + >>> # On both workers: + >>> @torch.jit.script + >>> def my_script_add(tensor: torch.Tensor, scalar: int): + >>> return torch.add(tensor, scalar) + + >>> # On worker 0: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> fut = rpc.rpc_async("worker1", my_script_add, args=(torch.ones(2), 3)) + >>> ret = fut.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + """ + torch._C._log_api_usage_once("torch.distributed.rpc_async") + fut = _invoke_rpc(to, func, RPCExecMode.ASYNC, args, kwargs, timeout) + if hasattr(_thread_local_var, "future_list"): + _thread_local_var.future_list.append(fut) + return fut + + +def _get_should_profile(): + # Legacy profiler should be enabled. RPC profiling is not supported with + # Kineto profiler. 
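As a hedged illustration of what this check means for callers: RPC spans are only recorded when the legacy autograd profiler is active, e.g. via the ``profile`` class exposed by ``torch.autograd.profiler_legacy`` (imported near the end of this diff); the Kineto-based profiler does not trigger it.

>>> from torch.autograd.profiler_legacy import profile
>>> with profile():
>>>     rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))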
+ ActiveProfilerType = torch._C._profiler.ActiveProfilerType + return ( + torch.autograd._profiler_enabled() and + torch._C._autograd._profiler_type() == ActiveProfilerType.LEGACY # type: ignore[attr-defined] + ) + + +def _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info): + ctx_manager = contextlib.nullcontext() + + if should_profile: + # Create appropriate string representation based on type of func + # (builtin, script, python) + if qualified_name is None: + func_name = ( + torch._jit_internal._qualified_name(func) + if isinstance(func, torch.jit.ScriptFunction) + else func.__qualname__ + ) + else: + func_name = qualified_name + # Build RPC profiling key. + rpc_profiling_key = _build_rpc_profiling_key( + rpc_type, + func_name, + get_worker_info().name, + dst_worker_info.name, + ) + RemoteProfilerManager.set_current_profiling_key(rpc_profiling_key) + # Mypy doesn't support re-def of a variable not in the same block (#1174) + ctx_manager = torch.autograd.profiler.record_function(rpc_profiling_key) # type: ignore[assignment] + + return ctx_manager diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/backend_registry.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/backend_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..d09ec399e390e1ae63b7c1bec04d26e5286c6bd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/backend_registry.py @@ -0,0 +1,395 @@ +__all__ = ["init_backend", "backend_registered", "construct_rpc_backend_options", "register_backend", "BackendType", "BackendValue"] + +import collections +import enum +from typing import cast, Dict, List, Set, Tuple + +import torch +import torch.distributed as dist +from ._utils import _group_membership_management, _update_group_membership + +from . import api +from . import constants as rpc_constants + +__all__ = ["backend_registered", "register_backend", "construct_rpc_backend_options", "init_backend", + "BackendValue", "BackendType"] + +BackendValue = collections.namedtuple( + "BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"] +) + + +def _backend_type_repr(self): + return "BackendType." + self.name + + +_backend_type_doc = """ + An enum class of available backends. + + PyTorch ships with a builtin ``BackendType.TENSORPIPE`` backend. + Additional ones can be registered using the + :func:`~torch.distributed.rpc.backend_registry.register_backend` function. +""" + +# Create an enum type, `BackendType`, with empty members. +# Can't handle Function Enum API (mypy bug #9079) +BackendType = enum.Enum(value="BackendType", names=dict()) # type: ignore[misc] +# Unable to assign a function a method (mypy bug #2427) +BackendType.__repr__ = _backend_type_repr # type: ignore[assignment] + +if BackendType.__doc__: + BackendType.__doc__ = _backend_type_doc + +def backend_registered(backend_name): + """ + Checks if backend_name is registered as an RPC backend. + + Args: + backend_name (str): string to identify the RPC backend. + Returns: + True if the backend has been registered with ``register_backend``, else + False. + """ + return backend_name in BackendType.__members__.keys() + + +def register_backend( + backend_name, construct_rpc_backend_options_handler, init_backend_handler +): + """Registers a new RPC backend. + + Args: + backend_name (str): backend string to identify the handler. 
+ construct_rpc_backend_options_handler (function): + Handler that is invoked when + rpc_backend.construct_rpc_backend_options(**dict) is called. + init_backend_handler (function): Handler that is invoked when the + `_init_rpc_backend()` function is called with a backend. + This returns the agent. + """ + global BackendType + if backend_registered(backend_name): + raise RuntimeError(f"RPC backend {backend_name}: already registered") + # Create a new enum type, `BackendType`, with extended members. + existing_enum_dict = {member.name: member.value for member in BackendType} + extended_enum_dict = dict( + { + backend_name: BackendValue( + construct_rpc_backend_options_handler=construct_rpc_backend_options_handler, + init_backend_handler=init_backend_handler, + ) + }, + **existing_enum_dict + ) + # Can't handle Function Enum API (mypy bug #9079) + BackendType = enum.Enum(value="BackendType", names=extended_enum_dict) # type: ignore[misc] + # Unable to assign a function a method (mypy bug #2427) + BackendType.__repr__ = _backend_type_repr # type: ignore[assignment] + if BackendType.__doc__: + BackendType.__doc__ = _backend_type_doc + return BackendType[backend_name] + +def construct_rpc_backend_options( + backend, + rpc_timeout=rpc_constants.DEFAULT_RPC_TIMEOUT_SEC, + init_method=rpc_constants.DEFAULT_INIT_METHOD, + **kwargs +): + + return backend.value.construct_rpc_backend_options_handler( + rpc_timeout, init_method, **kwargs + ) + +def init_backend(backend, *args, **kwargs): + return backend.value.init_backend_handler(*args, **kwargs) + +def _init_process_group(store, rank, world_size): + # Initialize ProcessGroup. + process_group_timeout = rpc_constants.DEFAULT_PROCESS_GROUP_TIMEOUT + + # We're using a bunch of private APIs here since `new_group` requires the + # default group to be initialized. + group = dist.ProcessGroupGloo(store, rank, world_size, process_group_timeout) + + assert group is not None, "Failed to initialize default ProcessGroup." + + if (rank != -1) and (rank != group.rank()): + raise RuntimeError( + f"rank argument {rank} doesn't match pg rank {group.rank()}" + ) + if (world_size != -1) and (world_size != group.size()): + raise RuntimeError( + f"world_size argument {world_size} doesn't match pg size {group.size()}" + ) + return group + +def _tensorpipe_construct_rpc_backend_options_handler( + rpc_timeout, + init_method, + num_worker_threads=rpc_constants.DEFAULT_NUM_WORKER_THREADS, + _transports=None, + _channels=None, + **kwargs +): + from . 
import TensorPipeRpcBackendOptions + + return TensorPipeRpcBackendOptions( + rpc_timeout=rpc_timeout, + init_method=init_method, + num_worker_threads=num_worker_threads, + _transports=_transports, + _channels=_channels, + ) + + +def _tensorpipe_validate_devices(devices, device_count): + return all( + d.type == "cpu" or (d.type == "cuda" and 0 <= d.index < device_count) + for d in devices + ) + + +# detect if any worker has invalid device_map configurations, and return +# reverse device maps +def _tensorpipe_exchange_and_check_all_device_maps( + my_name, my_device_count, my_device_maps, my_devices, group +): + gathered: List[Tuple[ + str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device] + ]] = [("", 0, {}, []) for _ in range(group.size())] + dist.all_gather_object( + gathered, (my_name, my_device_count, my_device_maps, my_devices), group + ) + all_names = [name for name, _, _, _ in gathered] + all_device_counts = {name: count for name, count, _, _ in gathered} + all_device_maps = {name: map_ for name, _, map_, _ in gathered} + all_devices = {name: devices for name, _, _, devices in gathered} + + _validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices) + + # passed all checked, construct reverse mapping and get list of devices handled by this agent + reverse_device_maps = _create_reverse_mapping(my_name, all_names, all_device_maps) + my_devices = _create_device_list(my_devices, my_device_maps, reverse_device_maps) + return reverse_device_maps, my_devices + +def _validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices, is_static_group=True): + for node in all_names: + devices = all_devices[node] + if len(set(devices)) != len(devices): + raise ValueError( + f"Node {node} has duplicated devices\n" + f"devices = {devices}" + ) + if not _tensorpipe_validate_devices(devices, all_device_counts[node]): + raise ValueError( + f"Node {node} has devices with invalid indices\n" + f"devices = {devices}\n" + f"device count = {all_device_counts[node]}" + ) + + for source_node in all_names: + # For dynamic group (non-static) do not check the target node name since it may not have joined yet + if is_static_group and not set(all_device_maps[source_node].keys()).issubset(all_names): + raise ValueError( + f"Node {source_node} has invalid target node names in its device maps\n" + f"device maps = {all_device_maps[source_node].keys()}\n" + f"node names = {all_names}" + ) + for target_node, map_ in all_device_maps[source_node].items(): + if len(set(map_.values())) != len(map_): + raise ValueError( + f"Node {source_node} has duplicated target devices " + f"in its device map for {target_node}\n" + f"device map = {map_}" + ) + if all_devices[source_node]: + if not set(map_.keys()).issubset(all_devices[source_node]): + raise ValueError( + f"Node {source_node} has unexpected source devices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"devices = {all_devices[source_node]}" + ) + elif not _tensorpipe_validate_devices( + map_.keys(), all_device_counts[source_node] + ): + raise ValueError( + f"Node {source_node} has source devices with invalid indices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"device count = {all_device_counts[source_node]}" + ) + if all_devices.get(target_node, []): + if not set(map_.values()).issubset(all_devices[target_node]): + raise ValueError( + f"Node {source_node} has unexpected target devices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + 
f"devices = {all_devices[target_node]}" + ) + elif target_node in all_device_counts and not _tensorpipe_validate_devices( + map_.values(), all_device_counts[target_node] + ): + raise ValueError( + f"Node {source_node} has target devices with invalid indices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"device count = {all_device_counts[target_node]}" + ) + +def _create_device_list(my_devices, my_device_maps, reverse_device_maps): + if not my_devices: + devices_set: Set[torch.device] = set() + for map_ in my_device_maps.values(): + devices_set.update(map_.keys()) + for map_ in reverse_device_maps.values(): + devices_set.update(map_.keys()) + devices_set.discard(torch.device("cpu")) + my_devices = list(devices_set) + my_devices = sorted(my_devices, key=lambda d: d.index) + return my_devices + +def _create_reverse_mapping(my_name, all_names, all_device_maps): + reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {} + for node in all_names: + if my_name in all_device_maps[node]: + reverse_device_maps[node] = { + v: k for k, v in all_device_maps[node][my_name].items() + } + return reverse_device_maps + +def _get_device_infos(): + from . import TensorPipeAgent + agent = cast(TensorPipeAgent, api._get_current_rpc_agent()) + opts = agent._get_backend_options() + device_count = torch.cuda.device_count() + if torch.cuda.is_available() and opts.devices: + torch.cuda.init() + return device_count, opts.device_maps, opts.devices + +def _set_devices_and_reverse_device_map(agent): + from . import TensorPipeAgent + agent = cast(TensorPipeAgent, agent) + # Group state is retrieved from local agent + # On initialization, tensorpipe agent retrieves information from all existing workers, so group state is valid + my_worker_info = agent.get_worker_info() + my_name = my_worker_info.name + all_worker_infos = agent.get_worker_infos() + # One round to get device_maps of all workers and construct reverse device maps + all_device_counts, all_device_maps, all_devices, all_names = {}, {}, {}, [] + for worker_info in all_worker_infos: + worker_name = worker_info.name + if worker_name != my_name: + # TODO: make async? + device_count, device_map, devices = api.rpc_sync(worker_name, _get_device_infos) + else: + opts = agent._get_backend_options() + device_count, device_map, devices = torch.cuda.device_count(), opts.device_maps, opts.devices + all_device_counts[worker_name] = device_count + all_device_maps[worker_name] = device_map + all_devices[worker_name] = devices + all_names.append(worker_name) + + _validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices, is_static_group=False) + reverse_device_maps = _create_reverse_mapping(my_name, all_names, all_device_maps) + + # Perform RPC call to all workers, including itself, to include newly joined worker information and device maps + for worker_name in all_names: + # Set device list for each worker + all_devices[worker_name] = _create_device_list(all_devices[worker_name], all_device_maps[worker_name], reverse_device_maps) + api.rpc_sync(worker_name, _update_group_membership, + args=(my_worker_info, all_devices[worker_name], reverse_device_maps, True)) + +def _tensorpipe_init_backend_handler(store, name, rank, world_size, rpc_backend_options): + from . import TensorPipeAgent + from . import TensorPipeRpcBackendOptions + if not isinstance(store, dist.Store): + raise TypeError(f"`store` must be a c10d::Store. 
{store}") + + if not isinstance( + rpc_backend_options, TensorPipeRpcBackendOptions + ): + raise TypeError( + f"`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`. {rpc_backend_options}" + ) + + device_count = torch.cuda.device_count() + + is_static_group = True if world_size else False + # world_size is specified so this is a static group (ranks cannot join and leave) + if is_static_group: + # The agent's join method is required to behave like a barrier and perform + # collective operations, for which it relies on a process group, instead of + # re-implementing this on top of RPCs. + group = _init_process_group(store, rank, world_size) + + reverse_device_maps, devices = _tensorpipe_exchange_and_check_all_device_maps( + name, + device_count, + rpc_backend_options.device_maps, + rpc_backend_options.devices, + group, + ) + + if torch.cuda.is_available() and devices: + # It's necessary to initialize PyTorch CUDA states here (e.g., + # CUDACachingAllocator). If this is missing, we could hit errors like + # "allocator not initialized", because other processes might send + # CUDA-related RPC request to this process before user code in this + # process initializes its PyTorch CUDA states. + torch.cuda.init() + + # TODO: add try-except and destroy _agent in all processes if any fails. + agent = TensorPipeAgent( + store, + name, + rank, + world_size, + rpc_backend_options, + reverse_device_maps, + devices, + ) + + api._init_rpc_states(agent) + + # Run one dummy round of RPC to initialize channels/transports. Without + # this, it's easy to hit timeout in rpc.shutdown() if there is no other RPC + # on that process before rpc.shutdown(), as the agent initialization can + # take longer than 5s. + api._all_gather(None, timeout=rpc_backend_options.rpc_timeout) + # Need a barrier here to make sure no peers leave before the rank0 finishes + # _all_gather + group.barrier().wait() + + return agent + # initialization for dynamic rpc (ranks can join and leave) + else: + with _group_membership_management(store, name, True): + # Construct TPAgent with empty reverse_device_map and devices + # these properties will be updated after initialization + agent = TensorPipeAgent( + store, + name, + rank, + world_size, + rpc_backend_options, + {}, + [], + ) + api._init_rpc_states(agent) + + try: + # Notify all workers in group this rank has joined and set devices and reverse_device_map + # This is a synchronous operation that completes once all existing ranks are updated + _set_devices_and_reverse_device_map(agent) + pass + except Exception: + api.shutdown() + raise + return agent + +register_backend( + "TENSORPIPE", + _tensorpipe_construct_rpc_backend_options_handler, + _tensorpipe_init_backend_handler, +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/constants.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..3bc525b70d9bb15f6a87c2c82943c3b1597d2277 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/constants.py @@ -0,0 +1,24 @@ +from datetime import timedelta +from typing import List +from torch._C._distributed_rpc import ( + _DEFAULT_INIT_METHOD, + _DEFAULT_NUM_WORKER_THREADS, + _DEFAULT_RPC_TIMEOUT_SEC, + _UNSET_RPC_TIMEOUT, +) + + +# For any RpcAgent. +DEFAULT_RPC_TIMEOUT_SEC: float = _DEFAULT_RPC_TIMEOUT_SEC +DEFAULT_INIT_METHOD: str = _DEFAULT_INIT_METHOD +DEFAULT_SHUTDOWN_TIMEOUT: float = 0 + +# For TensorPipeAgent. 
+DEFAULT_NUM_WORKER_THREADS: int = _DEFAULT_NUM_WORKER_THREADS +# Ensure that we don't time out when there are long periods of time without +# any operations against the underlying ProcessGroup. +DEFAULT_PROCESS_GROUP_TIMEOUT: timedelta = timedelta(milliseconds=2 ** 31 - 1) +# Value indicating that timeout is not set for RPC call, and the default should be used. +UNSET_RPC_TIMEOUT: float = _UNSET_RPC_TIMEOUT + +__all__: List[str] = [] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/functions.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..b1c85c47853d7e106a185efc5e157b8948437b07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/functions.py @@ -0,0 +1,166 @@ +import functools + + +def async_execution(fn): + r""" + A decorator for a function indicating that the return value of the function + is guaranteed to be a :class:`~torch.futures.Future` object and this + function can run asynchronously on the RPC callee. More specifically, the + callee extracts the :class:`~torch.futures.Future` returned by the wrapped + function and installs subsequent processing steps as a callback to that + :class:`~torch.futures.Future`. The installed callback will read the value + from the :class:`~torch.futures.Future` when completed and send the + value back as the RPC response. That also means the returned + :class:`~torch.futures.Future` only exists on the callee side and is never + sent through RPC. This decorator is useful when the wrapped function's + (``fn``) execution needs to pause and resume due to, e.g., containing + :meth:`~torch.distributed.rpc.rpc_async` or waiting for other signals. + + .. note:: To enable asynchronous execution, applications must pass the + function object returned by this decorator to RPC APIs. If RPC detected + attributes installed by this decorator, it knows that this function + returns a ``Future`` object and will handle that accordingly. + However, this does not mean this decorator has to be outmost one when + defining a function. For example, when combined with ``@staticmethod`` + or ``@classmethod``, ``@rpc.functions.async_execution`` needs to be the + inner decorator to allow the target function be recognized as a static + or class function. This target function can still execute asynchronously + because, when accessed, the static or class method preserves attributes + installed by ``@rpc.functions.async_execution``. + + + Example:: + The returned :class:`~torch.futures.Future` object can come from + :meth:`~torch.distributed.rpc.rpc_async`, + :meth:`~torch.futures.Future.then`, or :class:`~torch.futures.Future` + constructor. The example below shows directly using the + :class:`~torch.futures.Future` returned by + :meth:`~torch.futures.Future.then`. + + >>> from torch.distributed import rpc + >>> + >>> # omitting setup and shutdown RPC + >>> + >>> # On all workers + >>> @rpc.functions.async_execution + >>> def async_add_chained(to, x, y, z): + >>> # This function runs on "worker1" and returns immediately when + >>> # the callback is installed through the `then(cb)` API. In the + >>> # mean time, the `rpc_async` to "worker2" can run concurrently. + >>> # When the return value of that `rpc_async` arrives at + >>> # "worker1", "worker1" will run the lambda function accordingly + >>> # and set the value for the previously returned `Future`, which + >>> # will then trigger RPC to send the result back to "worker0". 
+ >>> return rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: fut.wait() + z + >>> ) + >>> + >>> # On worker0 + >>> # xdoctest: +SKIP + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> async_add_chained, + >>> args=("worker2", torch.ones(2), 1, 1) + >>> ) + >>> print(ret) # prints tensor([3., 3.]) + + When combined with TorchScript decorators, this decorator must be the + outmost one. + + >>> from torch import Tensor + >>> from torch.futures import Future + >>> from torch.distributed import rpc + >>> + >>> # omitting setup and shutdown RPC + >>> + >>> # On all workers + >>> @torch.jit.script + >>> def script_add(x: Tensor, y: Tensor) -> Tensor: + >>> return x + y + >>> + >>> @rpc.functions.async_execution + >>> @torch.jit.script + >>> def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]: + >>> return rpc.rpc_async(to, script_add, (x, y)) + >>> + >>> # On worker0 + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> async_add, + >>> args=("worker2", torch.ones(2), 1) + >>> ) + >>> print(ret) # prints tensor([2., 2.]) + + When combined with static or class method, this decorator must be the + inner one. + + >>> from torch.distributed import rpc + >>> + >>> # omitting setup and shutdown RPC + >>> + >>> # On all workers + >>> class AsyncExecutionClass: + >>> + >>> @staticmethod + >>> @rpc.functions.async_execution + >>> def static_async_add(to, x, y, z): + >>> return rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: fut.wait() + z + >>> ) + >>> + >>> @classmethod + >>> @rpc.functions.async_execution + >>> def class_async_add(cls, to, x, y, z): + >>> ret_fut = torch.futures.Future() + >>> rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: ret_fut.set_result(fut.wait() + z) + >>> ) + >>> return ret_fut + >>> + >>> @rpc.functions.async_execution + >>> def bound_async_add(self, to, x, y, z): + >>> return rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: fut.wait() + z + >>> ) + >>> + >>> # On worker0 + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> AsyncExecutionClass.static_async_add, + >>> args=("worker2", torch.ones(2), 1, 2) + >>> ) + >>> print(ret) # prints tensor([4., 4.]) + >>> + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> AsyncExecutionClass.class_async_add, + >>> args=("worker2", torch.ones(2), 1, 2) + >>> ) + >>> print(ret) # prints tensor([4., 4.]) + + This decorator also works with RRef helpers, i.e., . + :meth:`torch.distributed.rpc.RRef.rpc_sync`, + :meth:`torch.distributed.rpc.RRef.rpc_async`, and + :meth:`torch.distributed.rpc.RRef.remote`. 
+ + >>> from torch.distributed import rpc + >>> + >>> # reuse the AsyncExecutionClass class above + >>> rref = rpc.remote("worker1", AsyncExecutionClass) + >>> ret = rref.rpc_sync().static_async_add("worker2", torch.ones(2), 1, 2) + >>> print(ret) # prints tensor([4., 4.]) + >>> + >>> rref = rpc.remote("worker1", AsyncExecutionClass) + >>> ret = rref.rpc_async().static_async_add("worker2", torch.ones(2), 1, 2).wait() + >>> print(ret) # prints tensor([4., 4.]) + >>> + >>> rref = rpc.remote("worker1", AsyncExecutionClass) + >>> ret = rref.remote().static_async_add("worker2", torch.ones(2), 1, 2).to_here() + >>> print(ret) # prints tensor([4., 4.]) + """ + @functools.wraps(fn) + def wrapper(*args, **kwargs): + return fn(*args, **kwargs) + # Can't declare and use attributes of function objects (mypy#2087) + wrapper._wrapped_async_rpc_function = fn # type: ignore[attr-defined] + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/internal.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/internal.py new file mode 100644 index 0000000000000000000000000000000000000000..6e00a4d18521667139c7e8d745287514ce254134 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/internal.py @@ -0,0 +1,281 @@ +import collections +import copyreg +import io +import pickle +import sys +import threading +import traceback +from enum import Enum + +import torch +import torch.distributed as dist +from torch._C._distributed_rpc import _get_current_rpc_agent + +__all__ = ["RPCExecMode", "serialize", "deserialize", "PythonUDF", "RemoteException"] + +# Thread local tensor tables to store tensors while pickling torch.Tensor +# objects +_thread_local_tensor_tables = threading.local() +_pickler = pickle.Pickler +_unpickler = pickle.Unpickler + + +class RPCExecMode(Enum): + SYNC = "sync" + ASYNC = "async" + ASYNC_JIT = "async_jit" + REMOTE = "remote" + + +class _InternalRPCPickler: + r""" + This class provides serialize() and deserialize() interfaces to serialize + data to be "binary string + tensor table" format + So for RPC python UDF function and args, non tensor data will be serialized + into regular binary string, tensor data will be put into thread local tensor + tables, this serialization format is consistent with builtin operator and args + using JIT pickler. This format will make tensor handling in C++ much easier, + e.g. attach tensor to distributed autograd graph in C++ + """ + + def __init__(self): + # Ignore type error because dispatch_table is defined in third-party package + self._dispatch_table = copyreg.dispatch_table.copy() # type: ignore[attr-defined] + self._dispatch_table[torch.Tensor] = self._tensor_reducer + # Used for registering customized picklers. + self._class_reducer_dict = {} + + def _register_reducer(self, obj_class, reducer): + # For the same class, only register the reducer once. 
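A hedged sketch of how a custom reducer could be hooked in through the module-level ``_internal_rpc_pickler`` singleton created further below; ``MyType`` and its reducer are hypothetical, and the reducer follows the usual ``copyreg`` convention of returning ``(callable, args)``:

>>> def _my_type_reducer(obj):
>>>     return (MyType, (obj.value,))  # standard copyreg-style (callable, args)
>>> _internal_rpc_pickler._register_reducer(MyType, _my_type_reducer)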
+ if obj_class not in self._class_reducer_dict: + self._class_reducer_dict[obj_class] = reducer + + @classmethod + def _tensor_receiver(cls, tensor_index): + global _thread_local_tensor_tables + return _thread_local_tensor_tables.recv_tables[tensor_index] + + def _tensor_reducer(self, tensor): + global _thread_local_tensor_tables + _thread_local_tensor_tables.send_tables.append(tensor) + tensor_index = len(_thread_local_tensor_tables.send_tables) - 1 + return (_InternalRPCPickler._tensor_receiver, (tensor_index,)) + + @classmethod + def _py_rref_receiver(cls, rref_fork_data): + return dist.rpc.PyRRef._deserialize(rref_fork_data) + + def _py_rref_reducer(self, py_rref): + rref_fork_data = py_rref._serialize() + return (_InternalRPCPickler._py_rref_receiver, (rref_fork_data,)) + + def _rref_reducer(self, rref): + return self._py_rref_reducer(rref) + + @classmethod + def _script_module_receiver(cls, script_module_serialized): + """ + Given a serialized representation of a ScriptModule created with torch.jit.save, + loads and returns the ScriptModule. + """ + f = io.BytesIO(script_module_serialized) + m = torch.jit.load(f) + return m + + def _script_module_reducer(self, script_module): + """ + Serializes a ScriptModule. + """ + f = io.BytesIO() + torch.jit.save(script_module, f) + return (_InternalRPCPickler._script_module_receiver, (f.getvalue(),)) + + def serialize(self, obj): + r""" + Serialize non tensor data into binary string, tensor data into + tensor table + """ + f = io.BytesIO() + p = _pickler(f) + p.dispatch_table = self._dispatch_table + + # rpc api could accept user picklers inheriting from _InternalRPCPickler to serialize rref, + # user picklers could have different initialization function from _InternalRPCPickler, + # but all the user picklers should call serialize() and use _rref_reducer to pickle rref + # in python. also, when _internal_rpc_pickler is imported to rpc/api.py, rpc.RRef is not + # compiled yet, it is not good place to access rpc.RRef inside _InternalRPCPickler constructor, + # so putting rref's dispatch table here + # + # The return value of a `rpc.remote(..)` call is type of `rpc.PyRRef`. + # The deserialized RRef object on an RPC receiver side is type of `rpc.PyRRef`. + # Ignore type error because dispatch_table is defined in third-party package + p.dispatch_table[dist.rpc.PyRRef] = self._py_rref_reducer # type: ignore[index] + # An RRef created locally by RRef Python constructor is type of `rpc.RRef`. + # Ignore type error because dispatch_table is defined in third-party package + p.dispatch_table[dist.rpc.RRef] = self._rref_reducer # type: ignore[index] + + # Add dispatch pickling for ScriptModule or its subclass. + if isinstance(obj, torch.jit.ScriptModule): + # Ignore type error because dispatch_table is defined in third-party package + p.dispatch_table[obj.__class__] = self._script_module_reducer # type: ignore[index] + + # Install customized picklers. 
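For a hedged, self-contained illustration of the "binary string + tensor table" format described above (assuming a distributed-enabled build; no RPC agent needs to be initialized), the module-level ``serialize``/``deserialize`` helpers defined further below round-trip like this:

>>> payload, tensor_table = serialize({"w": torch.ones(2), "step": 1})
>>> deserialize(payload, tensor_table)  # returns {'w': tensor([1., 1.]), 'step': 1}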
+ for class_name in self._class_reducer_dict.keys(): + p.dispatch_table[class_name] = self._class_reducer_dict[class_name] # type: ignore[index] + + # save _thread_local_tensor_tables.send_tables if it is in nested call + global _thread_local_tensor_tables + if hasattr(_thread_local_tensor_tables, "send_tables"): + old_send_tables = _thread_local_tensor_tables.send_tables + else: + old_send_tables = None + _thread_local_tensor_tables.send_tables = [] + + p.dump(obj) + + # restore _thread_local_tensor_tables.send_tables if return + # from nested call, otherwise clean up the table + tensors = _thread_local_tensor_tables.send_tables + if old_send_tables is not None: + _thread_local_tensor_tables.send_tables = old_send_tables + else: + del _thread_local_tensor_tables.send_tables + + return (f.getvalue(), tensors) + + def deserialize(self, binary_data, tensor_table): + r""" + Deserialize binary string + tensor table to original obj + """ + # save _thread_local_tensor_tables.recv_tables if it is in nested call + global _thread_local_tensor_tables + if hasattr(_thread_local_tensor_tables, "recv_tables"): + old_recv_tables = _thread_local_tensor_tables.recv_tables + else: + old_recv_tables = None + _thread_local_tensor_tables.recv_tables = tensor_table + + try: + unpickler = _unpickler(io.BytesIO(binary_data)) + ret = unpickler.load() + except AttributeError as e: + # Occurs when function is not found on module/class during + # unpickling. + except_str = ( + str(e) + + """ Default RPC pickler does not serialize + function code. Ensure that UDFs are defined on both caller and + callee modules.""" + ) + ret = AttributeError(except_str) + # Ensure the stack trace gets preserved + ret.__cause__ = e + + # restore _thread_local_tensor_tables.recv_tables if return + # from nested call, otherwise clean up the table + if old_recv_tables is not None: + _thread_local_tensor_tables.recv_tables = old_recv_tables + else: + del _thread_local_tensor_tables.recv_tables + + return ret + + +# Create _internal_rpc_pickler only once to initialize _dispatch_table only once +_internal_rpc_pickler = _InternalRPCPickler() + + +def serialize(obj): + return _internal_rpc_pickler.serialize(obj) + + +def deserialize(binary_data, tensor_table): + return _internal_rpc_pickler.deserialize(binary_data, tensor_table) + + +def _run_function(python_udf): + r""" + This function is exclusively called from C++. + See ``torch/csrc/distributed/rpc/python_rpc_handler.cpp``. + + Runs a Python UDF and returns its return value. + Wraps any exception in ``RemoteException`` if the function raises. + """ + try: + if isinstance(python_udf, AttributeError): + raise python_udf + result = python_udf.func(*python_udf.args, **python_udf.kwargs) + except Exception as e: + # except str = exception info + traceback string + except_str = ( + f"On {_get_current_rpc_agent().get_worker_info()}:\n" + f"{repr(e)}\n{traceback.format_exc()}" + ) + print(except_str, file=sys.stderr) + result = RemoteException(except_str, type(e)) + return result + + +def _handle_exception(result): + if isinstance(result, RemoteException): + exception_msg = result.msg.encode("utf-8").decode("unicode_escape") + # We wrap exception re-creation here in case some exception classes + # cannot be constructed directly from a string. + exc = None + try: + exc = result.exception_type(exception_msg) + except BaseException as e: + raise RuntimeError( # noqa: B904 + f"Failed to create original exception type. 
Error msg was {str(e)}" + f" Original exception on remote side was {exception_msg}" + ) from e + + if exc is not None: + raise exc + + +def _build_rpc_profiling_key( + exec_type, func_name, current_worker_name, dst_worker_name +): + """ + Builds the key that RPC calls are profiled with using the autograd profiler. + This will be the name of the corresponding Event recorded in the profiler. + + Args: + exec_type (RPCExecMode): Type of RPC/RRef call + func_name (str): Name of function being profiled. + current_worker_name (str): Name of current worker. + dst_worker_name (str): Name of the destination worker. + + Returns: + String representing profiling key + """ + profile_key = f"rpc_{exec_type.value}#{func_name}({current_worker_name} -> {dst_worker_name})" + return profile_key + + +def _start_record_function(exec_type, func_name, current_worker_name, dest_worker_name): + """ + This function should be called from RPC/RRef functions to create a + RecordFunction object for profiling. This function also runs the before + callbacks that start the profiling, though the user is responsible for + running the appropriate callbacks when the function to be profiled finishes. + + Args: + exec_type (RPCExecMode): Type of RPC/RRef call + func_name (str): Name of function being profiled. + current_worker_name (str): Name of current worker. + dest_worker_name (str): Name of the destination worker. + + Returns: + An instance of `torch.autograd._RecordFunction`. + """ + assert torch.autograd._profiler_enabled(), "Autograd profiler should be enabled." + profile_key = f"rpc_{exec_type.value}#{str(func_name)}({current_worker_name} -> {dest_worker_name})" + rf = torch.autograd._RecordFunction() # type: ignore[attr-defined] + torch.autograd._run_before_callbacks(rf, profile_key) # type: ignore[attr-defined] + return rf + + +PythonUDF = collections.namedtuple("PythonUDF", ["func", "args", "kwargs"]) +RemoteException = collections.namedtuple("RemoteException", ["msg", "exception_type"]) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/options.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/options.py new file mode 100644 index 0000000000000000000000000000000000000000..67892d14e07508326b4890f8527ea9ef6e89608d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/options.py @@ -0,0 +1,172 @@ +from typing import Dict, List, Optional, Union + +import torch +from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase +from . import constants as rpc_contants + + +DeviceType = Union[int, str, torch.device] + +__all__ = ["TensorPipeRpcBackendOptions"] + +def _to_device(device: DeviceType) -> torch.device: + device = torch.device(device) + if device.type != "cuda": + raise ValueError( + "`set_devices` expect a list of CUDA devices, but got " + f"device type {device.type}." 
+ ) + return device + + +def _to_device_map( + device_map: Dict[DeviceType, DeviceType] +) -> Dict[torch.device, torch.device]: + full_device_map: Dict[torch.device, torch.device] = {} + reverse_map: Dict[torch.device, torch.device] = {} + for k, v in device_map.items(): + k, v = torch.device(k), torch.device(v) + if v in reverse_map: + raise ValueError( + "`device_map` only supports 1-to-1 mapping, " + f"trying to map {k} and {reverse_map[v]} to {v}" + ) + full_device_map[k] = v + reverse_map[v] = k + return full_device_map + + +def _to_device_list(devices: List[DeviceType]) -> List[torch.device]: + return list(map(_to_device, devices)) + + +class TensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase): + r""" + The backend options for + :class:`~torch.distributed.rpc.TensorPipeAgent`, derived from + :class:`~torch.distributed.rpc.RpcBackendOptions`. + + Args: + num_worker_threads (int, optional): The number of threads in the + thread-pool used by + :class:`~torch.distributed.rpc.TensorPipeAgent` to execute + requests (default: 16). + rpc_timeout (float, optional): The default timeout, in seconds, + for RPC requests (default: 60 seconds). If the RPC has not + completed in this timeframe, an exception indicating so will + be raised. Callers can override this timeout for individual + RPCs in :meth:`~torch.distributed.rpc.rpc_sync` and + :meth:`~torch.distributed.rpc.rpc_async` if necessary. + init_method (str, optional): The URL to initialize the distributed + store used for rendezvous. It takes any value accepted for the + same argument of :meth:`~torch.distributed.init_process_group` + (default: ``env://``). + device_maps (Dict[str, Dict], optional): Device placement mappings from + this worker to the callee. Key is the callee worker name and value + the dictionary (``Dict`` of ``int``, ``str``, or ``torch.device``) + that maps this worker's devices to the callee worker's devices. + (default: ``None``) + devices (List[int, str, or ``torch.device``], optional): all local + CUDA devices used by RPC agent. By Default, it will be initialized + to all local devices from its own ``device_maps`` and corresponding + devices from its peers' ``device_maps``. When processing CUDA RPC + requests, the agent will properly synchronize CUDA streams for + all devices in this ``List``. + """ + + def __init__( + self, + *, + num_worker_threads: int = rpc_contants.DEFAULT_NUM_WORKER_THREADS, + rpc_timeout: float = rpc_contants.DEFAULT_RPC_TIMEOUT_SEC, + init_method: str = rpc_contants.DEFAULT_INIT_METHOD, + device_maps: Optional[Dict[str, Dict[DeviceType, DeviceType]]] = None, + devices: Optional[List[DeviceType]] = None, + _transports: Optional[List] = None, + _channels: Optional[List] = None, + ): + full_device_maps = ( + {} + if device_maps is None + else {k: _to_device_map(v) for k, v in device_maps.items()} + ) + full_device_list = [] if devices is None else _to_device_list(devices) + super().__init__( + num_worker_threads, + _transports, + _channels, + rpc_timeout, + init_method, + full_device_maps, + full_device_list, + ) + + def set_device_map(self, to: str, device_map: Dict[DeviceType, DeviceType]): + r""" + Set device mapping between each RPC caller and callee pair. This + function can be called multiple times to incrementally add + device placement configurations. + + Args: + to (str): Callee name. + device_map (Dict of int, str, or torch.device): Device placement + mappings from this worker to the callee. This map must be + invertible. 
+ + Example: + >>> # xdoctest: +SKIP("distributed") + >>> # both workers + >>> def add(x, y): + >>> print(x) # tensor([1., 1.], device='cuda:1') + >>> return x + y, (x + y).to(2) + >>> + >>> # on worker 0 + >>> options = TensorPipeRpcBackendOptions( + >>> num_worker_threads=8, + >>> device_maps={"worker1": {0: 1}} + >>> # maps worker0's cuda:0 to worker1's cuda:1 + >>> ) + >>> options.set_device_map("worker1", {1: 2}) + >>> # maps worker0's cuda:1 to worker1's cuda:2 + >>> + >>> rpc.init_rpc( + >>> "worker0", + >>> rank=0, + >>> world_size=2, + >>> backend=rpc.BackendType.TENSORPIPE, + >>> rpc_backend_options=options + >>> ) + >>> + >>> x = torch.ones(2) + >>> rets = rpc.rpc_sync("worker1", add, args=(x.to(0), 1)) + >>> # The first argument will be moved to cuda:1 on worker1. When + >>> # sending the return value back, it will follow the invert of + >>> # the device map, and hence will be moved back to cuda:0 and + >>> # cuda:1 on worker0 + >>> print(rets[0]) # tensor([2., 2.], device='cuda:0') + >>> print(rets[1]) # tensor([2., 2.], device='cuda:1') + """ + full_device_map = _to_device_map(device_map) + curr_device_maps = super().device_maps + + if to in curr_device_maps: + for k, v in full_device_map.items(): + if k in curr_device_maps[to] and v != curr_device_maps[to][k]: + raise ValueError( + "`set_device_map` only supports 1-to-1 mapping, trying" + f" to map {k} to {v} and {curr_device_maps[to][k]}" + ) + + super()._set_device_map(to, full_device_map) + + def set_devices(self, devices: List[DeviceType]): + r""" + Set local devices used by the TensorPipe RPC agent. When processing + CUDA RPC requests, the TensorPipe RPC agent will properly synchronize + CUDA streams for all devices in this ``List``. + + Args: + devices (List of int, str, or torch.device): local devices used by + the TensorPipe RPC agent. + """ + self.devices = _to_device_list(devices) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/rref_proxy.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/rref_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..89986be8b9287d28fc5d4ad057de8be2d3f8b065 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/rref_proxy.py @@ -0,0 +1,74 @@ +from functools import partial + +from . import functions +from . import rpc_async + +import torch +from .constants import UNSET_RPC_TIMEOUT +from torch.futures import Future + +def _local_invoke(rref, func_name, args, kwargs): + return getattr(rref.local_value(), func_name)(*args, **kwargs) + +@functions.async_execution +def _local_invoke_async_execution(rref, func_name, args, kwargs): + return getattr(rref.local_value(), func_name)(*args, **kwargs) + +def _invoke_rpc(rref, rpc_api, func_name, timeout, *args, **kwargs): + def _rref_type_cont(rref_fut): + rref_type = rref_fut.value() + + _invoke_func = _local_invoke + # Bypass ScriptModules when checking for async function attribute. + bypass_type = issubclass(rref_type, torch.jit.ScriptModule) or issubclass( + rref_type, torch._C.ScriptModule + ) + if not bypass_type: + func = getattr(rref_type, func_name) + if hasattr(func, "_wrapped_async_rpc_function"): + _invoke_func = _local_invoke_async_execution + + return rpc_api( + rref.owner(), + _invoke_func, + args=(rref, func_name, args, kwargs), + timeout=timeout + ) + + rref_fut = rref._get_type(timeout=timeout, blocking=False) + + if rpc_api != rpc_async: + rref_fut.wait() + return _rref_type_cont(rref_fut) + else: + # A little explanation on this. 
+ # rpc_async returns a Future pointing to the return value of `func_name`, it returns a `Future[T]` + # Calling _rref_type_cont from the `then` lambda causes Future wrapping. IOW, `then` returns a `Future[Future[T]]` + # To address that, we return a Future that is completed with the result of the async call. + result: Future = Future() + + def _wrap_rref_type_cont(fut): + try: + _rref_type_cont(fut).then(_complete_op) + except BaseException as ex: + result.set_exception(ex) + + def _complete_op(fut): + try: + result.set_result(fut.value()) + except BaseException as ex: + result.set_exception(ex) + + rref_fut.then(_wrap_rref_type_cont) + return result + +# This class manages proxied RPC API calls for RRefs. It is entirely used from +# C++ (see python_rpc_handler.cpp). +class RRefProxy: + def __init__(self, rref, rpc_api, timeout=UNSET_RPC_TIMEOUT): + self.rref = rref + self.rpc_api = rpc_api + self.rpc_timeout = timeout + + def __getattr__(self, func_name): + return partial(_invoke_rpc, self.rref, self.rpc_api, func_name, self.rpc_timeout) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/rpc/server_process_global_profiler.py b/venv/lib/python3.10/site-packages/torch/distributed/rpc/server_process_global_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..dc3f4c19ef1e591fdc45e4e2e04905190040ad75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/rpc/server_process_global_profiler.py @@ -0,0 +1,177 @@ +#!/usr/bin/python3 + +import itertools + +import torch +from torch.autograd.profiler_legacy import profile +from typing import List + +from . import ( + _disable_server_process_global_profiler, + _enable_server_process_global_profiler, +) + +__all__: List[str] = [] + +class _server_process_global_profile(profile): + """ + It has the same API as ``torch.autograd.profiler.profile`` class, + except that it enables profiling on all threads running RPC server request callbacks. + + Context manager that manages autograd profiler state and holds a summary of results. + Under the hood it just records events of functions being executed in C++ and + exposes those events to Python. You can wrap any code into it and it will + only report runtime of PyTorch functions. + Note: profiler is thread local and is automatically propagated into the async tasks + + Args: + enabled (bool, optional): Setting this to False makes this context manager a no-op. + Default: ``True``. + + use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. + Adds approximately 4us of overhead to each tensor operation. + Default: ``False`` + + record_shapes (bool, optional): If shapes recording is set, information + about input dimensions will be collected. This allows one to see which + dimensions have been used under the hood and further group by them + using prof.key_averages(group_by_input_shape=True). Please note that + shape recording might skew your profiling data. It is recommended to + use separate runs with and without shape recording to validate the timing. + Most likely the skew will be negligible for bottom most events (in a case + of nested function calls). But for higher level functions the total + self cpu time might be artificially increased because of the shape + collection. + + profile_memory (bool, optional): Whether to report memory usage, default: ``False`` + + .. warning: + Enabling memory profiling incurs additional profiler overhead + + .. 
warning: + Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), + one cannot use the profiler with ``use_cuda = True`` to benchmark + DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, + please use ``use_cuda = False`` or ``num_workers = 0``. + + Example: + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> x, y = torch.tensor(1), torch.tensor(2) + >>> outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + >>> outer_profile_rref.rpc_sync().__enter__() + >>> rpc.rpc_sync(dst_worker_name, torch.add, (x, y)) + >>> inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + >>> inner_profile_rref.rpc_sync().__enter__() + >>> rpc.rpc_sync(dst_worker_name, torch.sub, (x, y)) + >>> inner_profile_rref.rpc_sync().__exit__(None, None, None) + >>> outer_profile_rref.rpc_sync().__exit__(None, None, None) + >>> print(inner_profile_rref.rpc_sync().key_averages()) + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls + --------- --------------- --------------- --------------- --------------- --------------- --------------- + sub 85.06% 76.275us 100.00% 89.667us 89.667us 1 + empty 14.94% 13.392us 14.94% 13.392us 13.392us 1 + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Self CPU time total: 89.667us + >>> print(outer_profile_rref.rpc_sync().key_averages()) + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls + --------- --------------- --------------- --------------- --------------- --------------- --------------- + sub 35.65% 76.275us 41.91% 89.667us 89.667us 1 + empty 12.67% 27.101us 12.67% 27.101us 13.551us 2 + add 51.68% 110.550us 58.09% 124.259us 124.259us 1 + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Self CPU time total: 213.926us + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> # wait for worker 0 to finish work, and then shutdown. + >>> rpc.shutdown() + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def __enter__(self): + """ + Turn on server-side process-global profiling. + This enables thread-local profiler on all RPC threads running server-side request callbacks. + """ + if not self.enabled: + return + + if self.entered: # type: ignore[has-type] + raise RuntimeError("autograd profiler traces are not reentrant") + self.entered = True + + profiler_kind = ( + torch.autograd.ProfilerState.CUDA + if self.use_cuda + else torch.autograd.ProfilerState.CPU + ) + profiler_config = torch.autograd.ProfilerConfig( + profiler_kind, + self.record_shapes, + self.profile_memory, + False, + False, + False, + torch.profiler._ExperimentalConfig()) + _enable_server_process_global_profiler(profiler_config) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Turn off server-side process-global profiling. + Aggregate all profiling events recorded by RPC threads. + + These attributes are assigned on exiting context. 
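+
+        For example, a caller holding an instance of this class might consume
+        them as follows (a hedged sketch; ``prof`` and the placeholder bodies
+        are illustrative only)::
+
+            with _server_process_global_profile() as prof:
+                pass  # serve RPC requests while profiling is on
+            print(prof.function_events.table(sort_by="cpu_time_total"))
+            for request_events in prof.process_global_function_events:
+                pass  # one list of FunctionEvents per handled RPC request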
+ + Attributes: + function_events (torch.autograd.profiler.EventList). It's a list that has helper + methods, like 1) show record items in a pretty-print table. + 2) do averaging by grouping on keys. 3) and more. + + process_global_function_events (List[torch.autograd.profiler.FunctionEvent]). + It's a list of ``FunctionEvent`` elements. Every element is a profiling result + of an RPC request handling within the profiling range. + """ + if not self.enabled: + return + + process_global_events = _disable_server_process_global_profiler() + + # Every element in this list is a thread profiling result from an RPC request handling. + process_global_function_events = [] + for thread_local_events in process_global_events: + # Parse from ``Event``s to ``FunctionEvent``s. + thread_local_function_events = torch.autograd.profiler_legacy._parse_legacy_records( + thread_local_events + ) + thread_local_function_events.sort( + key=lambda function_event: [ + function_event.time_range.start, + -(function_event.time_range.end), + ] + ) + process_global_function_events.append(thread_local_function_events) + + flattened_function_events = list( + itertools.chain.from_iterable(process_global_function_events) + ) + self.function_events = torch.autograd.profiler_util.EventList( + flattened_function_events, + use_cuda=self.use_cuda, + profile_memory=self.profile_memory, + ) + self.function_events._build_tree() + + self.process_global_function_events = process_global_function_events + + return False diff --git a/venv/lib/python3.10/site-packages/torch/distributed/run.py b/venv/lib/python3.10/site-packages/torch/distributed/run.py new file mode 100644 index 0000000000000000000000000000000000000000..4928f6c4119e600c6d06ed3da30bbc7c4ecd6bd5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/run.py @@ -0,0 +1,883 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Superset of ``torch.distributed.launch``. + +``torchrun`` provides a superset of the functionality as ``torch.distributed.launch`` +with the following additional functionalities: + +1. Worker failures are handled gracefully by restarting all workers. + +2. Worker ``RANK`` and ``WORLD_SIZE`` are assigned automatically. + +3. Number of nodes is allowed to change between minimum and maximum sizes (elasticity). + +.. note:: ``torchrun`` is a python + `console script `_ + to the main module + `torch.distributed.run `_ + declared in the ``entry_points`` configuration in + `setup.py `_. + It is equivalent to invoking ``python -m torch.distributed.run``. + + +Transitioning from torch.distributed.launch to torchrun +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +``torchrun`` supports the same arguments as ``torch.distributed.launch`` **except** +for ``--use-env`` which is now deprecated. To migrate from ``torch.distributed.launch`` +to ``torchrun`` follow these steps: + +1. If your training script is already reading ``local_rank`` from the ``LOCAL_RANK`` environment variable. + Then you need simply omit the ``--use-env`` flag, e.g.: + + +--------------------------------------------------------------------+--------------------------------------------+ + | ``torch.distributed.launch`` | ``torchrun`` | + +====================================================================+============================================+ + | | | + | .. 
code-block:: shell-session | .. code-block:: shell-session | + | | | + | $ python -m torch.distributed.launch --use-env train_script.py | $ torchrun train_script.py | + | | | + +--------------------------------------------------------------------+--------------------------------------------+ + +2. If your training script reads local rank from a ``--local-rank`` cmd argument. + Change your training script to read from the ``LOCAL_RANK`` environment variable as + demonstrated by the following code snippet: + + +-------------------------------------------------------+----------------------------------------------------+ + | ``torch.distributed.launch`` | ``torchrun`` | + +=======================================================+====================================================+ + | | | + | .. code-block:: python | .. code-block:: python | + | | | + | | | + | import argparse | import os | + | parser = argparse.ArgumentParser() | local_rank = int(os.environ["LOCAL_RANK"]) | + | parser.add_argument("--local-rank", type=int) | | + | args = parser.parse_args() | | + | | | + | local_rank = args.local_rank | | + | | | + +-------------------------------------------------------+----------------------------------------------------+ + +The aformentioned changes suffice to migrate from ``torch.distributed.launch`` to ``torchrun``. +To take advantage of new features such as elasticity, fault-tolerance, and error reporting of ``torchrun`` +please refer to: + +* :ref:`elastic_train_script` for more information on authoring training scripts that are ``torchrun`` compliant. +* the rest of this page for more information on the features of ``torchrun``. + + +Usage +-------- + +Single-node multi-worker +++++++++++++++++++++++++++++++ + +:: + + torchrun + --standalone + --nnodes=1 + --nproc-per-node=$NUM_TRAINERS + YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...) + +Stacked single-node multi-worker ++++++++++++++++++++++++++++++++++++ + +To run multiple instances (separate jobs) of single-node, multi-worker on the +same host, we need to make sure that each instance (job) is +setup on different ports to avoid port conflicts (or worse, two jobs being merged +as a single job). To do this you have to run with ``--rdzv-backend=c10d`` +and specify a different port by setting ``--rdzv-endpoint=localhost:$PORT_k``. +For ``--nodes=1``, its often convenient to let ``torchrun`` pick a free random +port automatically instead of manually assigning different ports for each run. + +:: + + torchrun + --rdzv-backend=c10d + --rdzv-endpoint=localhost:0 + --nnodes=1 + --nproc-per-node=$NUM_TRAINERS + YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...) + + +Fault tolerant (fixed sized number of workers, no elasticity, tolerates 3 failures) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +:: + + torchrun + --nnodes=$NUM_NODES + --nproc-per-node=$NUM_TRAINERS + --max-restarts=3 + --rdzv-id=$JOB_ID + --rdzv-backend=c10d + --rdzv-endpoint=$HOST_NODE_ADDR + YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...) + +``HOST_NODE_ADDR``, in form [:] (e.g. node1.example.com:29400), specifies the node and +the port on which the C10d rendezvous backend should be instantiated and hosted. It can be any +node in your training cluster, but ideally you should pick a node that has a high bandwidth. + +.. note:: + If no port number is specified ``HOST_NODE_ADDR`` defaults to 29400. 
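+
+A minimal training script that works with the launch commands above might look
+as follows (a hedged sketch; the ``gloo`` backend and the print statement are
+illustrative only):
+
+::
+
+    # YOUR_TRAINING_SCRIPT.py
+    import os
+    import torch.distributed as dist
+
+    def main():
+        local_rank = int(os.environ["LOCAL_RANK"])
+        rank = int(os.environ["RANK"])
+        world_size = int(os.environ["WORLD_SIZE"])
+        dist.init_process_group(backend="gloo")
+        print(f"worker {rank}/{world_size} (local rank {local_rank}) is up")
+        dist.destroy_process_group()
+
+    if __name__ == "__main__":
+        main()
+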
+ +Elastic (``min=1``, ``max=4``, tolerates up to 3 membership changes or failures) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +:: + + torchrun + --nnodes=1:4 + --nproc-per-node=$NUM_TRAINERS + --max-restarts=3 + --rdzv-id=$JOB_ID + --rdzv-backend=c10d + --rdzv-endpoint=$HOST_NODE_ADDR + YOUR_TRAINING_SCRIPT.py (--arg1 ... train script args...) + +``HOST_NODE_ADDR``, in form [:] (e.g. node1.example.com:29400), specifies the node and +the port on which the C10d rendezvous backend should be instantiated and hosted. It can be any +node in your training cluster, but ideally you should pick a node that has a high bandwidth. + +.. note:: + If no port number is specified ``HOST_NODE_ADDR`` defaults to 29400. + +Note on rendezvous backend +------------------------------ + +For multi-node training you need to specify: + +1. ``--rdzv-id``: A unique job id (shared by all nodes participating in the job) +2. ``--rdzv-backend``: An implementation of + :py:class:`torch.distributed.elastic.rendezvous.RendezvousHandler` +3. ``--rdzv-endpoint``: The endpoint where the rendezvous backend is running; usually in form + ``host:port``. + +Currently ``c10d`` (recommended), ``etcd-v2``, and ``etcd`` (legacy) rendezvous backends are +supported out of the box. To use ``etcd-v2`` or ``etcd``, setup an etcd server with the ``v2`` api +enabled (e.g. ``--enable-v2``). + +.. warning:: + ``etcd-v2`` and ``etcd`` rendezvous use etcd API v2. You MUST enable the v2 API on the etcd + server. Our tests use etcd v3.4.3. + +.. warning:: + For etcd-based rendezvous we recommend using ``etcd-v2`` over ``etcd`` which is functionally + equivalent, but uses a revised implementation. ``etcd`` is in maintenance mode and will be + removed in a future version. + +Definitions +-------------- + +1. ``Node`` - A physical instance or a container; maps to the unit that the job manager works with. + +2. ``Worker`` - A worker in the context of distributed training. + +3. ``WorkerGroup`` - The set of workers that execute the same function (e.g. trainers). + +4. ``LocalWorkerGroup`` - A subset of the workers in the worker group running on the same node. + +5. ``RANK`` - The rank of the worker within a worker group. + +6. ``WORLD_SIZE`` - The total number of workers in a worker group. + +7. ``LOCAL_RANK`` - The rank of the worker within a local worker group. + +8. ``LOCAL_WORLD_SIZE`` - The size of the local worker group. + +9. ``rdzv_id`` - A user-defined id that uniquely identifies the worker group for a job. This id is + used by each node to join as a member of a particular worker group. + +9. ``rdzv_backend`` - The backend of the rendezvous (e.g. ``c10d``). This is typically a strongly + consistent key-value store. + +10. ``rdzv_endpoint`` - The rendezvous backend endpoint; usually in form ``:``. + +A ``Node`` runs ``LOCAL_WORLD_SIZE`` workers which comprise a ``LocalWorkerGroup``. The union of +all ``LocalWorkerGroups`` in the nodes in the job comprise the ``WorkerGroup``. + +Environment Variables +---------------------- + +The following environment variables are made available to you in your script: + +1. ``LOCAL_RANK`` - The local rank. + +2. ``RANK`` - The global rank. + +3. ``GROUP_RANK`` - The rank of the worker group. A number between 0 and ``max_nnodes``. When + running a single worker group per node, this is the rank of the node. + +4. ``ROLE_RANK`` - The rank of the worker across all the workers that have the same role. The role + of the worker is specified in the ``WorkerSpec``. + +5. 
``LOCAL_WORLD_SIZE`` - The local world size (e.g. number of workers running locally); equals to + ``--nproc-per-node`` specified on ``torchrun``. + +6. ``WORLD_SIZE`` - The world size (total number of workers in the job). + +7. ``ROLE_WORLD_SIZE`` - The total number of workers that was launched with the same role specified + in ``WorkerSpec``. + +8. ``MASTER_ADDR`` - The FQDN of the host that is running worker with rank 0; used to initialize + the Torch Distributed backend. + +9. ``MASTER_PORT`` - The port on the ``MASTER_ADDR`` that can be used to host the C10d TCP store. + +10. ``TORCHELASTIC_RESTART_COUNT`` - The number of worker group restarts so far. + +11. ``TORCHELASTIC_MAX_RESTARTS`` - The configured maximum number of restarts. + +12. ``TORCHELASTIC_RUN_ID`` - Equal to the rendezvous ``run_id`` (e.g. unique job id). + +13. ``PYTHON_EXEC`` - System executable override. If provided, the python user script will + use the value of ``PYTHON_EXEC`` as executable. The `sys.executable` is used by default. + +Deployment +------------ + +1. (Not needed for the C10d backend) Start the rendezvous backend server and get the endpoint (to be + passed as ``--rdzv-endpoint`` to the launcher script) + +2. Single-node multi-worker: Start the launcher on the host to start the agent process which + creates and monitors a local worker group. + +3. Multi-node multi-worker: Start the launcher with the same arguments on all the nodes + participating in training. + +When using a job/cluster manager the entry point command to the multi-node job should be this +launcher. + +Failure Modes +--------------- + +1. Worker failure: For a training job with ``n`` workers, if ``k<=n`` workers fail all workers + are stopped and restarted up to ``max_restarts``. + +2. Agent failure: An agent failure results in a local worker group failure. It is up to the job + manager to fail the entire job (gang semantics) or attempt to replace the node. Both behaviors + are supported by the agent. + +3. Node failure: Same as agent failure. + +Membership Changes +-------------------- + +1. Node departure (scale-down): The agent is notified of the departure, all existing workers are + stopped, a new ``WorkerGroup`` is formed, and all workers are started with a new ``RANK`` and + ``WORLD_SIZE``. + +2. Node arrival (scale-up): The new node is admitted to the job, all existing workers are stopped, + a new ``WorkerGroup`` is formed, and all workers are started with a new ``RANK`` and + ``WORLD_SIZE``. + +Important Notices +-------------------- + +1. This utility and multi-process distributed (single-node or + multi-node) GPU training currently only achieves the best performance using + the NCCL distributed backend. Thus NCCL backend is the recommended backend to + use for GPU training. + +2. The environment variables necessary to initialize a Torch process group are provided to you by + this module, no need for you to pass ``RANK`` manually. To initialize a process group in your + training script, simply run: + +:: + + >>> # xdoctest: +SKIP("stub") + >>> import torch.distributed as dist + >>> dist.init_process_group(backend="gloo|nccl") + +3. In your training program, you can either use regular distributed functions + or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your + training program uses GPUs for training and you would like to use + :func:`torch.nn.parallel.DistributedDataParallel` module, + here is how to configure it. 
+ +:: + + local_rank = int(os.environ["LOCAL_RANK"]) + model = torch.nn.parallel.DistributedDataParallel(model, + device_ids=[local_rank], + output_device=local_rank) + +Please ensure that ``device_ids`` argument is set to be the only GPU device id +that your code will be operating on. This is generally the local rank of the +process. In other words, the ``device_ids`` needs to be ``[int(os.environ("LOCAL_RANK"))]``, +and ``output_device`` needs to be ``int(os.environ("LOCAL_RANK"))`` in order to use this +utility + + +4. On failures or membership changes ALL surviving workers are killed immediately. Make sure to + checkpoint your progress. The frequency of checkpoints should depend on your job's tolerance + for lost work. + +5. This module only supports homogeneous ``LOCAL_WORLD_SIZE``. That is, it is assumed that all + nodes run the same number of local workers (per role). + +6. ``RANK`` is NOT stable. Between restarts, the local workers on a node can be assigned a + different range of ranks than before. NEVER hard code any assumptions about the stable-ness of + ranks or some correlation between ``RANK`` and ``LOCAL_RANK``. + +7. When using elasticity (``min_size!=max_size``) DO NOT hard code assumptions about + ``WORLD_SIZE`` as the world size can change as nodes are allowed to leave and join. + +8. It is recommended for your script to have the following structure: + +:: + + def main(): + load_checkpoint(checkpoint_path) + initialize() + train() + + def train(): + for batch in iter(dataset): + train_step(batch) + + if should_checkpoint: + save_checkpoint(checkpoint_path) + +9. (Recommended) On worker errors, this tool will summarize the details of the error + (e.g. time, rank, host, pid, traceback, etc). On each node, the first error (by timestamp) + is heuristically reported as the "Root Cause" error. To get tracebacks as part of this + error summary print out, you must decorate your main entrypoint function in your + training script as shown in the example below. If not decorated, then the summary + will not include the traceback of the exception and will only contain the exitcode. + For details on torchelastic error handling see: https://pytorch.org/docs/stable/elastic/errors.html + +:: + + from torch.distributed.elastic.multiprocessing.errors import record + + @record + def main(): + # do train + pass + + if __name__ == "__main__": + main() + +""" +import logging +import os +import sys +import uuid +import importlib.metadata as metadata +from argparse import REMAINDER, ArgumentParser +from typing import Callable, List, Tuple, Type, Union, Optional, Set + +import torch +from torch.distributed.argparse_util import check_env, env +from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, LogsSpecs, Std +from torch.distributed.elastic.multiprocessing.errors import record +from torch.distributed.elastic.rendezvous.utils import _parse_rendezvous_config +from torch.distributed.elastic.utils import macros +from torch.distributed.elastic.utils.logging import get_logger +from torch.distributed.launcher.api import LaunchConfig, elastic_launch +from torch.utils.backend_registration import _get_custom_mod_func + +log = get_logger(__name__) + + +def get_args_parser() -> ArgumentParser: + """Parse the command line options.""" + parser = ArgumentParser(description="Torch Distributed Elastic Training Launcher") + + # + # Worker/node size related arguments. 
+ # + + parser.add_argument( + "--nnodes", + action=env, + type=str, + default="1:1", + help="Number of nodes, or the range of nodes in form :.", + ) + parser.add_argument( + "--nproc-per-node", + "--nproc_per_node", + action=env, + type=str, + default="1", + help="Number of workers per node; supported values: [auto, cpu, gpu, int].", + ) + + # + # Rendezvous related arguments + # + + parser.add_argument( + "--rdzv-backend", + "--rdzv_backend", + action=env, + type=str, + default="static", + help="Rendezvous backend.", + ) + parser.add_argument( + "--rdzv-endpoint", + "--rdzv_endpoint", + action=env, + type=str, + default="", + help="Rendezvous backend endpoint; usually in form :.", + ) + parser.add_argument( + "--rdzv-id", + "--rdzv_id", + action=env, + type=str, + default="none", + help="User-defined group id.", + ) + parser.add_argument( + "--rdzv-conf", + "--rdzv_conf", + action=env, + type=str, + default="", + help="Additional rendezvous configuration (=,=,...).", + ) + parser.add_argument( + "--standalone", + action=check_env, + help="Start a local standalone rendezvous backend that is represented by a C10d TCP store " + "on a free port. Useful when launching single-node, multi-worker job. If specified " + "--rdzv-backend, --rdzv-endpoint, --rdzv-id are auto-assigned and any explicitly set values " + "are ignored.", + ) + + # + # User-code launch related arguments. + # + + parser.add_argument( + "--max-restarts", + "--max_restarts", + action=env, + type=int, + default=0, + help="Maximum number of worker group restarts before failing.", + ) + parser.add_argument( + "--monitor-interval", + "--monitor_interval", + action=env, + type=float, + default=5, + help="Interval, in seconds, to monitor the state of workers.", + ) + parser.add_argument( + "--start-method", + "--start_method", + action=env, + type=str, + default="spawn", + choices=["spawn", "fork", "forkserver"], + help="Multiprocessing start method to use when creating workers.", + ) + parser.add_argument( + "--role", + action=env, + type=str, + default="default", + help="User-defined role for the workers.", + ) + parser.add_argument( + "-m", + "--module", + action=check_env, + help="Change each process to interpret the launch script as a Python module, executing " + "with the same behavior as 'python -m'.", + ) + parser.add_argument( + "--no-python", + "--no_python", + action=check_env, + help="Skip prepending the training script with 'python' - just execute it directly. Useful " + "when the script is not a Python script.", + ) + + parser.add_argument( + "--run-path", + "--run_path", + action=check_env, + help="Run the training script with runpy.run_path in the same interpreter." + " Script must be provided as an abs path (e.g. /abs/path/script.py)." + " Takes precedence over --no-python.", + ) + parser.add_argument( + "--log-dir", + "--log_dir", + action=env, + type=str, + default=None, + help="Base directory to use for log files (e.g. /var/log/torch/elastic). The same " + "directory is re-used for multiple runs (a unique job-level sub-directory is created with " + "rdzv_id as the prefix).", + ) + parser.add_argument( + "-r", + "--redirects", + action=env, + type=str, + default="0", + help="Redirect std streams into a log file in the log directory (e.g. 
[-r 3] redirects " + "both stdout+stderr for all workers, [-r 0:1,1:2] redirects stdout for local rank 0 and " + "stderr for local rank 1).", + ) + parser.add_argument( + "-t", + "--tee", + action=env, + type=str, + default="0", + help="Tee std streams into a log file and also to console (see --redirects for format).", + ) + + parser.add_argument( + "--local-ranks-filter", + "--local_ranks_filter", + action=env, + type=str, + default="", + help="Only show logs from specified ranks in console (e.g. [--local_ranks_filter=0,1,2] will " + "only show logs from rank 0, 1 and 2). This will only apply to stdout and stderr, not to" + "log files saved via --redirect or --tee", + ) + + # + # Backwards compatible parameters with caffe2.distributed.launch. + # + + parser.add_argument( + "--node-rank", + "--node_rank", + type=int, + action=env, + default=0, + help="Rank of the node for multi-node distributed training.", + ) + parser.add_argument( + "--master-addr", + "--master_addr", + default="127.0.0.1", + type=str, + action=env, + help="Address of the master node (rank 0) that only used for static rendezvous. It should " + "be either the IP address or the hostname of rank 0. For single node multi-proc training " + "the --master-addr can simply be 127.0.0.1; IPv6 should have the pattern " + "`[0:0:0:0:0:0:0:1]`.", + ) + parser.add_argument( + "--master-port", + "--master_port", + default=29500, + type=int, + action=env, + help="Port on the master node (rank 0) to be used for communication during distributed " + "training. It is only used for static rendezvous.", + ) + parser.add_argument( + "--local-addr", + "--local_addr", + default=None, + type=str, + action=env, + help="Address of the local node. If specified, will use the given address for connection. " + "Else, will look up the local node address instead. Else, it will be default to local " + "machine's FQDN.", + ) + + parser.add_argument( + "--logs-specs", + "--logs_specs", + default=None, + type=str, + help="torchrun.logs_specs group entrypoint name, value must be type of LogsSpecs. " + "Can be used to override custom logging behavior.", + ) + + # + # Positional arguments. + # + + parser.add_argument( + "training_script", + type=str, + help="Full path to the (single GPU) training program/script to be launched in parallel, " + "followed by all the arguments for the training script.", + ) + + # Rest from the training program. 
+ parser.add_argument("training_script_args", nargs=REMAINDER) + + return parser + + +def parse_args(args): + parser = get_args_parser() + return parser.parse_args(args) + + +def parse_min_max_nnodes(nnodes: str): + arr = nnodes.split(":") + + if len(arr) == 1: + min_nodes = max_nodes = int(arr[0]) + elif len(arr) == 2: + min_nodes = int(arr[0]) + max_nodes = int(arr[1]) + else: + raise RuntimeError(f'nnodes={nnodes} is not in "MIN:MAX" format') # noqa: E231 + + return min_nodes, max_nodes + + +def determine_local_world_size(nproc_per_node: str): + try: + logging.info("Using nproc_per_node=%s.", nproc_per_node) + return int(nproc_per_node) + except ValueError as e: + if nproc_per_node == "cpu": + num_proc = os.cpu_count() + device_type = "cpu" + elif nproc_per_node == "gpu": + if not torch.cuda.is_available(): + raise ValueError("Cuda is not available.") from e + device_type = "gpu" + num_proc = torch.cuda.device_count() + elif nproc_per_node == torch._C._get_privateuse1_backend_name(): + if not _get_custom_mod_func("is_available")(): + raise ValueError(f"{nproc_per_node} is not available.") from e + device_type = nproc_per_node + num_proc = _get_custom_mod_func("device_count")() + elif nproc_per_node == "auto": + if torch.cuda.is_available(): + num_proc = torch.cuda.device_count() + device_type = "gpu" + elif hasattr(torch, torch._C._get_privateuse1_backend_name()) and \ + _get_custom_mod_func("is_available")(): + num_proc = _get_custom_mod_func("device_count")() + device_type = torch._C._get_privateuse1_backend_name() + else: + num_proc = os.cpu_count() + device_type = "cpu" + else: + raise ValueError(f"Unsupported nproc_per_node value: {nproc_per_node}") from e + + log.info( + "Using nproc_per_node=%s," + " setting to %s since the instance " + "has %s %s", + nproc_per_node, num_proc, os.cpu_count(), device_type + ) + return num_proc + + +def get_rdzv_endpoint(args): + if args.rdzv_backend == "static" and not args.rdzv_endpoint: + return f"{args.master_addr}:{args.master_port}" # noqa: E231 + return args.rdzv_endpoint + + +def get_use_env(args) -> bool: + """ + Retrieve ``use_env`` from the args. + + ``use_env`` is a legacy argument, if ``use_env`` is False, the + ``--node-rank`` argument will be transferred to all worker processes. + ``use_env`` is only used by the ``torch.distributed.launch`` and will + be deprecated in future releases. + """ + if not hasattr(args, "use_env"): + return True + return args.use_env + + +def _get_logs_specs_class(logs_specs_name: Optional[str]) -> Type[LogsSpecs]: + """ + Attemps to load `torchrun.logs_spec` entrypoint with key of `logs_specs_name` param. + Provides plugin mechanism to provide custom implementation of LogsSpecs. + + Returns `DefaultLogsSpecs` when logs_spec_name is None. + Raises ValueError when entrypoint for `logs_spec_name` can't be found in entrypoints. 
+ """ + logs_specs_cls = None + if logs_specs_name is not None: + eps = metadata.entry_points() + if hasattr(eps, "select"): # >= 3.10 + group = eps.select(group="torchrun.logs_specs") + if group.select(name=logs_specs_name): + logs_specs_cls = group[logs_specs_name].load() + + elif specs := eps.get("torchrun.logs_specs"): # < 3.10 + if entrypoint_list := [ep for ep in specs if ep.name == logs_specs_name]: + logs_specs_cls = entrypoint_list[0].load() + + if logs_specs_cls is None: + raise ValueError(f"Could not find entrypoint under 'torchrun.logs_specs[{logs_specs_name}]' key") + + logging.info("Using logs_spec '%s' mapped to %s", logs_specs_name, str(logs_specs_cls)) + else: + logs_specs_cls = DefaultLogsSpecs + + return logs_specs_cls + + +def config_from_args(args) -> Tuple[LaunchConfig, Union[Callable, str], List[str]]: + # If ``args`` not passed, defaults to ``sys.argv[:1]`` + min_nodes, max_nodes = parse_min_max_nnodes(args.nnodes) + assert 0 < min_nodes <= max_nodes + assert args.max_restarts >= 0 + + if hasattr(args, "master_addr") and args.rdzv_backend != "static" and not args.rdzv_endpoint: + log.warning( + "master_addr is only used for static rdzv_backend and when rdzv_endpoint " + "is not specified." + ) + + nproc_per_node = determine_local_world_size(args.nproc_per_node) + if "OMP_NUM_THREADS" not in os.environ and nproc_per_node > 1: + omp_num_threads = 1 + log.warning( + "\n*****************************************\n" + "Setting OMP_NUM_THREADS environment variable for each process to be " + "%s in default, to avoid your system being overloaded, " + "please further tune the variable for optimal performance in " + "your application as needed. \n" + "*****************************************", + omp_num_threads + ) + # This env variable will be passed down to the subprocesses + os.environ["OMP_NUM_THREADS"] = str(omp_num_threads) + + log_line_prefix_template = os.getenv("TORCHELASTIC_LOG_LINE_PREFIX_TEMPLATE") + + rdzv_configs = _parse_rendezvous_config(args.rdzv_conf) + + if args.rdzv_backend == "static": + rdzv_configs["rank"] = args.node_rank + + rdzv_endpoint = get_rdzv_endpoint(args) + + ranks: Optional[Set[int]] = None + if args.local_ranks_filter: + try: + ranks = set(map(int, args.local_ranks_filter.split(","))) + assert ranks + except Exception as e: + raise Exception( + "--local_ranks_filter must be a comma-separated list of integers e.g. 
--local_ranks_filter=0,1,2" + ) from e + + logs_specs_cls: Type[LogsSpecs] = _get_logs_specs_class(args.logs_specs) + logs_specs = logs_specs_cls( + log_dir=args.log_dir, + redirects=Std.from_str(args.redirects), + tee=Std.from_str(args.tee), + local_ranks_filter=ranks, + ) + + config = LaunchConfig( + min_nodes=min_nodes, + max_nodes=max_nodes, + nproc_per_node=nproc_per_node, + run_id=args.rdzv_id, + role=args.role, + rdzv_endpoint=rdzv_endpoint, + rdzv_backend=args.rdzv_backend, + rdzv_configs=rdzv_configs, + max_restarts=args.max_restarts, + monitor_interval=args.monitor_interval, + start_method=args.start_method, + log_line_prefix_template=log_line_prefix_template, + local_addr=args.local_addr, + logs_specs=logs_specs, + ) + + with_python = not args.no_python + cmd: Union[Callable, str] + cmd_args = [] + use_env = get_use_env(args) + if args.run_path: + cmd = run_script_path + cmd_args.append(args.training_script) + else: + if with_python: + cmd = os.getenv("PYTHON_EXEC", sys.executable) + cmd_args.append("-u") + if args.module: + cmd_args.append("-m") + cmd_args.append(args.training_script) + else: + if args.module: + raise ValueError( + "Don't use both the '--no-python' flag" + " and the '--module' flag at the same time." + ) + cmd = args.training_script + if not use_env: + cmd_args.append(f"--local-rank={macros.local_rank}") + cmd_args.extend(args.training_script_args) + + return config, cmd, cmd_args + + +def run_script_path(training_script: str, *training_script_args: str): + """ + Run the provided `training_script` from within this interpreter. + + Usage: `script_as_function("/abs/path/to/script.py", "--arg1", "val1")` + """ + import runpy + import sys + + sys.argv = [training_script] + [*training_script_args] + runpy.run_path(sys.argv[0], run_name="__main__") + + +def run(args): + if args.standalone: + args.rdzv_backend = "c10d" + args.rdzv_endpoint = "localhost:0" + args.rdzv_id = str(uuid.uuid4()) + log.info( + "\n**************************************\n" + "Rendezvous info:\n" + "--rdzv-backend=%s " + "--rdzv-endpoint=%s " + "--rdzv-id=%s\n" + "**************************************\n", + args.rdzv_backend, args.rdzv_endpoint, args.rdzv_id + ) + + config, cmd, cmd_args = config_from_args(args) + elastic_launch( + config=config, + entrypoint=cmd, + )(*cmd_args) + + +@record +def main(args=None): + args = parse_args(args) + run(args) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/distributed/utils.py b/venv/lib/python3.10/site-packages/torch/distributed/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f47908d96c747b9a209adda9d0b53180210f3abd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/utils.py @@ -0,0 +1,339 @@ +import dataclasses +import traceback +from typing import Any, Callable, Container, Dict, List, Optional, OrderedDict, Tuple, TypeVar, overload + +import torch +import torch.distributed as dist +from torch import nn +from torch.nn.parallel._functions import _get_stream +from torch.nn.parallel.scatter_gather import _is_namedtuple +from torch.nn.utils.rnn import PackedSequence + +__all__ = [] # type: ignore[var-annotated] + + +def _pack_kwargs(*args: Any, **kwargs: Any) -> Tuple[Tuple[Any, ...], Tuple[str, ...]]: + """ + Turn argument list into separate key list and value list (unpack_kwargs does the opposite). 
+ + Inspiration: https://github.com/facebookresearch/fairscale/blob/eeb6684/fairscale/internal/containers.py#L70 + Usage:: + + kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4) + assert kwarg_keys == ("a", "b") + assert flat_args == (1, 2, 3, 4) + args, kwargs = unpack_kwargs(kwarg_keys, flat_args) + assert args == (1, 2) + assert kwargs == {"a": 3, "b": 4} + Returns: + Tuple[Tuple[Any, ...], Tuple[str, ...]]: The first tuple element gives + gives both positional args and kwarg values, where the positional args + proceed kwarg values and kwarg values are ordered consistently with the + kwarg keys. The second tuple element gives the kwarg keys. + The second tuple element's length is at most the first tuple element's length. + """ + kwarg_keys: List[str] = [] + flat_args: List[Any] = list(args) + for k, v in kwargs.items(): + kwarg_keys.append(k) + flat_args.append(v) + + return tuple(flat_args), tuple(kwarg_keys) + +def _cast_forward_inputs( + dtype: Optional[torch.dtype], + *args: Any, + **kwargs: Any, +) -> Tuple[Any, Any]: + """ + Cast floating point tensors in ``args`` and ``kwargs`` to ``input_dtype``. + + This respects the existing ``requires_grad`` on the tensors. + """ + if dtype is None: + return args, kwargs + + def cast_fn(x: torch.Tensor) -> torch.Tensor: + if not torch.is_floating_point(x) or x.dtype == dtype: + return x + return x.to(dtype) + + return (_apply_to_tensors(cast_fn, args), _apply_to_tensors(cast_fn, kwargs)) + +def _unpack_kwargs(flat_args: Tuple[Any, ...], kwarg_keys: Tuple[str, ...]) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: + """See _pack_kwargs.""" + assert len(kwarg_keys) <= len( + flat_args + ), f"too many keys {len(kwarg_keys)} vs. {len(flat_args)}" + if len(kwarg_keys) == 0: + return flat_args, {} + args = flat_args[: -len(kwarg_keys)] + kwargs = dict(zip(kwarg_keys, flat_args[-len(kwarg_keys) :])) + return args, kwargs + + +S = TypeVar("S", dict, list, tuple) +T = TypeVar("T", torch.Tensor, PackedSequence) + + +@overload +def _recursive_to(inputs: S, target_device: torch.device, use_side_stream_for_tensor_copies: bool) -> List[S]: + ... + + +@overload +def _recursive_to(inputs: T, target_device: torch.device, use_side_stream_for_tensor_copies: bool) -> Tuple[T]: + ... + + +def _recursive_to(inputs, target_device, use_side_stream_for_tensor_copies): + r"""Recursively moves input to the target_device.""" + + def to_map(obj): + if isinstance(obj, (torch.Tensor, PackedSequence)): + device = obj.data.device if isinstance(obj, PackedSequence) else obj.device + if device == target_device: + return (obj,) + if not use_side_stream_for_tensor_copies: + return (obj.to(target_device),) + else: + # If the custom module is not registered to torch, stream is not used for acceleration + device_mod = getattr(torch, device.type, None) + if device.type == "cpu" or device_mod is None: + return (obj.to(target_device),) + # Perform CPU -> target_device copies in a background stream. 
This code is + # motivated from similar logic in torch/nn/parallel/_functions.py + stream = _get_stream(target_device) + with device_mod.stream(stream): + output = obj.to(target_device) + # synchronize with the copy stream + with device_mod.device(target_device.index): + current_stream = device_mod.current_stream() + # Sync the current stream with the copy stream + current_stream.wait_stream(stream) + # Ensure tensor memory is not reused until work on + # main stream is complete + if isinstance(obj, PackedSequence): + output.data.record_stream(current_stream) # type: ignore[arg-type] + else: + assert isinstance(output, torch.Tensor) + output.record_stream(current_stream) # type: ignore[arg-type] + return (output,) + if _is_namedtuple(obj): + return [type(obj)(*args) for args in zip(*map(to_map, obj))] + if isinstance(obj, tuple) and len(obj) > 0: + return list(zip(*map(to_map, obj))) + if isinstance(obj, list) and len(obj) > 0: + return [list(i) for i in zip(*map(to_map, obj))] + if isinstance(obj, dict) and len(obj) > 0: + return [type(obj)(i) for i in zip(*map(to_map, obj.items()))] + return [obj] + + # Avoid reference cycle + try: + res = to_map(inputs) + finally: + to_map = None # type: ignore[assignment] + return res + + +def _p_assert(cond: Any, s: str, raise_assertion_error: bool = True) -> None: + """Alternate to ``assert`` when in the backward context to print the error message ``s`` since otherwise, it is swallowed.""" + if not cond: + print(s) + traceback.print_stack() + if raise_assertion_error: + raise AssertionError(s) + + +def _alloc_storage(tensor: torch.Tensor, size: torch.Size) -> None: + """ + Allocate storage for ``tensor`` with the given size. + + Returns: + bool: ``True`` if this method allocated storage and ``False`` if the + storage was already allocated. + """ + with torch.no_grad(): + if ( + not torch.distributed._functional_collectives.is_torchdynamo_compiling() + ): + already_allocated = tensor._typed_storage()._size() == size.numel() + if not already_allocated: + tensor_storage_size = tensor._typed_storage()._size() + _p_assert( + tensor_storage_size == 0, + "Tensor storage should have been resized to be 0 but got PLACEHOLDEr", + ) + tensor._typed_storage()._resize_(size.numel()) + + +def _free_storage(tensor: torch.Tensor): + """ + Frees the underlying storage of ``tensor``. + + Returns: + bool: ``True`` if the method freed the storage and ``False`` if the + storage was already freed. + """ + with torch.no_grad(): + if ( + not torch.distributed._functional_collectives.is_torchdynamo_compiling() + ): + already_freed = tensor._typed_storage()._size() == 0 + if not already_freed: + _p_assert( + tensor.storage_offset() == 0, + "Freeing a tensor's storage is unsafe when it is not the sole occupant\n" + f"storage offset: {tensor.storage_offset()}\n" + f"storage size: {tensor._typed_storage()._size()}\n" + f"tensor shape: {tensor.shape}", + ) + tensor._typed_storage()._resize_(0) + + + +Q = TypeVar("Q") +R = TypeVar("R", dict, list, tuple, set, OrderedDict, PackedSequence, Any) + + +@overload +def _apply_to_tensors(fn: Callable[[torch.Tensor], Q], container: torch.Tensor) -> Q: + ... + + +@overload +def _apply_to_tensors(fn: Callable[[torch.Tensor], Any], container: R) -> R: + ... 
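+
+
+# A hedged usage sketch for the implementation below (the container contents
+# are illustrative):
+#
+#     batch = {"x": torch.ones(2), "meta": [torch.zeros(1), "tag"]}
+#     halved = _apply_to_tensors(lambda t: t.half(), batch)
+#     # every tensor leaf is now float16; non-tensor leaves such as "tag"
+#     # are returned unchanged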
+ + +def _apply_to_tensors(fn, container): + """Recursively apply to all tensor in different kinds of container types.""" + + def apply(x): + if isinstance(x, torch.Tensor): + return fn(x) + elif hasattr(x, "__dataclass_fields__"): + dc = dataclasses.replace(x) + for f in dataclasses.fields(dc): + name = f.name + setattr(dc, name, apply(getattr(dc, name))) + return dc + elif isinstance(x, OrderedDict): + od = x.__class__() + for key, value in x.items(): + od[key] = apply(value) + return od + elif isinstance(x, PackedSequence): + apply(x.data) + return x + elif isinstance(x, dict): + return {key: apply(value) for key, value in x.items()} + elif _is_namedtuple(x): + res = (apply(el) for el in x) + return type(x)(*res) + elif isinstance(x, (list, tuple, set)): + return type(x)(apply(el) for el in x) + else: + return x + + return apply(container) + + +def _to_kwargs( + inputs: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]], + target_device: torch.device, + use_side_stream_for_tensor_copies: bool, +) -> Tuple[Tuple[Any, ...], Tuple[Dict[str, Any], ...]]: + moved_inputs = ( + _recursive_to(inputs, target_device, use_side_stream_for_tensor_copies) + if inputs + else [] + ) + moved_kwargs = ( + _recursive_to(kwargs, target_device, use_side_stream_for_tensor_copies) + if kwargs + else [] + ) + if len(moved_inputs) < len(moved_kwargs): + moved_inputs.extend([() for _ in range(len(moved_kwargs) - len(inputs))]) + elif len(moved_kwargs) < len(moved_inputs): + moved_kwargs.extend([{} for _ in range(len(moved_inputs) - len(moved_kwargs))]) + return tuple(moved_inputs), tuple(moved_kwargs) + + +def _verify_param_shape_across_processes( + process_group: dist.ProcessGroup, tensors: List[torch.Tensor], logger: Optional[dist.Logger] = None +): + return dist._verify_params_across_processes(process_group, tensors, logger) + + +def _sync_module_states( + module: nn.Module, + process_group: dist.ProcessGroup, + broadcast_bucket_size: int, + src: int, + params_and_buffers_to_ignore: Container[str], + broadcast_buffers: bool = True, +) -> None: + """ + Sync ``module``'s parameters and buffers state. + + Syncs ``module``'s parameters and buffers state so that all ranks contain + the same module state across all ranks. Note that this API assumes that all + parameter shapes are consistent before running the synchronization. This can + be checked with ``_verify_param_shape_across_processes``. + """ + module_states: List[torch.Tensor] = [] + for name, param in module.named_parameters(): + if name not in params_and_buffers_to_ignore: + module_states.append(param.detach()) + + if broadcast_buffers: + for name, buffer in module.named_buffers(): + if name not in params_and_buffers_to_ignore: + module_states.append(buffer.detach()) + + _sync_params_and_buffers(process_group, module_states, broadcast_bucket_size, src) + + +def _sync_params_and_buffers( + process_group: dist.ProcessGroup, + module_states: List[torch.Tensor], + broadcast_bucket_size: int, + src: int, +) -> None: + """Synchronize ``module_states`` (list of tensors) across all processes by broadcasting them from rank 0.""" + if len(module_states) > 0: + dist._broadcast_coalesced( + process_group, module_states, broadcast_bucket_size, src + ) + + +def _replace_by_prefix( + state_dict: Dict[str, Any], + old_prefix: str, + new_prefix: str, +) -> None: + """ + Replace all keys that match a given old_prefix with a new_prefix (in-place). 
+
+    Usage::
+
+        state_dict = {"layer.xyz": torch.tensor(1)}
+        _replace_by_prefix(state_dict, "layer.", "module.layer.")
+        assert state_dict == {"module.layer.xyz": torch.tensor(1)}
+    """
+    if old_prefix == new_prefix:
+        raise ValueError("old_prefix and new_prefix must be distinct")
+    for key in list(state_dict.keys()):
+        if not key.startswith(old_prefix):
+            continue
+        new_key = new_prefix + key[len(old_prefix) :]
+        state_dict[new_key] = state_dict[key]
+        del state_dict[key]
+
+
+def _data_ptr_allocated(tensor: torch.Tensor) -> bool:
+    return tensor.untyped_storage().data_ptr() > 0
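+
+
+# A hedged usage sketch for ``_cast_forward_inputs`` defined above (the
+# argument values are illustrative):
+#
+#     args, kwargs = _cast_forward_inputs(
+#         torch.float16, torch.randn(2), scale=torch.tensor(1.0), step=3
+#     )
+#     # floating-point tensors are cast to float16; the integer ``step`` and
+#     # any non-floating-point tensors pass through unchanged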