diff --git a/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..d5449e125fdcf18429aca5dc413f849b4f569e12 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d741ec339177bb2b66495f4c6fdbde2b4024c35c1473dc5f957082788e4be21 +size 33555627 diff --git a/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..959c306dc70391f30f483c6a1bd7f146e79c0e36 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a575846fbdbc06e7dd8134227ac7cecea4ee564c09b352156d82fe22db7909cf +size 33555533 diff --git a/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2f974080d399700dcae3e814971051095fb22b6d --- /dev/null +++ b/ckpts/universal/global_step120/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75dc3bb752cb92d344aeed81ae1a24a0bcf4aca44cb8dab7dd8e951ed0a672c1 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/4.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/4.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..cebfd2add4e46522a05f70ebfbe75edeaa2b0638 --- /dev/null +++ b/ckpts/universal/global_step120/zero/4.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e497ea1e1441ff74a24f84984159fdbe127060190f52ed1efbe1773881c48d35 +size 9293 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3e5e628b0522b58ea4fb12554ff3556c40b7a2c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py @@ -0,0 +1,342 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from typing import Optional, Sequence + +# Import all builtin dist tensor ops +import torch +import torch.distributed._tensor.ops +import torch.distributed._tensor.random as random +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.api import distribute_module, distribute_tensor, DTensor +from torch.distributed._tensor.ops.utils import normalize_to_torch_size +from torch.distributed._tensor.placement_types import Placement, Replicate, Shard +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh + +# All public APIs from dtensor package +__all__ = [ + "DTensor", + "DeviceMesh", + "distribute_tensor", + "distribute_module", + "init_device_mesh,", + "Shard", + "Replicate", +] + + +def _dtensor_init_helper( + init_op, + size: torch.Size, + device_mesh=None, + placements=None, + **kwargs, +) -> DTensor: + # if device_mesh is None, use the one from mesh resources + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + kwargs["device"] = device_mesh.device_type + + # set default placements to replicated if not specified + placements = placements or tuple(Replicate() for _ in range(device_mesh.ndim)) + + # check device_mesh againts placements + assert device_mesh.ndim == len( + placements + ), "mesh dimension does not match the length of placements" + + assert kwargs["layout"] == torch.strided, "layout value not supported!" + torch_stride = torch._prims_common.make_contiguous_strides_for(size) + + # get local tensor shape + local_shape = compute_local_shape(size, device_mesh, placements) + # initialize the local tensor + if init_op == torch.full: + fill_value = kwargs.pop("fill_value", 0) + local_tensor = init_op(local_shape, fill_value, **kwargs) + elif init_op == torch.rand or init_op == torch.randn: + # this tensor meta is not used except `shape` + dtype = kwargs.get("dtype", torch.get_default_dtype()) + + from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + + tensor_meta = TensorMeta(size, (0,), dtype) + spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta) + + if random.is_rng_supported_mesh(device_mesh) and not random._rng_tracker: + random._rng_tracker = random.OffsetBasedRNGTracker() + + assert random._rng_tracker is not None + with random._rng_tracker._distribute_region(spec): + local_tensor = init_op(local_shape, **kwargs) + else: + local_tensor = init_op(local_shape, **kwargs) + + return DTensor( + local_tensor=local_tensor, + device_mesh=device_mesh, + placements=tuple(placements), + shape=size, + dtype=local_tensor.dtype, + stride=torch_stride, + requires_grad=kwargs["requires_grad"], + ) + + +def ones( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with the scalar value 1, with the shape defined + by the variable argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). 
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.ones, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def empty( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with uninitialized data. The shape of the :class:`DTensor` + is defined by the variable argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: empty(1,2,3..) or empty([1,2,3..]) or empty((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).\ + layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.empty, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def full( + size, + fill_value, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with ``fill_value``. The scalar value type should match + ``device_mesh.device_type``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + fill_value(Scalar): the value to fill the output tensor with. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks. 
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.full, + torch_size, + fill_value=fill_value, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def rand( + *size, + requires_grad: bool = False, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with random numbers from a uniform distribution + on the interval ``[0, 1)``. The shape of the tensor is defined by the variable + argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks. + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.rand, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def randn( + *size, + requires_grad: bool = False, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with random numbers from a normal distribution + with mean 0 and variance 1. The shape of the tensor is defined by the variable + argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks. 
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.randn, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def zeros( + *size, + requires_grad: bool = False, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with the scalar value 0. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: zeros(1,2,3..) or zeros([1,2,3..]) or zeros((1,2,3..)) + Keyword args: + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`. + Default: ``torch.strided``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.zeros, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..752ab65ffbbf238c074c41a0ed51fa2c196ae153 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a093d53f7713df84be32cb0c65418195f1c2358 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6e97ae2e301162c703c74de82fb3594abc8dc3d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9b28a6c9537bcb768ff50b7246a9de78914ee6f Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a48f7bd189f0622de2ce44d718eef8d6ac158a99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80ee183a8058361af87d6b054b5a205957f2dbf2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba489a63a245f5288b759e81a7f9ccf20ad0b6f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..732895920dd1865aed9aa3b549f1f60d09c48eb7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae1a56ee1c23629bdfe38bf94dc6b5d3c4202a26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2df0f00acc321bd7afa2607cd8a162796eab8fe8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd38647bcd350d17645e77a5fdecb3b76a6f8c1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/tp_conv.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/tp_conv.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7ec3edbe48e448c3466b553d3365e5d28b108d37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/tp_conv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf8376bd232811dd8d135884c9369dfc6d1ac89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py @@ -0,0 +1,313 @@ +import logging +import math +from dataclasses import dataclass +from functools import lru_cache + +from typing import List, Optional + +import torch +import torch.distributed._tensor.placement_types as placement_types +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh +from torch.distributed.distributed_c10d import ( + all_to_all, + broadcast, + get_global_rank, + get_rank, + get_world_size, + GroupMember, + ProcessGroup, + scatter, + Work, +) + +logger = logging.getLogger(__name__) + + +# TODO: we need to migrate these APIs to be functional collectives + + +def mesh_scatter( + output: torch.Tensor, + scatter_list: List[torch.Tensor], + mesh: DeviceMesh, + mesh_dim: int = 0, + async_op: bool = False, +) -> Optional[Work]: + """ + scatter a list of tensors to a device mesh dimension. We by default + use the first rank of the mesh dimension as the source of truth, i.e + for a 2d mesh [[0, 1], [2, 3]], if we scatter on mesh_dim = 1, we will + scatter the tensor list on rank 0 to rank 0/1, and tensor list on rank + 2 to rank 2/3. + + Args: + output (torch.Tensor): the tensor to receive the scattered list. + scatter_list (List[torch.Tensor]): the tensor list to be scattered. + mesh_dim (int, optional): indicate which mesh dimension we want + to scatter on, we by default choose the first rank on the + mesh dimension as source of truth. + + Returns: + A :class:`Work` object + """ + # TODO: Ideally we should use the meta tensor way + # (to register a meta kernel for the collective op) + # so that it would avoid the communication. Need to + # remove the check below once that is done. + if output.is_meta: + return None + dim_group = mesh.get_group(mesh_dim) + assert isinstance(dim_group, ProcessGroup) + # src need to be global rank + src_for_dim = 0 + + if dim_group is not GroupMember.WORLD: + src_for_dim = get_global_rank(dim_group, 0) + + if src_for_dim == get_rank(): + fut = scatter( + output, + scatter_list=scatter_list, + src=src_for_dim, + group=dim_group, + async_op=async_op, + ) + else: + fut = scatter( + output, + scatter_list=None, + src=src_for_dim, + group=dim_group, + async_op=async_op, + ) + + return fut + + +def mesh_broadcast( + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int = 0, + async_op: bool = False, +) -> Optional[Work]: + """ + broadcast the tensor to a device mesh dimension. We by default + use the first rank of the mesh dimension as the source of truth, i.e + for a 2d mesh [[0, 1], [2, 3]], if we broadcast on mesh_dim = 1, we will + broadcast the tensor on rank 0 to rank 0/1, and tensor on rank 2 + to rank 2/3. + + Args: + tensor (torch.Tensor): tensor to broadcast. + mesh_dim (int, optional): indicate which mesh dimension we want + to scatter on, we by default choose the first rank on the + mesh dimension as source of truth. 
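+
+    Example (a minimal usage sketch, assuming a 1-D ``DeviceMesh`` over two GPU
+    ranks, so the first rank of the mesh dimension acts as the source of truth):
+        mesh = DeviceMesh("cuda", [0, 1])
+        if mesh.get_rank() == 0:
+            tensor = torch.arange(4.0, device="cuda")
+        else:
+            tensor = torch.zeros(4, device="cuda")
+        mesh_broadcast(tensor, mesh, mesh_dim=0)
+        # every rank now holds rank 0's data: tensor([0., 1., 2., 3.])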
+ + Returns: + A :class:`Work` object + """ + # TODO: Ideally we should use the meta tensor way + # (to register a meta kernel for the collective op) + # so that it would avoid the communication. Need to + # remove the check below once that is done. + if tensor.is_meta: + return None + dim_group = mesh.get_group(mesh_dim) + assert isinstance(dim_group, ProcessGroup) + # src need to be global rank + src_for_dim = 0 + if dim_group is not GroupMember.WORLD: + src_for_dim = get_global_rank(dim_group, 0) + + return broadcast(tensor, src=src_for_dim, group=dim_group, async_op=async_op) + + +# TODO: test uneven split on GLOO and NCCL +def mesh_all_to_all( + output_tensor_list: List[torch.Tensor], + input_tensor_list: List[torch.Tensor], + mesh: DeviceMesh, + mesh_dim: int = 0, + async_op: bool = False, +) -> Optional[Work]: + dim_group = mesh.get_group(mesh_dim) + assert isinstance(dim_group, ProcessGroup) + + work = None + # no direct dist.all_to_all support on 'gloo' so we manually do scatters + if mesh.device_type == "cpu": + logger.warning( + "ProcessGroupGloo does not support all_to_all, falling back with scatters!" + ) + # TODO: pull the handle of uneven case in #492 + dim_group_size = get_world_size(dim_group) + for i in range(dim_group_size): + # src need to be global rank + src_for_dim = i + if dim_group is not GroupMember.WORLD: + src_for_dim = get_global_rank(dim_group, i) + + work = scatter( + output_tensor_list[i], + input_tensor_list if mesh.get_rank() == src_for_dim else [], + group=dim_group, + src=src_for_dim, + async_op=async_op, + ) + else: + work = all_to_all( + output_tensor_list, + input_tensor_list, + dim_group, + async_op=async_op, + ) + return work + + +def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int: + assert spec.tensor_meta is not None, "spec should have tensor meta defined!" + return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape) + + +@dataclass +class MeshTopoInfo: + """ + Mesh information for collective cost estimation + """ + + mesh: DeviceMesh + mesh_dim_devices: List[int] + mesh_dim_bandwidth: List[float] + mesh_dim_latency: List[float] + + @staticmethod + @lru_cache(None) + def build_from_mesh(mesh: DeviceMesh) -> "MeshTopoInfo": + # Generate mesh topology info for intra-host/inter-host communication pattern + # Note that we made bunch of assumptions for simplicity: + # 1. we assume the mesh is homogeneous, and it's gpu/nccl model + # 2. we assume gpu arch is Ampere or Hopper + # 3. we assume collectives are all ring base algo for now + num_devices_per_host = _mesh_resources.num_devices_per_host(mesh.device_type) + # the base bw number (intra-node), GB/s + base_bw = 87.7 + mesh_dim_bandwidth = [base_bw] * mesh.ndim + # the latency in terms of us (intra-node, nv-link) + mesh_dim_latency = [0.6] * mesh.ndim + mesh_dim_devices = [1] * mesh.ndim + + total_num_devices = 1 + for mesh_dim in reversed(range(mesh.ndim)): + num_devices = mesh.size(mesh_dim) + mesh_dim_devices[mesh_dim] = num_devices + total_num_devices *= num_devices + if total_num_devices > num_devices_per_host: + # magic number for inter-host communication bandwidth/latency factor + # This number assumes latest GPU arch, i.e. 
Ampere or Hopper + # TODO: see if we need to tweak this or offer a way for user + # to specify the bandwidths/latency + mesh_dim_bandwidth[mesh_dim] *= 0.22 + # set to ethernet latency for inter-host + mesh_dim_latency[mesh_dim] = 2.7 + + return MeshTopoInfo( + mesh, mesh_dim_devices, mesh_dim_bandwidth, mesh_dim_latency + ) + + +def allgather_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float: + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim] + mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim] + num_hops = num_devices_on_mesh_dim - 1 + # base latency + comm latency + latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] # us + bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth # s + return latency + bw * 1e6 # rescale to us + + +def allreduce_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float: + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim] + mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim] + # allreduce have almost 2x comm bytes compare to allgather/reduce_scatter + num_hops = 2 * num_devices_on_mesh_dim - 1 + + latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] + bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth + return latency + bw * 1e6 + + +def reduce_scatter_cost( + bytes_gb: float, + mesh_topo: MeshTopoInfo, + mesh_dim: int, +) -> float: + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim] + mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim] + num_hops = num_devices_on_mesh_dim - 1 + # base latency + comm latency + latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] + bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth + return latency + bw * 1e6 + + +def redistribute_cost( + current_spec: "placement_types.DTensorSpec", + target_spec: "placement_types.DTensorSpec", +) -> float: + """ + This function returns the cost of redistribute from current to target DTensorSpec. + + NOTE: + 1. Only consider communication cost here, since computation costs for redistribute + are quite trival (i.e. we only need to narrow or simple division) + 2. Only consider redistribute cost on same mesh, cross mesh communication cost is + not quite needed for operator strategy estimation/selection. + """ + if current_spec.mesh != target_spec.mesh: + # make infinite cost if meshes are not same + # TODO: see if we want to support this once there's cross mesh communication + return float("inf") + + if current_spec.is_replicated(): + # short-cut: + # comm cost is 0 if current spec is already full replication + return 0.0 + + mesh_topo = MeshTopoInfo.build_from_mesh(current_spec.mesh) + cost = 0.0 + comm_bytes_gb = ( + spec_to_bytes(current_spec) / current_spec.num_shards / 1024 / 1024 / 1024 + ) + # Transformation that considered for redistribute cost: + # 1. allgather 2. alltoall + # 3. allreduce 4. 
reduce_scatter + for i, (current, target) in enumerate( + zip(current_spec.placements, target_spec.placements) + ): + if current == target: + continue + + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[i] + if current.is_shard() and target.is_replicate(): + # allgather gives larger comm bytes + comm_bytes_gb *= num_devices_on_mesh_dim + # add up allgather comm cost + cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + elif current.is_shard() and target.is_shard(): + # should be alltoall comm, since we haven't implement it yet, add penalty + # to favor allgather instead + cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + 1.0 + elif current.is_partial() and target.is_replicate(): + # add up allreduce comm cost + cost += allreduce_cost(comm_bytes_gb, mesh_topo, i) + elif current.is_partial() and target.is_shard(): + # add up reduce_scatter comm cost + cost += reduce_scatter_cost(comm_bytes_gb, mesh_topo, i) + # after reduce_scatter the comm bytes for further collectives halved. + comm_bytes_gb /= num_devices_on_mesh_dim + elif current.is_shard() and target.is_partial(): + # ban shard -> partial as it does not make sense to perform + # this redistribute + return float("inf") + + return cost diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d256824d7ac47a4fc2e557457acea678fe4ce0e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/_utils.py @@ -0,0 +1,204 @@ +from typing import cast, List, Sequence, Tuple + +import torch +import torch.distributed._tensor.api as dtensor +from torch._prims_common import ShapeType +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +# TODO: audit existing code base to see if we can safely remove this API. +def compute_local_shape( + global_shape: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement] +) -> Tuple[int, ...]: + """ + Compute the shape of a local shard of the given DTensor on its current + coordinate of the mesh. + """ + my_coordinate = mesh.get_coordinate() + + if my_coordinate is None: + # if rank not in the mesh, return empty shape + return (0,) + else: + local_shape = list(global_shape) # start with global shape + ndim = len(global_shape) + for idx, placement in enumerate(placements): + mesh_dim_size = mesh.size(idx) + if isinstance(placement, Shard): + shard_dim = placement.dim + assert ( + shard_dim < ndim + ), f"Sharding dim {shard_dim} greater than tensor ndim {ndim}" + local_shard_size, _ = placement._local_shard_size_on_dim( + local_shape[shard_dim], mesh_dim_size, my_coordinate[idx] + ) + assert isinstance(local_shard_size, int) + local_shape[shard_dim] = local_shard_size + + return tuple(local_shape) + + +def compute_local_shape_and_global_offset( + global_shape: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement] +) -> Tuple[Tuple[int, ...], Tuple[int, ...]]: + """ + Compute the local tensor shape and the global offsets into the original tensor + of a DTensor on its current global rank. This is useful for checkpointing purpose. 
+ + Example (2 host with 4GPUs each): + # Below is a DeviceMesh with mesh_shape of (2, 4) + mesh = DeviceMesh(device_type="cuda", + mesh=[ + [0, 1, 2, 3], + [4, 5, 6, 7] + ], + ) + + Let's say we distribute a global_tensor of shape (8,4) over the above DeviceMesh + with a placements of [Shard(0), Shard(0)]. + The local shape and global offset will be as follows: + rank0 -- local_shape:[1, 4], global_offset:[0, 0] + rank1 -- local_shape:[1, 4], global_offset:[1, 0] + rank2 -- local_shape:[1, 4], global_offset:[2, 0] + rank5 -- local_shape:[1, 4], global_offset:[5, 0] + rank3 -- local_shape:[1, 4], global_offset:[3, 0] + rank4 -- local_shape:[1, 4], global_offset:[4, 0] + rank6 -- local_shape:[1, 4], global_offset:[6, 0] + rank7 -- local_shape:[1, 4], global_offset:[7, 0] + + Let's say we distribute a global_tensor of shape (2) over the above DeviceMesh with + a placements of [Shard(0)]. We will not have non-empty local tensor for all the ranks. + The local shape and global offset will be as follows: + rank0 -- local_shape:[1,], global_offset:[0,] + rank1 -- local_shape:[1,], global_offset:[1,] + rank2 -- local_shape:[0,], global_offset:[2,] + rank5 -- local_shape:[0,], global_offset:[2,] + rank3 -- local_shape:[0,], global_offset:[2,] + rank4 -- local_shape:[0,], global_offset:[2,] + rank6 -- local_shape:[0,], global_offset:[2,] + rank7 -- local_shape:[0,], global_offset:[2,] + """ + my_coordinate = mesh.get_coordinate() + + if my_coordinate is None: + # if rank not in the mesh, return empty offset + return ((), ()) + else: + local_shape = list(global_shape) + global_offset = [0] * len(global_shape) + + for idx, placement in enumerate(placements): + mesh_dim_size = mesh.size(idx) + if isinstance(placement, Shard): + shard_dim = placement.dim + local_offset = [0] * len(global_shape) + assert shard_dim < len( + local_shape + ), f"Sharding dim {shard_dim} greater than tensor ndim {len(local_shape)}" + shard_size, shard_offset = placement._local_shard_size_on_dim( + local_shape[shard_dim], + mesh_dim_size, + my_coordinate[idx], + return_offset=True, + ) + + local_shape[shard_dim] = shard_size + local_offset[shard_dim] = shard_offset + + # On a given dimension, if the local_offset[shard_dim] is smaller than global_offset[shard_dim], + # it means that this dimension has been already sharded in previous placement. + # Therefore, we cannot simply replace the global_offset[shard_dim] with local_offset[shard_dim]. + # Instead, for the given shard_dim, we need to add local_offset[shard_dim] to existing global_offset[shard_dim]. + if global_offset[shard_dim] <= local_offset[shard_dim]: + global_offset[shard_dim] = local_offset[shard_dim] + else: + global_offset[shard_dim] += local_offset[shard_dim] + + return tuple(local_shape), tuple(global_offset) + + +def compute_global_tensor_info( + tensor: torch.Tensor, mesh: DeviceMesh, placements: Sequence[Placement] +) -> Tuple[List[int], List[int]]: + """ + Compute the global size and stride of a DTensor from the given local tensor. + The local size is multiplited by `world_size` per Sharding dim. + The local stride is multiplited by `world_size` per Sharding dim, as long as the + dimension is outside sharding dim. + + For example, if we have a local tensor with size (4, 8, 2) and stride (16, 1, 8). + If the DTensor placements are [Shard(2)] and world_size is 2; + then the global size is (4, 8, 4) and stride is (16 * 2, 1, 8). + + Args: + tensor (:class:`torch.Tensor`): + Local tensor which DTensor will be constructed from. 
+ mesh (:class:`DeviceMesh`): + Object which describes the mesh topology + of devices for the DTensor. + placements (Sequence[:class:`Placement`]]): + The attribute of the DTensor that describes its layout + on the mesh topology. + + Return: + tensor_shape: A List of int which specifies the size of DTensor which build + on top of the local tensor. + tensor_stride: A List of int which specifies the stride of DTensor. + """ + tensor_shape = list(tensor.size()) + tensor_stride = list(tensor.stride()) + for idx, placement in enumerate(placements): + mesh_dim_size = mesh.size(idx) + if placement.is_shard(): + shard_placement = cast(Shard, placement) + if shard_placement.dim < 0: + raise AssertionError( + "Shard placements should have negative dims normalized in " + f"the user-facing APIs: {shard_placement}" + ) + shard_dim = shard_placement.dim + + assert ( + shard_dim < tensor.ndim + ), f"Sharding dim {shard_dim} greater than tensor ndim {tensor.ndim} for placement number {idx}." + + local_dim_size = tensor_shape[shard_dim] + tensor_shape[shard_dim] = local_dim_size * mesh_dim_size + + # recover tensor stride by modifying the stride that larger than + # the current stride on the shard_dim + for i in range(len(tensor_stride)): + if i != shard_dim and tensor_stride[i] >= tensor_stride[shard_dim]: + # rescale the stride by the shard size + tensor_stride[i] = tensor_stride[i] * mesh_dim_size + elif not isinstance(placement, (Replicate, _Partial)): + raise RuntimeError(f"placement type {type(placement)} not supported!") + return tensor_shape, tensor_stride + + +def try_find_mesh_from_args( + op_call: torch._ops.OpOverload, args: Sequence[object] +) -> DeviceMesh: + """ + Find the device mesh object from args. + It returns None if no mesh is found. + NOTE: we can optimize this search if needed + """ + for arg in args: + if isinstance(arg, (dtensor.DTensor, DTensorSpec)): + return arg.device_mesh + elif ( + isinstance(arg, (list, tuple)) + and len(arg) > 0 + and isinstance(arg[0], (dtensor.DTensor, DTensorSpec)) + ): + return arg[0].device_mesh + + raise ValueError(f"Cannot find device mesh from args for op : {op_call}.") diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/api.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/api.py new file mode 100644 index 0000000000000000000000000000000000000000..acafdb1ccb2deaa544dc9448e5f52c12aa69c683 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/api.py @@ -0,0 +1,760 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +import inspect +import warnings +from typing import Any, Callable, cast, Optional, Sequence, Tuple + +import torch + +import torch.distributed._tensor.dispatch as op_dispatch +import torch.distributed._tensor.random as random +import torch.nn as nn +from torch.distributed._tensor._collective_utils import mesh_broadcast +from torch.distributed._tensor._utils import compute_global_tensor_info +from torch.distributed._tensor.placement_types import ( + DTensorSpec, + Placement, + Replicate, + Shard, + TensorMeta, +) +from torch.distributed._tensor.random import ( + is_rng_supported_mesh, + OffsetBasedRNGTracker, +) +from torch.distributed._tensor.redistribute import ( + Redistribute, + redistribute_local_tensor, +) +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh + + +__all__ = ["DTensor", "distribute_tensor", "distribute_module"] + +aten = torch.ops.aten + + +# NOTE [Autograd interaction between torch.Tensor] +# +# The autograd functions defined below are being used by the public +# facing APIs (i.e. from_local, to_local) to ensure our DTensor +# works together with torch.Tensor within autograd engine. This +# allows DistributedTensor to exist on part of the module hierarchy +# and still able to calculate gradients across the torch.Tensor and +# DistributedTensor boundary. +# As an example, we have the a module that consists of submodules +# A, B, and C, the execution flow would be like: +# input(torch.Tensor) -> Module A -> Module B -> Module C -> output (torch.Tensor) +# +# Suppose I only want to make Module B be a sharded module with +# DistributedTensor params, we would need to make the following +# flow to work: +# +# input(torch.Tensor) -> Module A +# -> DTensor input -> Sharded Module B -> DTensor output +# -> output (torch.Tensor) -> Module C -> output (torch.Tensor) +# +# We need the conversion from Module A to DTensor input, which is +# `from_local`, and conversion from DTensor output to output, which +# is `to_local`, thus these two functions must be Autograd functions. +# +class _ToTorchTensor(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, + input: "DTensor", + grad_placements: Optional[Sequence[Placement]], + ): + ctx.dtensor_spec = input._spec + ctx.grad_placements = grad_placements + local_tensor = input._local_tensor + + # We need to return a fresh Tensor object there as autograd metadata + # will be inplaced into it. So we don't want to pollute the Tensor + # object stored in the _local_tensor of this DTensor. + return local_tensor.view_as(local_tensor) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): # type: ignore[override] + dtensor_spec = ctx.dtensor_spec + mesh = dtensor_spec.mesh + grad_placements = ctx.grad_placements + dtensor_meta = dtensor_spec.tensor_meta + + _, tensor_stride = compute_global_tensor_info( + grad_output, mesh, dtensor_spec.placements + ) + tensor_stride = tuple(tensor_stride) + grad_placements = grad_placements or dtensor_spec.placements + + return ( + DTensor( + grad_output, + mesh, + grad_placements, + shape=dtensor_meta.shape, + dtype=dtensor_meta.dtype, + requires_grad=grad_output.requires_grad, + stride=tensor_stride, + ), + None, + ) + + +class _FromTorchTensor(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, # pyre-ignore[2]: Parameter must be annotated. 
+ input: torch.Tensor, + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + run_check: bool, + shape: Optional[torch.Size] = None, + stride: Optional[Tuple[int, ...]] = None, + ) -> "DTensor": + ctx.previous_placement = placements + ctx.previous_device_mesh = device_mesh + + if shape and stride: + tensor_shape, tensor_stride = shape, stride + elif not shape and not stride: + # if it's not by default run_check, we assume user is certain that each + # rank has the same tensor shape, and we just use that to calculate the + # global shape + global_shape, global_stride = compute_global_tensor_info( + input, device_mesh, placements + ) + tensor_shape, tensor_stride = torch.Size(global_shape), tuple(global_stride) + else: + raise RuntimeError( + f"Found shape:{shape}, stride:{stride}.", + "Please pass both shape and stride at the same time.", + ) + + if device_mesh.get_coordinate() is None: + # if the global rank is not participating in the device mesh, we + # simply set the local tensor to an empty tensor + input = input.new_empty(0, requires_grad=input.requires_grad) + elif run_check: + # TODO: by default check tensor metas across rank + # TODO: See if we need to make this run_check logic + # have a corresponding backward. + for idx, placement in enumerate(placements): + if placement.is_replicate(): + # broadcast rank 0 tensor to all ranks + # only broadcast if run_check is True + input = input.contiguous() + mesh_broadcast(input, device_mesh, mesh_dim=idx) + + # We want a fresh Tensor object that shares memory with the input tensor + dist_tensor = DTensor( + input.view_as(input), + device_mesh, + placements, + shape=tensor_shape, + dtype=input.dtype, + # requires_grad of the dist tensor depends on if input + # requires_grad or not + requires_grad=input.requires_grad, + stride=tensor_stride, + ) + return dist_tensor + + @staticmethod + def backward(ctx, grad_output: "DTensor"): # type: ignore[override] + previous_placement = ctx.previous_placement + previous_device_mesh = ctx.previous_device_mesh + + # reshard to the placement when creating DistributedTensor + # so that the gradient layout matches, and we could return + # local gradients directly + if grad_output.placements != previous_placement: + current_spec = grad_output._spec + target_spec = DTensorSpec( + previous_device_mesh, + previous_placement, + tensor_meta=grad_output._spec.tensor_meta, + ) + local_tensor = grad_output._local_tensor + output = redistribute_local_tensor( + local_tensor, current_spec, target_spec, is_backward=True + ) + # TODO: return the redistributed local tensor directly without + # differentiable backward. see if this make sense for all cases. + return output, None, None, None, None, None + + # TODO: backward is also differentiable now, add a test + # to test higher level gradients. 
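+        # Note: backward must return one gradient per forward input
+        # (input, device_mesh, placements, run_check, shape, stride);
+        # only the tensor input receives a gradient, the rest are None.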
+ return grad_output.to_local(), None, None, None, None, None + + +class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__ + _local_tensor: torch.Tensor + _spec: DTensorSpec + __slots__ = ["_local_tensor", "_spec"] + + # class attribute that handles operator placements propagation + # rules, keyed by aten op name, value is propagation func + _op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher() + + @staticmethod + def __new__( + cls, + local_tensor: torch.Tensor, + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + *, + shape: torch.Size, + dtype: torch.dtype, + requires_grad: bool, + stride: Tuple[int, ...], + ) -> "DTensor": + """ + Construct a DTensor from a local tensor, device mesh, and placement and + other tensor properties (i.e. shape, requires_grad, strides, etc). + Note: This is not a public API and it's only supposed to be used by the + operator implementations and internals. If you want to construct a + DTensor from a local tensor, consider using `DTensor.from_local`, if + you want to construct a DTensor from a "global" tensor (where you + already have tensor initialized and want to shard this tensor), + consider using `distribute_tensor`. + """ + if local_tensor.requires_grad and not requires_grad: + warnings.warn( + "To construct DTensor from torch.Tensor, it's recommended to " + "use local_tensor.detach() and make requires_grad consistent." + ) + + # new method instruct wrapper tensor from local_tensor and add + # placement spec, it does not do actual distribution + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, + shape, + strides=stride, + dtype=dtype, + device=local_tensor.device, + layout=local_tensor.layout, + requires_grad=requires_grad, + ) + + tensor_meta = TensorMeta(shape, stride, dtype) + # deepcopy and set spec + r._spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta) + r._local_tensor = local_tensor + return r + + # pyre-fixme[14]: `__repr__` overrides method defined in `DTensor` inconsistently. + # pyre-fixme[3]: Return type must be annotated. + def __repr__(self): + # TODO: consider all_gather the local tensors for better debugging + return f"DTensor(local_tensor={self._local_tensor}, device_mesh={self._spec.mesh}, placements={self._spec.placements})" + + def __tensor_flatten__(self): + """ + protocol to inform how to flatten a DTensor to local tensor + for PT2 tracing + """ + return ["_local_tensor"], (self._spec, self.requires_grad) + + @staticmethod + def __tensor_unflatten__(inner_tensors, flatten_spec, outer_size, outer_stride): + assert ( + flatten_spec is not None + ), "Expecting spec to be not None from `__tensor_flatten__` return value!" + local_tensor = inner_tensors["_local_tensor"] + spec, requires_grad = flatten_spec + return DTensor( + local_tensor, + spec.mesh, + spec.placements, + shape=outer_size, + dtype=spec.tensor_meta.dtype, + requires_grad=requires_grad, + stride=outer_stride, + ) + + @classmethod + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. 
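+    # Every aten op invoked with DTensor arguments funnels through this hook
+    # and is handled by the shared OpDispatcher instance declared above.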
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + return DTensor._op_dispatcher.dispatch( + func, + args, + kwargs or {}, + ) + + @staticmethod + def from_local( + local_tensor: torch.Tensor, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, + *, + run_check: bool = True, + shape: Optional[torch.Size] = None, + stride: Optional[Tuple[int, ...]] = None, + ) -> "DTensor": + """ + Create a :class:`DTensor` from a local torch.Tensor on each rank + according to the `device_mesh` and `placements` specified. + + Args: + local_tensor (torch.Tensor): local torch.Tensor on each rank. + device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the + tensor, if not specified, must be called under a DeviceMesh + context manager, default: None + placements (List[:class:`Placement`], optional): the placements that + describes how to place the local torch.Tensor on DeviceMesh, must + have the same number of elements as `device_mesh.ndim`. If not + specified, we will by default replicate the tensor across the + `device_mesh` from the first rank of each dimension of the `device_mesh`. + + Keyword args: + run_check (bool, optional): indicate whether to run check across ranks + to check meta information and data. if have :class:`Replicate` in + `placements`, the data on first rank of the device mesh dimension + will be broadcasted to other ranks. + shape (torch.Size, optional): A List of int which specifies the size of + DTensor which build on top of `local_tensor`. Note this needs to be + provided if the shape of `local_tensor` are different across the ranks. + If not provided, `shape` will be computed assuming the given distributed + tensor is evenly sharded across ranks. + stride (tuple, optional): A List of int which specifies the stride of DTensor. + If not provided, `stride` will be computed assuming the given distributed + tensor is evenly sharded across ranks. + + Returns: + A :class:`DTensor` object + + .. note:: `from_local` is differentiable, the `requires_grad` of the created + `DTensor` object will depend on if `local_tensor` requires_grad or not. + """ + # if same shape/dtype, no need to run_check, if not, must allgather + # the metadatas to check the size/dtype across ranks + # There should be no data communication unless there's replication + # strategy, where we broadcast the replication from the first rank + # in the mesh dimension + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + device_type = device_mesh.device_type + + # convert the local tensor to desired device base on device mesh's device_type + if device_type != local_tensor.device.type and not local_tensor.is_meta: + local_tensor = local_tensor.to(device_type) + + # set default placements to replicated if not specified + if placements is None: + placements = [Replicate() for _ in range(device_mesh.ndim)] + else: + placements = list(placements) + for idx, placement in enumerate(placements): + # normalize shard dim to be positive + if placement.is_shard(): + placement = cast(Shard, placement) + if placement.dim < 0: + placements[idx] = Shard(placement.dim + local_tensor.ndim) + + # `from_local` is differentiable, and the gradient of the dist tensor this function + # created should flow back the gradients to the local_tensor, so we call an autograd + # function to construct the dist tensor instead. 
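+        # Minimal usage sketch (assumes two ranks and a 1-D mesh; values are illustrative):
+        #   mesh = DeviceMesh("cuda", [0, 1])
+        #   dt = DTensor.from_local(torch.randn(4, 8), mesh, [Shard(0)])
+        #   dt.shape == torch.Size([8, 8])  # dim 0 is sharded, so the global size doubles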
+ return _FromTorchTensor.apply( # pyre-ignore[16]: autograd func + local_tensor, + device_mesh, + tuple(placements), + run_check, + shape, + stride, + ) + + def to_local( + self, *, grad_placements: Optional[Sequence[Placement]] = None + ) -> torch.Tensor: + """ + Get the local tensor of this DTensor on its current rank. For sharding it returns + a local shard of the logical tensor view, for replication it returns the replica on + its current rank. + + Keyword args: + grad_placements (List[:class:`Placement`], optional): the placements describes + the future layout of any gradient layout of the Tensor returned from this + function. + `to_local` converts DTensor to local tensor and the returned local tensor + might not be used as the original DTensor layout later in the code. This + argument is the hint that user can give to autograd in case the gradient + layout of the returned tensor does not match the original DTensor layout. + If not specified, we will assume the gradient layout remains the same + as the original DTensor and use that for gradient computation. + + Returns: + A :class:`torch.Tensor` or `AsyncCollectiveTensor` object. it represents the + local tensor on its current rank. + + .. note:: `to_local` is differentiable, the `requires_grad` of the local tensor returned + will depend on if the `DTensor` requires_grad or not. + """ + if grad_placements is not None and not isinstance(grad_placements, tuple): + grad_placements = tuple(grad_placements) + return _ToTorchTensor.apply( + self, grad_placements + ) # pyre-ignore[16]: autograd func + + def redistribute( + self, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, + *, + async_op: bool = False, + ) -> "DTensor": + """ + `redistribute` performs necessary collective operations that redistribute the current + DTensor from its current placements to a new placements, or from is current DeviceMesh + to a new DeviceMesh. i.e. we can turn a Sharded DTensor to a Replicated DTensor by + specifying a Replicate placement for each dimension of the DeviceMesh. + + Args: + device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the + DTensor, if not specified, must be called under a DeviceMesh + context manager, default: None + placements (List[:class:`Placement`], optional): the new placements that + describes how to place the DTensor into the DeviceMesh, must + have the same number of elements as `device_mesh.ndim`. + + Keyword args: + async_op (bool, optional): whether to perform the DTensor redistribute operation + asynchronously or not. Default: False + + Returns: + A :class:`DTensor` object + + .. note:: `redistribute` is differentiable. + """ + # NOTE: This redistribute API currently only supports out + # of place redistribution, i.e. it always create a new + # DTensor object and leave the original one unchanged. + + # if device_mesh is not specified, use the current device_mesh + device_mesh = device_mesh or self.device_mesh + # raise error if new placements not specified + if placements is None: + raise RuntimeError("placements is needed for redistribute!") + + placements = list(placements) + for i, placement in enumerate(placements): + if placement.is_partial(): + raise RuntimeError( + "Can not redistribute to _Partial, _Partial is for internal use only!" 
+ ) + elif isinstance(placement, Shard) and placement.dim < 0: + # normalize shard dim to be positive + placements[i] = Shard(placement.dim + self.ndim) + placements = tuple(placements) + + # Early return the original DTensor if the placements are the same. + if self._spec.placements == placements: + return self + + # pyre-fixme[16]: `Redistribute` has no attribute `apply`. + return Redistribute.apply(self, device_mesh, placements, async_op) + + def full_tensor( + self, *, grad_placements: Optional[Sequence[Placement]] = None + ) -> torch.Tensor: + """ + Return the full tensor of this DTensor. It will perform necessary collectives + to gather the local tensors from other ranks in its DeviceMesh and concatenate + them together. It's a syntatic sugar of the following code: + + `dtensor.redistribute(placements=[Replicate()] * mesh.ndim).to_local()` + + Keyword args: + grad_placements (List[:class:`Placement`], optional): the placements describes + the future layout of any gradient layout of the full Tensor returned from this + function. + `full_tensor` converts DTensor to a full torch.Tensor and the returned torch.tensor + might not be used as the original replicated DTensor layout later in the code. This + argument is the hint that user can give to autograd in case the gradient + layout of the returned tensor does not match the original replicated DTensor layout. + If not specified, we will assume the gradient layout of the full tensor be replicated. + + Returns: + A :class:`torch.Tensor` object that represents the full tensor of this DTensor. + + .. note:: `full_tensor` is differentiable. + """ + + redist_res = self.redistribute( + placements=[Replicate()] * self.device_mesh.ndim, async_op=False + ) + return _ToTorchTensor.apply(redist_res, grad_placements) + + @property + def device_mesh(self) -> DeviceMesh: + """ + The :class:`DeviceMesh` attribute that associates with this DTensor object. + + .. note:: device_mesh is a read-only property, it can not be set. + """ + return self._spec.mesh + + @property + def placements(self) -> Sequence[Placement]: + """ + The placements attribute of this DTensor that describes the layout of this + DTensor on the its DeviceMesh. + + .. note:: placements is a read-only property, it can not be set. + """ + return self._spec.placements + + +def distribute_tensor( + tensor: torch.Tensor, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Distribute a torch.Tensor to the `device_mesh` according to the `placements` + specified. The rank of `device_mesh` and `placements` must be the same. + + Args: + tensor (torch.Tensor): torch.Tensor to be distributed. Note that if you + want to shard a tensor on a dimension that is not evenly divisible by + the number of devices in that mesh dimension, we use `torch.chunk` + semantic to shard the tensor and scatter the shards. + device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to distribute the + tensor, if not specified, must be called under a DeviceMesh context + manager, default: None + placements (List[:class:`Placement`], optional): the placements that + describes how to place the tensor on DeviceMesh, must have the same + number of elements as `device_mesh.ndim`. If not specified, we will + by default replicate the tensor across the `device_mesh` from the + first rank of each dimension of the `device_mesh`. + + Returns: + A :class:`DTensor` or `XLAShardedTensor` object. 
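+
+    Example (a minimal sketch, assuming four GPU ranks launched together, e.g. via torchrun):
+        mesh = DeviceMesh("cuda", [0, 1, 2, 3])
+        global_tensor = torch.randn(8, 16)
+        dtensor = distribute_tensor(global_tensor, mesh, placements=[Shard(0)])
+        # each rank now holds a (2, 16) local shard:
+        # dtensor.to_local().shape == torch.Size([2, 16])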
+ + Note: + When initialize the DeviceMesh with the `xla` device_type, `distribute_tensor` + return `XLAShardedTensor` instead. see [link](https://github.com/pytorch/pytorch/issues/92909) + for more details. The XLA integration is experimental and subject to change. + """ + + torch._C._log_api_usage_once("torch.dtensor.distribute_tensor") + + # get default device mesh if there's nothing specified + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + device_type = device_mesh.device_type + if device_type == "xla": + try: + # call PyTorch/XLA SPMD for `xla` backend type device mesh. + # This returns XLAShardedTensor + from torch_xla.distributed.spmd import ( # type:ignore[import] + xla_distribute_tensor, + ) + + return xla_distribute_tensor( + tensor, device_mesh, placements + ) # type:ignore[return-value] + except ImportError as e: + msg = "To use DTensor API with xla, you must install the torch_xla package!" + raise ImportError(msg) from e + + # instantiate a RNG tracker if haven't. By default DTensor uses an + # OffsetBasedRNGTracker to perform random operators. + # TODO: the value assignment to global variable is not the ideal solution + # we can replace it in future. + if is_rng_supported_mesh(device_mesh) and not random._rng_tracker: + random._rng_tracker = OffsetBasedRNGTracker(device_type) + + if not tensor.is_leaf: + raise RuntimeError( + "`distribute_tensor` should be used to distribute leaf tensors! but found non-leaf tensor!" + ) + + # convert tensor to the corresponding device type if it's not in that device type + if device_type != tensor.device.type and not tensor.is_meta: + tensor = tensor.to(device_type) + + # set default placements to replicated if not specified + if placements is None: + placements = [Replicate() for _ in range(device_mesh.ndim)] + + if len(placements) != device_mesh.ndim: + raise ValueError( + f"`placements` must have the same length as `device_mesh.ndim`! " + f"Found placements length: {len(placements)}, and device_mesh.ndim: {device_mesh.ndim}." + ) + if isinstance(tensor, DTensor): + # if the tensor is already a DTensor, we just need to check if the + # device mesh and placements are the same + if tensor.device_mesh != device_mesh: + raise ValueError( + f"Cannot distribute a DTensor with device mesh {tensor.device_mesh} " + f"to a different device mesh {device_mesh}." + ) + if tensor.placements != tuple(placements): + raise ValueError( + f"Cannot distribute a DTensor with placements {tensor.placements} " + f"to a different placements {placements}. do you want to call " + f"`redistribute` instead?" + ) + return tensor + + local_tensor = tensor + + # distribute the tensor according to the placements. + placements = list(placements) + for idx, placement in enumerate(placements): + if placement.is_shard(): + placement = cast(Shard, placement) + if placement.dim < 0: + # normalize shard placement dim + placement = Shard(placement.dim + tensor.ndim) + placements[idx] = placement + local_tensor = placement._shard_tensor(local_tensor, device_mesh, idx) + elif placement.is_replicate(): + placement = cast(Replicate, placement) + local_tensor = placement._replicate_tensor(local_tensor, device_mesh, idx) + else: + raise RuntimeError( + f"Trying to distribute tensor with unsupported placements {placement} on device mesh dimension {idx}!" 
+ ) + placements = tuple(placements) + + assert local_tensor is not None, "distributing a tensor should not be None" + # detach the local tensor passed to DTensor since after the construction + # of DTensor, autograd would work on top of DTensor instead of local tensor + return DTensor( + local_tensor.detach().requires_grad_(tensor.requires_grad), + device_mesh, + placements, + shape=tensor.size(), + dtype=tensor.dtype, + requires_grad=tensor.requires_grad, + stride=tensor.stride(), + ) + + +def distribute_module( + module: nn.Module, + device_mesh: Optional[DeviceMesh] = None, + partition_fn: Optional[Callable[[str, nn.Module, DeviceMesh], None]] = None, + input_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None, + output_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None, +) -> nn.Module: + """ + This function converts all module parameters to :class:`DTensor` parameters + according to the `partition_fn` specified. It could also control the input or + output of the module by specifying the `input_fn` and `output_fn`. (i.e. convert + the input to :class:`DTensor`, convert the output back to torch.Tensor) + Args: + module (:class:`nn.Module`): user module to be partitioned. + device_mesh (:class:`DeviceMesh`): the device mesh to place the module. + partition_fn (Callable): the function to partition parameters (i.e. shard certain + parameters across the `device_mesh`). If `partition_fn` is not specified, + by default we replicate all module parameters of `module` across the mesh. + input_fn (Callable): specify the input distribution, i.e. could control how the + input of the module is sharded. `input_fn` will be installed as a module + `forward_pre_hook` (pre forward hook). + output_fn (Callable): specify the output distribution, i.e. could control how the + output is sharded, or convert it back to torch.Tensor. output_fn will be + installed as a module `forward_hook` (post forward hook). + + Returns: + A module that contains parameters/buffers that are all `DTensor`s. + + Note: + When initialize the DeviceMesh with the `xla` device_type, `distribute_module` + return nn.Module with PyTorch/XLA SPMD annotated parameters. See [link](https://github.com/pytorch/pytorch/issues/92909) + for more details. The XLA integration is experimental and subject to change. + """ + + torch._C._log_api_usage_once("torch.dtensor.distribute_module") + + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + device_type = device_mesh.device_type + if device_type == "xla": + try: + # This function annotates all module parameters for auto-partitioning with + # PyTorch/XLA SPMD or explicitly partition to :class:`XLAShardedTensor` parameters + # according to the `partition_fn` specified. + from torch_xla.distributed.spmd import ( # type:ignore[import] + xla_distribute_module, + ) + + return xla_distribute_module( + module, device_mesh, partition_fn, input_fn, output_fn + ) # type:ignore[return-value] + except ImportError as e: + msg = "To use DTensor API with xla, you must install the torch_xla package!" + raise ImportError(msg) from e + + def replicate_module_params_buffers(m: nn.Module, mesh: DeviceMesh) -> None: + # This function loop over the immediate module parameters and + # buffers, replicate all non DTensor params/buffers to DTensor + # parameters/buffers, if they have not been partitioned in the + # partition_fn, we can't easily use `module._apply` here + # because we don't know what happened inside partition_fn as + # user could do anything, i.e. 
install hooks, and we want to + # preserve those. + full_replicate = [Replicate()] * mesh.ndim + for key, param in m._parameters.items(): + if param is not None and not isinstance(param, DTensor): + m.register_parameter( + key, + nn.Parameter(distribute_tensor(param.data, mesh, full_replicate)), + ) + for key, buffer in m._buffers.items(): + if buffer is not None and not isinstance(buffer, DTensor): + m._buffers[key] = distribute_tensor(buffer, mesh, full_replicate) + + if partition_fn is None: + # if partition_fn not specified, we by default replicate + # all module params/buffers + for name, submod in module.named_modules(): + replicate_module_params_buffers(submod, device_mesh) + else: + # apply partition_fun to submodules + for name, submod in module.named_modules(): + partition_fn(name, submod, device_mesh) + replicate_module_params_buffers(submod, device_mesh) + + # register input_fn as module forward pre hook + if input_fn is not None: + # check the input_fn signature + num_args = len(inspect.signature(input_fn).parameters) + if num_args == 2: + # input_fn only takes in inputs and device mesh + warnings.warn( + "Deprecating input_fn that takes two arguments (inputs, device_mesh), " + "please use input_fn that takes in (module, inputs, device_mesh) instead!", + ) + module.register_forward_pre_hook(lambda _, inputs: input_fn(inputs, device_mesh)) # type: ignore[call-arg] + elif num_args == 3: + # input_fn takes in module, inputs, device mesh + module.register_forward_pre_hook( + lambda mod, inputs: input_fn(mod, inputs, device_mesh) + ) + else: + raise ValueError( + f"input_fn should take in 3 arguments, but got {num_args} arguments!" + ) + # register output_fn as module forward hook + if output_fn is not None: + num_args = len(inspect.signature(output_fn).parameters) + if num_args == 2: + # output_fn only takes in outputs and device mesh + warnings.warn( + "Deprecating output_fn that takes two arguments (inputs, device_mesh), " + "please use output_fn that takes in (module, inputs, device_mesh) instead!", + ) + module.register_forward_hook( + lambda mod, inputs, outputs: output_fn(outputs, device_mesh) # type: ignore[call-arg] + ) + elif num_args == 3: + module.register_forward_hook( + lambda mod, inputs, outputs: output_fn(mod, outputs, device_mesh) + ) + else: + raise ValueError( + f"output_fn should take in 3 arguments, but got {num_args} arguments!" + ) + + return module diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2cd388cf93e4136306aa1cef488ddfe2aa1c4e45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/__init__.py @@ -0,0 +1,14 @@ +from torch.distributed._tensor.api import DTensor + +from torch.distributed._tensor.debug.comm_mode import CommDebugMode + + +def get_sharding_prop_cache_info(): + """ + Get the cache info for the sharding propagation cache, used for debugging purpose only. + This would return a named tuple showing hits, misses, maxsize and cursize of the sharding + propagator cache. 
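+
+    A minimal usage sketch (illustrative only; assumes some DTensor operations
+    have already executed on this rank so the cache has entries):
+
+    .. code-block:: python
+
+        # lru_cache-style named tuple: hits, misses, maxsize, currsize
+        info = get_sharding_prop_cache_info()
+        print(info.hits, info.misses)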
+ """ + return ( + DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding.cache_info() # type:ignore[attr-defined] + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/comm_mode.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/comm_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..43def0b9d64ae19d875c82e0335f2871d3f0cd17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/comm_mode.py @@ -0,0 +1,91 @@ +from collections import defaultdict +from typing import Any, Dict + +import torch +from torch.distributed._tensor.api import DTensor +from torch.utils._python_dispatch import TorchDispatchMode + + +funcol_native = torch.ops._c10d_functional +funcol_py = torch.ops.c10d_functional + +NATIVE_TO_PY_MAPPING = { + funcol_native.all_gather_into_tensor: funcol_py.all_gather_into_tensor, + funcol_native.all_gather_into_tensor_coalesced: funcol_py.all_gather_into_tensor_coalesced, + funcol_native.all_reduce: funcol_py.all_reduce, + funcol_native.all_to_all_single: funcol_py.all_to_all_single, + funcol_native.broadcast: funcol_py.broadcast, + funcol_native.reduce_scatter_tensor: funcol_py.reduce_scatter_tensor, + funcol_native.reduce_scatter_tensor_coalesced: funcol_py.reduce_scatter_tensor_coalesced, +} + + +class CommDebugMode(TorchDispatchMode): + """ + ``CommDebugMode`` is a context manager that counts the number of + functional collectives within its context. It does this using a + ``TorchDispatchMode``. + + NOTE: this mode only works for functional collective atm and the + distributed_c10d collectives are not supported yet. + + Example usage + + .. code-block:: python + + mod = ... + comm_mode = CommDebugMode() + with comm_mode: + mod.sum().backward() + + """ + + def __init__(self): + self.comm_counts: Dict[Any, int] = defaultdict(int) + self.comm_registry = set() + for native_op, py_op in NATIVE_TO_PY_MAPPING.items(): + self.comm_registry.add(native_op) + self.comm_registry.add(py_op) + + def get_total_counts(self) -> int: + return sum(self.comm_counts.values()) + + def get_comm_counts(self) -> Dict[Any, int]: + """Returns the communication counts as a dictionary. + + Returns: + Dict[Any, int]: The communication counts as a dictionary. + """ + return self.comm_counts + + def __enter__(self): + self.comm_counts.clear() + super().__enter__() + return self + + def __exit__(self, *args): + super().__exit__(*args) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + # When running this mode with DTensor, ordinarily all modes will + # run **before** subclasses get a chance to run. + # Returning NotImplemented here gives us a chance to let DTensor + # run and desugar into comms ops, before CommDebugMode sees them. + if any(t == DTensor for t in types): + return NotImplemented + kwargs = kwargs if kwargs else {} + out = func(*args, **kwargs) + func_packet = func._overloadpacket + # We have many tests that use CommDebugMode to verify the occurrence of + # collectives. These tests do so by querying comm_counts with legacy + # funcol ops as key. For the purpose of native funcol migration, we + # need these tests to work for both legacy and native funcol. To avoid + # the need to modify all tests to accommodate the two implementations, + # we make CommDebugMode translate native funcol ops into legacy funcol + # ops until the migration finishes. 
+ if func_packet in self.comm_registry: + if func_packet in NATIVE_TO_PY_MAPPING: + func_packet = NATIVE_TO_PY_MAPPING[func_packet] + self.comm_counts[func_packet] += 1 + + return out diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/op_coverage.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/op_coverage.py new file mode 100644 index 0000000000000000000000000000000000000000..ab97362a76d24700de472463d7b2ad5054e32070 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/op_coverage.py @@ -0,0 +1,105 @@ +from operator import itemgetter +from typing import List + +from functorch.compile import make_boxed_func + +import torch +import torch.fx +import torch.nn as nn +from torch._functorch.compilers import aot_module +from torch._inductor.decomposition import select_decomp_table +from torch.distributed._tensor import DTensor + + +inductor_decomps = select_decomp_table() + +graphs: List[torch.fx.GraphModule] = [] + + +def fwd_bwd_compiler(fx_g, _): + graphs.append(fx_g) + return make_boxed_func(fx_g) + + +def get_inductor_decomp_graphs(model: nn.Module, args, kwargs): + """ + Obtain forward and backward graphs of a model with inductor decompositions using tracing and aot_module. + + Convenient util to get the fwd and bwd graphs of an arbitrary model + with inductor decompositions. Note that this would simply do tracing + with aot_module and don't ensure correctness. This is useful to track + the ops needed in DTensor. + """ + compiled_mod = aot_module( + model, fw_compiler=fwd_bwd_compiler, decompositions=inductor_decomps + ) + output = compiled_mod(*args, **kwargs) + + if output.ndim != 0: + # if output is not a scalar tensor, by default sum it in order to + # run backward + output = output.sum() + + output.backward() + + # one fwd, one bwd graph + assert len(graphs) == 2 + return graphs + + +def print_op_coverage_summary(model: nn.Module, args, kwargs, *, output_csv=False): + """ + Util to print the operator coverage summary of a certain model with tabulute. + + Must have tabulate module installed. 
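+
+    A usage sketch (illustrative; the toy model, shapes, and ``output_csv`` value
+    below are arbitrary, and the ``tabulate`` package must be installed):
+
+    .. code-block:: python
+
+        import torch
+        import torch.nn as nn
+
+        model = nn.Linear(8, 8)
+        args = (torch.randn(4, 8),)
+        # prints one row per ATen op seen in the fwd/bwd graphs, with its count
+        # and whether DTensor has a sharding rule registered for it
+        print_op_coverage_summary(model, args, {}, output_csv=False)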
+ """ + # python module required for summary + import csv + + from tabulate import tabulate + + fwd_graph, bwd_graph = get_inductor_decomp_graphs(model, args, kwargs) + + op_counts = {} + + for node in fwd_graph.graph.nodes: + if node.op == "call_function" and isinstance( + node.target, torch._ops.OpOverload + ): + if node.target not in op_counts: + op_counts[node.target] = 0 + + op_counts[node.target] += 1 + + for node in bwd_graph.graph.nodes: + if node.op == "call_function" and isinstance( + node.target, torch._ops.OpOverload + ): + if node.target not in op_counts: + op_counts[node.target] = 0 + + op_counts[node.target] += 1 + + op_infos = [] + + for op, count in op_counts.items(): + supported = op in DTensor._op_dispatcher.sharding_propagator.op_to_rules + op_infos.append([op, str(op._schema), count, supported]) + + # sort the op info base on the total count index + count_idx = 2 + op_infos.sort(key=itemgetter(count_idx), reverse=True) + + headers = ["Operator", "Schema", "Total Count", "Supported"] + print(tabulate(op_infos, headers=headers)) + + if output_csv: + # Open a CSV file for writing + with open("op_summary.csv", "w", newline="") as csv_file: + # Create a CSV writer object + csv_writer = csv.writer(csv_file) + + csv_writer.writerow(headers) + # Write each table row to the CSV file + for row in op_infos: + csv_writer.writerow(row) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/visualize_sharding.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/visualize_sharding.py new file mode 100644 index 0000000000000000000000000000000000000000..91bc9c2a382c8944992c1c741d36eca92de1c8cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/debug/visualize_sharding.py @@ -0,0 +1,176 @@ +from typing import List, Sequence, Tuple + +import numpy as np + +from torch._prims_common import ShapeType +from torch.distributed._tensor import DeviceMesh + +from torch.distributed._tensor.placement_types import Placement, Shard + + +def _mesh_to_coordinate(mesh, device_type): + """ + Given a n-dimensional list of device mesh, this function creates a map of + device and its coordinate + """ + # Convert the n-dimensional list to a NumPy array + np_mesh = np.array(mesh.mesh.tolist()) + + # Create a dictionary to map each value to its coordinate + device_to_coordinate_map = {} + for coord, value in np.ndenumerate(np_mesh): + # device is unique in device_mesh + device_to_coordinate_map[f"{device_type}:{str(value)}"] = list(coord) + + return device_to_coordinate_map + + +def _convert_offset_to_ranges(all_offsets): + """ + Using tabulate package to create a table is easier when we specify row and col ranges + This function converts offsets to ranges. 
+ """ + converted_blocks = [] + + for offset in all_offsets: + shape, offset, value = offset + + # Calculate row_range and column_range + row_range = (offset[0], offset[0] + shape[0] - 1) + column_range = (offset[1], offset[1] + shape[1] - 1) + + # Convert value to string to match your desired format + converted_block = { + "row_range": row_range, + "column_range": column_range, + "value": str(value), + } + converted_blocks.append(converted_block) + + return converted_blocks + + +def _create_table(blocks): + """ + Creates a tabulate table given row and column ranges with device name + """ + try: + from tabulate import tabulate + except ImportError as e: + raise ImportError("tabulate package is required to visualize sharding") from e + + # Extract unique row and column ranges + row_ranges = sorted({block["row_range"] for block in blocks}) + col_ranges = sorted({block["column_range"] for block in blocks}) + + # Create a matrix initialized with empty strings + matrix = [["" for _ in col_ranges] for _ in row_ranges] + + # Fill the matrix with values + for block in blocks: + row_index = row_ranges.index(block["row_range"]) + col_index = col_ranges.index(block["column_range"]) + if matrix[row_index][col_index] == "": + matrix[row_index][col_index] = block["value"] + else: + matrix[row_index][col_index] += ", " + block["value"] + + # Prepare headers + row_headers = [f"Row {r[0]}-{r[1]}" for r in row_ranges] + col_headers = [f"Col {c[0]}-{c[1]}" for c in col_ranges] + + return tabulate(matrix, headers=col_headers, showindex=row_headers) + + +def compute_local_shape_and_global_offset( + global_shape: ShapeType, + mesh: DeviceMesh, + placements: Sequence[Placement], + my_coordinate: List[int], +) -> Tuple[Tuple[int, ...], Tuple[int, ...]]: + """ + Same as torch.distributed._tensor._utils.compute_local_shape_and_global_offset but + with custom my_coordinate input. This is the modified implementation for visualize_sharding. + """ + + if my_coordinate is None: + # if rank not in the mesh, return empty offset + return ((), ()) + else: + local_shape = list(global_shape) + global_offset = [0] * len(global_shape) + + for idx, placement in enumerate(placements): + mesh_dim_size = mesh.size(idx) + if isinstance(placement, Shard): + shard_dim = placement.dim + local_offset = [0] * len(global_shape) + assert shard_dim < len( + local_shape + ), f"Sharding dim {shard_dim} greater than tensor ndim {len(local_shape)}" + shard_size, shard_offset = placement._local_shard_size_on_dim( + local_shape[shard_dim], + mesh_dim_size, + my_coordinate[idx], + return_offset=True, + ) + + local_shape[shard_dim] = shard_size + local_offset[shard_dim] = shard_offset + + # On a given dimension, if the local_offset[shard_dim] is smaller than global_offset[shard_dim], + # it means that this dimension has been already sharded in previous placement. + # Therefore, we cannot simply replace the global_offset[shard_dim] with local_offset[shard_dim]. + # Instead, for the given shard_dim, we need to add local_offset[shard_dim] to existing global_offset[shard_dim]. 
+ if global_offset[shard_dim] <= local_offset[shard_dim]: + global_offset[shard_dim] = local_offset[shard_dim] + else: + global_offset[shard_dim] += local_offset[shard_dim] + + return tuple(local_shape), tuple(global_offset) + + +def visualize_sharding(dtensor, header=""): + """ + Visualizes sharding in 1D-2D dtensors + Requires tabulate, install with `pip install tabulate` + + note: no sharding info will be printed for empty tensors + """ + if dtensor.numel() == 0: # we do not print for empty dtensors + return + + if len(dtensor.shape) >= 3: + raise RuntimeError( + "visualize sharding is only implemented for 1D or 2D dtensor" + ) + placements = dtensor.placements + device_mesh = dtensor.device_mesh + device_type = dtensor.device_mesh.device_type + + if device_mesh.get_coordinate() is None: # current rank is not in the mesh + return + + # Only display the visualization once for each DTensor, on the rank whose + # coordinate is 0 on all dimensions. For example, if the mesh is a full mesh, + # we will only print on rank 0. + local_rank_zero_on_all_dim = all( + device_mesh.get_local_rank(mesh_dim=dim) == 0 for dim in range(device_mesh.ndim) + ) + if not local_rank_zero_on_all_dim: + return + + device_map = _mesh_to_coordinate(device_mesh, device_type) + all_offsets = [] + for device in device_map: + local_shape, global_offset = compute_local_shape_and_global_offset( + dtensor.shape, device_mesh, placements, device_map[device] + ) + all_offsets.append([local_shape, global_offset, device]) + + # Convert offsets to blocks with row_ranges for tabulate + blocks = _convert_offset_to_ranges(all_offsets) + + # Print the table + print(header) + print(_create_table(blocks)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/device_mesh.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/device_mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..dc74f679b6e469d84265cff3fb79af78d3539ec9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/device_mesh.py @@ -0,0 +1,6 @@ +from torch.distributed.device_mesh import ( # noqa: F401 + _get_device_handle, + _mesh_resources, + DeviceMesh, + init_device_mesh, +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/dispatch.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..da433f9357010b36b5cc6dbea35da22436f42a40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/dispatch.py @@ -0,0 +1,393 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +import functools +import operator +from typing import cast, Dict, List, Optional, Sequence, Tuple + +import torch + +import torch.distributed as dist +import torch.distributed._tensor.api as dtensor +import torch.distributed._tensor.random as random +from torch.distributed._tensor._utils import try_find_mesh_from_args +from torch.distributed._tensor.op_schema import ( + _is_inplace_op, + _is_out_variant_op, + OpInfo, + OpSchema, + OutputSpecType, +) +from torch.distributed._tensor.placement_types import DTensorSpec, Replicate, TensorMeta +from torch.distributed._tensor.random import is_rng_supported_mesh +from torch.distributed._tensor.redistribute import redistribute_local_tensor +from torch.distributed._tensor.sharding_prop import ShardingPropagator +from torch.distributed._tensor.tp_conv import ( + convolution_backward_handler, + convolution_handler, +) +from torch.distributed.device_mesh import DeviceMesh + +try: + from torch.utils import _cxx_pytree as pytree +except ImportError: + from torch.utils import _pytree as pytree # type: ignore[no-redef] + +aten = torch.ops.aten + + +def decompose_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + """ + Decomposes a op to core ATen op, this handler is mostly here + for inference mode usage where the ops are not core aten ops. + """ + r = op_call.decompose(*args, **kwargs) + if r is not NotImplemented: + return r + else: + raise RuntimeError("Decomposition failed") + + +def is_same_size_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> bool: + lhs = cast(torch.Tensor, args[0]) + rhs = cast(torch.Tensor, args[1]) + return lhs.shape == rhs.shape + + +class OpDispatcher: + """ + Op dispatching class instance to handle args/kwargs pre-processing (un-wrapping), sharding + propagation, redistribute local args, local compute, and post-processing (re-wrapping). It + also handles any op specific logic if necessary. + """ + + def __init__(self) -> None: + self.sharding_propagator = ShardingPropagator() + self._random_ops = { + aten.native_dropout.default, + aten.normal_.default, + aten.rand_like.default, + aten.randn_like.default, + aten.randint_like.default, + aten.randint_like.low_dtype, + aten.randint_like.low_dtype_out, + aten.uniform_.default, + aten.bernoulli.default, + aten.bernoulli_.float, + } + self._custom_op_handlers = { + aten.linear.default: decompose_handler, + aten.is_same_size.default: is_same_size_handler, + aten.convolution.default: convolution_handler, + aten.convolution_backward.default: convolution_backward_handler, + } + + # This flag is used internally to control whether we treat the torch.Tensor(non-DTensor) + # as implicitly replicated or we throw error to user. + # NOTE: It is EXTREMELY UNSAFE to turn this flag on by default so we intentionally leave + # it as False by default. 
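+        # (The `implicit_replication()` context manager under
+        # torch.distributed._tensor.experimental temporarily enables this flag
+        # and resets it to False on exit.)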
+ self._allow_implicit_replication = False + + def dispatch( + self, + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], + ) -> object: + """ + Main dispatching logic + """ + # operators that does not need to go through sharding propagation + if op_call in self._custom_op_handlers: + return self._custom_op_handlers[op_call](op_call, args, kwargs) # type: ignore[operator] + + # extract local tensor and sharding infos to a OpInfo + op_info = self.unwrap_to_op_info(op_call, args, kwargs) + + self.sharding_propagator.propagate(op_info) + output_sharding = op_info.output_sharding + assert output_sharding is not None, "output sharding should not be None" + + mesh = op_info.mesh + if mesh.get_coordinate() is None: + # For a non-participating device, we do: + # 1. if the return type is scalar, set the local result to None. + # The local results from all devices will then be all-gathered + # and a reduce op will be performed on the list of results + # with appropriate operators: + # for bool type, we by default use AND to reduce; + # we can extend for more ops if necessary. + # 2. if the return type is Tensor or List[Tensor], return empty + # tensor(s) with correct dtype. + spec = output_sharding.output_spec + ret_list = op_info.schema.op._schema.returns + + if spec is None: + # For a scalar return type, the non-participating device has None + # as its local result + local_results: object = None + else: + + def default_tensor(spec: DTensorSpec) -> torch.Tensor: + if spec.tensor_meta is not None: + shape = spec.tensor_meta.shape + dtype = spec.tensor_meta.dtype + if len(shape) == 0: + # scalar tensor + return torch.zeros((), dtype=dtype) + else: + # non-scalar tensor + return torch.tensor([], dtype=dtype) + else: + raise RuntimeError(f"{spec} has no tensor metadata.") + + if isinstance(spec, DTensorSpec): + # return a Tensor value + local_results = default_tensor(spec) + elif isinstance(spec, Sequence): + # return a List[Tensor] value + local_results = [ + default_tensor(s) if s is not None else None for s in spec + ] + assert isinstance(local_results, List) + if None in local_results: + ret_type = str(ret_list[0].type) + raise NotImplementedError( + f"return type {ret_type} in DTensor op is not supported" + ) + else: + if output_sharding.needs_redistribute: + # compute locally with redistribute first if needed + assert output_sharding.schema_suggestions is not None + self.redistribute_local_args( + op_info, output_sharding.schema_suggestions[0] + ) + + local_tensor_args = ( + pytree.tree_unflatten( + cast(List[object], op_info.local_args), op_info.args_tree_spec + ) + if op_info.args_tree_spec + else op_info.local_args + ) + + # run local op computation with potentially modified args/kwargs + local_tensor_args = cast(Tuple[object, ...], local_tensor_args) + if op_call in self._random_ops and is_rng_supported_mesh(mesh): + if not random._rng_tracker: + # Default to `OffsetBasedRNGTracker` if the parallelism API + # did not already construct one + random._rng_tracker = random.OffsetBasedRNGTracker(mesh.device_type) + # For DTensor random operator, run it within a distribute region + with random._rng_tracker._distribute_region( + cast(dtensor.DTensor, args[0])._spec + ): + local_results = op_call(*local_tensor_args, **op_info.local_kwargs) + else: + local_results = op_call(*local_tensor_args, **op_info.local_kwargs) + + # communicate the result to all ranks for some operators that return scalar value + if output_sharding.output_spec is None: + if op_call == 
aten.equal.default: + obj_list = [None for _ in range(dist.get_world_size())] + dist.all_gather_object(obj_list, local_results) # type: ignore[possibly-undefined] + obj_list = list(filter(lambda x: x is not None, obj_list)) + # perform reduce on the collection with AND op + local_results = functools.reduce(operator.and_, obj_list, True) + + if _is_inplace_op(op_call): + # inplace op should return self instead of re-wrapping + if output_sharding.output_spec is not None: + return args[0] + else: + return None + elif _is_out_variant_op(op_call): + # out variant could possibly have multiple out args (i.e. lu_unpack.out) + output_specs = ( + (output_sharding.output_spec,) + if not isinstance(output_sharding.output_spec, tuple) + else output_sharding.output_spec + ) + out_dts = [] + spec_idx = 0 + for argument in op_call._schema.arguments: + if argument.is_out: + out_dt = cast(dtensor.DTensor, kwargs[argument.name]) + out_dt._spec = cast(DTensorSpec, output_specs[spec_idx]) + out_dts.append(out_dt) + spec_idx += 1 + + assert len(out_dts) >= 1, "out variant should have at least one out arg" + return tuple(out_dts) if len(out_dts) > 1 else out_dts[0] + else: + return self.wrap(local_results, output_sharding.output_spec) # type: ignore[possibly-undefined] + + @staticmethod + def redistribute_local_args( + op_info: OpInfo, + suggested_input_schema: OpSchema, + ) -> None: + # NOTE: it's very rare that we need to reshard kwargs so we intentionally skip it + + # TODO: the op schema should probably just remain flattened so that we can avoid this tree flatten + # Need to fix all the ops before doing this. + if op_info.args_tree_spec is not None: + flatten_args_schema_to_reshard = tuple( + pytree.tree_leaves(suggested_input_schema.args_schema) + ) + else: + flatten_args_schema_to_reshard = suggested_input_schema.args_schema + + new_local_args: List[object] = [] + for i, arg_spec in enumerate(op_info.flat_args_schema): + reshard_arg_spec = flatten_args_schema_to_reshard[i] + if isinstance(arg_spec, DTensorSpec): + local_tensor = cast(torch.Tensor, op_info.local_args[i]) + if arg_spec != reshard_arg_spec: + resharded_local_tensor = redistribute_local_tensor( + local_tensor, arg_spec, reshard_arg_spec + ) + new_local_args.append(resharded_local_tensor) + else: + new_local_args.append(local_tensor) + else: + new_local_args.append(reshard_arg_spec) + + op_info.local_args = tuple(new_local_args) + + def unwrap_to_op_info( + self, + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], + ) -> OpInfo: + # get runtime schema to determine whether to use pytree to flatten inputs + runtime_schema_info = self.sharding_propagator.op_to_schema_info.get( + op_call, None + ) + + if runtime_schema_info is not None and runtime_schema_info.needs_pytree: + # flatten args/kwargs when necessary + tree_args, args_spec = pytree.tree_flatten(args) + args_list: Sequence[object] = tree_args + else: + args_list, args_spec = args, None + + args_schema: List[object] = [] + kwargs_schema: Dict[str, object] = {} + local_args: List[object] = [] + local_kwargs: Dict[str, object] = {} + mesh: Optional[DeviceMesh] = None + + for arg in args_list: + if isinstance(arg, dtensor.DTensor): + args_schema.append(arg._spec) + local_args.append(arg._local_tensor) + if mesh is not None: + if mesh != arg.device_mesh: + raise NotImplementedError( + f"{op_call}: DTensor does not support cross-mesh operation yet!" 
+ ) + else: + mesh = arg.device_mesh + elif isinstance(arg, torch.Tensor): + if arg.ndim == 0 or self._allow_implicit_replication: + mesh = mesh or try_find_mesh_from_args(op_call, args_list) + # scalar tensor can be safely treated as replicated + args_schema.append( + DTensorSpec( + mesh, + (Replicate(),) * mesh.ndim, + tensor_meta=TensorMeta( + shape=arg.shape, stride=arg.stride(), dtype=arg.dtype + ), + ) + ) + local_args.append(arg) + else: + raise RuntimeError( + f"{op_call}: got mixed torch.Tensor and DTensor, need to convert all" + " torch.Tensor to DTensor before calling distributed operators!" + ) + else: + args_schema.append(arg) + local_args.append(arg) + + for k, v in kwargs.items(): + if isinstance(v, dtensor.DTensor): + kwargs_schema[k] = v._spec + local_kwargs[k] = v._local_tensor + if mesh is not None: + if mesh != v.device_mesh: + raise NotImplementedError( + f"{op_call}: DTensor does not support cross-mesh operation yet!" + ) + else: + mesh = v.device_mesh + elif isinstance(v, torch.Tensor): + raise RuntimeError( + f"{op_call}: got mixed torch.Tensor and DTensor, need to convert all" + " torch.Tensor to DTensor before calling distributed operators!" + ) + else: + kwargs_schema[k] = v + local_kwargs[k] = v + + assert mesh is not None, f"found no DeviceMesh from dtensor args for {op_call}!" + op_info = OpInfo( + mesh, + OpSchema( + op_call, + pytree.tree_unflatten(args_schema, args_spec) + if args_spec + else tuple(args_schema), + kwargs_schema, + schema_info=runtime_schema_info, + ), + args_schema, + tuple(local_args), + local_kwargs, + args_spec, + ) + return op_info + + @staticmethod + def wrap(res: object, spec: OutputSpecType) -> object: + if isinstance(res, torch.Tensor): + if spec is not None: + assert isinstance( + spec, DTensorSpec + ), f"output spec does not match with output! Expected DTensorSpec, got {spec}." + assert spec.tensor_meta is not None + return dtensor.DTensor( + res, + spec.mesh, + spec.placements, + shape=spec.tensor_meta.shape, + dtype=spec.tensor_meta.dtype, + requires_grad=res.requires_grad, + stride=spec.tensor_meta.stride, + ) + else: + # if output does not have a DTensorSpec due to specific ops, it must be a scalar tensor + assert res.ndim == 0, "output tensor should be scalar!" + return res + elif isinstance(res, (list, tuple)): + assert spec is not None and isinstance( + spec, (list, tuple) + ), f"output spec does not match with output! Expected list/tuple, got {spec}." + res_list = [] + for e, s in zip(res, spec): + res_list.append(OpDispatcher.wrap(e, s)) + + return tuple(res_list) if isinstance(res, tuple) else res_list + else: + # if the res contains only non tensor values (i.e. int/float/none), we simply return it + # without rewrapping to DTensor. 
+ return res diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a9bbe7ecc8fe4c8359a56e834048b0d5251324 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py @@ -0,0 +1,12 @@ +from contextlib import contextmanager + +from torch.distributed._tensor.api import DTensor + + +@contextmanager +def implicit_replication(): + try: + DTensor._op_dispatcher._allow_implicit_replication = True + yield + finally: + DTensor._op_dispatcher._allow_implicit_replication = False diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab48c8cea0981f58e161a672399f438d5d01fd47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bd131c2f873b737319745a5b5ea25c307998d29 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..4b95061598b286a71ca4e30761aefd62c5059367 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py @@ -0,0 +1,547 @@ +import copy +import operator +from typing import Any, cast, Dict, List, Optional, Sequence, Tuple + +import torch +from torch._subclasses.fake_tensor import FakeTensor +from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor +from torch.distributed._tensor.op_schema import ( + DTensorSpec, + OpSchema, + OutputSharding, + OutputSpecType, + PlacementStrategy, +) +from torch.distributed._tensor.placement_types import ( + Placement, + Replicate, + Shard, + TensorMeta, +) +from torch.distributed._tensor.redistribute import redistribute_local_tensor +from torch.distributed.tensor.parallel.style import ColwiseParallel, ParallelStyle +from torch.export import ExportedProgram +from torch.export.exported_program import ExportGraphSignature +from torch.fx import GraphModule +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.node import Node +from torch.fx.passes.infra.pass_base import PassBase, PassResult +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from torch.utils import _pytree as pytree + + +aten = torch.ops.aten + + +def tensor_parallel_transformation( + exported_program: ExportedProgram, + rank: int, + world_size: int, + device_type: str, + parallel_strategies: Dict[str, ParallelStyle], +) -> ExportedProgram: + """ + The entry point function to perform graph transformations on an exported program + to transform a single-device 
graph into a tensor parallel graph. + + .. warning:: + This API is experimental and subject to change. + """ + + gm = exported_program.graph_module + sig = copy.deepcopy(exported_program.graph_signature) + state_dict = copy.copy(exported_program.state_dict) + + with gm._set_replace_hook(sig.get_replace_hook()): + res = TensorParallelTransformPass( + rank, + world_size, + device_type, + state_dict, + exported_program.graph_signature, + parallel_strategies, + )(gm) + assert res is not None + gm = res.graph_module + + return exported_program._update(gm, sig, state_dict) + + +class TensorParallelTransformPass(PassBase): + """ + This pass is responsible for transforming a single-device graph into a tensor parallel + graph. It will mark the placement strategy of each node in the graph, + partition the graph into distributed graph, then shard the parameters/buffers accordingly. + """ + + def __init__( + self, + rank: int, + world_size: int, + device_type: str, + state_dict: Dict[str, torch.Tensor], + graph_signature: ExportGraphSignature, + parallel_strategies: Dict[str, ParallelStyle], + ) -> None: + super().__init__() + self.rank = rank + self.mesh = DeviceMesh(device_type, torch.arange(world_size)) + self.state_dict: Dict[str, torch.Tensor] = state_dict + self.graph_signature = graph_signature + self.parallel_strategies = parallel_strategies + + def call(self, graph_module) -> PassResult: + gm = copy.deepcopy(graph_module) + + parameter_placements = _generate_parameter_and_buffer_placements( + list(self.state_dict.keys()), self.parallel_strategies + ) + placement_strategies = _mark_sharding( + gm, self.graph_signature, self.mesh, parameter_placements + ) + _partitioner(gm) + _shard_state_dict( + self.state_dict, placement_strategies, self.graph_signature, self.mesh + ) + return PassResult(gm, True) + + +def _generate_parameter_and_buffer_placements( + params_and_buffers: List[str], + parallel_strategies: Dict[str, ParallelStyle], +) -> Dict[str, Placement]: + """ + Build parameter placements based on the give parallel style of linear layers. + """ + parameter_placements: Dict[str, Placement] = {} + for linear_fqn, parallel_style in parallel_strategies.items(): + weight_fqn = f"{linear_fqn}.weight" + bias_fqn = f"{linear_fqn}.bias" + assert weight_fqn in params_and_buffers + parameter_placements[weight_fqn] = ( + Shard(0) if parallel_style == ColwiseParallel else Shard(1) + ) + if bias_fqn in params_and_buffers: + parameter_placements[bias_fqn] = ( + Shard(0) if parallel_style == ColwiseParallel else Replicate() + ) + return parameter_placements + + +def _mark_tensor_parallel_shardings( + gm: GraphModule, + graph_signature: ExportGraphSignature, + mesh: DeviceMesh, + parameter_placements: Dict[str, Placement], +) -> Dict[Node, PlacementStrategy]: + """ + Mark the placement strategies of the parameter and buffer placeholder nodes. 
+ """ + placement_strategies: Dict[Node, PlacementStrategy] = {} + num_params_and_buffers = len(graph_signature.inputs_to_parameters) + len( + graph_signature.inputs_to_buffers + ) + placeholder_idx: int = 0 + for node in gm.graph.nodes: + if node.op == "placeholder": + if placeholder_idx < num_params_and_buffers: + fqn: str = _get_input_node_fqn(node.name, graph_signature) + placement: Placement = ( + parameter_placements[fqn] + if fqn in parameter_placements + else Replicate() + ) + placement_strategies[node] = _create_placement_strategy( + node, + mesh, + placements=(placement,), + ) + placeholder_idx += 1 + else: + placement_strategies[node] = _create_placement_strategy( + node, + mesh, + placements=(Replicate(),), + ) + return placement_strategies + + +def _get_input_node_fqn(input_name: str, graph_signature: ExportGraphSignature) -> str: + """ + Return the FQN of an input node. + """ + if input_name in graph_signature.inputs_to_parameters: + return graph_signature.inputs_to_parameters[input_name] + elif input_name in graph_signature.inputs_to_buffers: + return graph_signature.inputs_to_buffers[input_name] + else: + raise ValueError( + f"{input_name} not found in inputs_to_parameters or inputs_to_buffers" + ) + + +def _mark_sharding( + gm: GraphModule, + graph_signature: ExportGraphSignature, + mesh: DeviceMesh, + parameter_placements: Dict[str, Placement], +) -> Dict[Node, PlacementStrategy]: + """ + Mark the sharding strategy for each node in the graph module. + """ + placement_strategies: Dict[ + Node, PlacementStrategy + ] = _mark_tensor_parallel_shardings(gm, graph_signature, mesh, parameter_placements) + + for node in gm.graph.nodes: + if node.op == "placeholder": + if node not in placement_strategies: + placement_strategies[node] = _create_placement_strategy( + node, mesh, placements=(Replicate(),) + ) + node.meta["sharding"] = placement_strategies[node] + elif node.op == "call_function": + if node.target == operator.getitem: + input_nodes = node.all_input_nodes + assert ( + len(input_nodes) == 1 + ), f"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}" + arg_strategy = placement_strategies[input_nodes[0]] + placement_strategies[node] = _create_placement_strategy( + node, + mesh, + placements=arg_strategy.output_spec.placements, + input_specs=_get_input_node_specs(node, placement_strategies), + ) + node.meta["sharding"] = placement_strategies[node] + else: + op_schema = _get_op_schema(node, placement_strategies) + + # get DTensor specs for inputs and outputs + if ( + op_schema.op + not in DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs + and op_schema.op + not in DTensor._op_dispatcher.sharding_propagator.op_to_rules + ): + # Mark all as replicated + output_sharding = _generate_default_output_sharding( + node, + mesh, + op_schema, + ) + else: + output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding( + op_schema, + ) + placement_strategies[node] = PlacementStrategy( + output_specs=_get_output_spec_from_output_sharding(output_sharding), + input_specs=output_sharding.schema_suggestions[0].args_spec + if output_sharding.schema_suggestions is not None + else _get_input_node_specs(node, placement_strategies), + ) + node.meta["sharding"] = placement_strategies[node] + elif node.op == "output": + node.meta["sharding"] = None + else: + raise RuntimeError(f"op code {node.op} not supported") + return placement_strategies + + +def _get_output_spec_from_output_sharding( + output_sharding: 
OutputSharding, +) -> DTensorSpec: + """ + Util function to extract output spec from output sharding. + """ + if isinstance(output_sharding.output_spec, DTensorSpec): + return output_sharding.output_spec + else: + # For ops that return multiple outputs, the outputs should have the same output spec + assert isinstance(output_sharding.output_spec, Sequence) + assert output_sharding.output_spec[0] is not None + output_sharding.output_spec[0].tensor_meta = None + return output_sharding.output_spec[0] + + +def _create_placement_strategy( + node: Node, + mesh: DeviceMesh, + placements: Tuple[Placement, ...], + input_specs: Optional[Sequence[DTensorSpec]] = None, +) -> PlacementStrategy: + """ + Util function to construct a placement strategy for a given node. + """ + placement = PlacementStrategy( + input_specs=input_specs, + output_specs=DTensorSpec( + mesh=mesh, + placements=placements, + ), + ) + _populate_tensor_meta(node, placement.output_specs) + return placement + + +def _populate_tensor_meta(node: Node, output_spec: OutputSpecType) -> None: + """ + Util function to populate tensor meta of output_spec based on node metadata. + """ + if isinstance(node.meta["val"], Sequence): + assert isinstance(output_spec, Sequence) + for spec, fake_tensor in zip(output_spec, node.meta["val"]): + assert spec is not None + spec.tensor_meta = TensorMeta( + shape=fake_tensor.shape, + stride=fake_tensor.stride(), + dtype=fake_tensor.dtype, + ) + else: + assert isinstance(output_spec, DTensorSpec) + output_spec.tensor_meta = TensorMeta( + shape=node.meta["val"].shape, + stride=node.meta["val"].stride(), + dtype=node.meta["val"].dtype, + ) + + +def _generate_default_output_sharding( + node: Node, + mesh: DeviceMesh, + op_schema: OpSchema, +) -> OutputSharding: + """ + Util function to create a default output sharding that suggests Replicate placement for both args and outputs. 
+ """ + + def update_arg_spec(arg_spec: DTensorSpec) -> DTensorSpec: + return DTensorSpec( + mesh=arg_spec.mesh, + placements=(Replicate(),), + tensor_meta=arg_spec.tensor_meta, + ) + + new_op_schema = OpSchema( + op=op_schema.op, + args_schema=pytree.tree_map_only( + DTensorSpec, update_arg_spec, op_schema.args_schema + ), + kwargs_schema=op_schema.kwargs_schema, + ) + + def create_output_spec(tensor: FakeTensor) -> DTensorSpec: + return DTensorSpec( + mesh=mesh, + placements=(Replicate(),), + tensor_meta=TensorMeta( + shape=tensor.shape, + stride=tensor.stride(), + dtype=tensor.dtype, + ), + ) + + return OutputSharding( + output_spec=pytree.tree_map_only( + FakeTensor, create_output_spec, node.meta["val"] + ), + schema_suggestions=[new_op_schema], + failed_reason=f"{node.op} does not have sharding strategy registered", + needs_redistribute=True, + ) + + +def _partitioner(gm: torch.fx.GraphModule) -> torch.fx.GraphModule: + """ + Graph partitioner that partitions the single device graph + to distributed graph + """ + for node in gm.graph.nodes: + node_sharding = node.meta["sharding"] + if node.op == "placeholder": + out_spec = node_sharding.output_spec + local_val = _partition_val(node.meta["val"], out_spec) + # update node value + node.meta["val"] = local_val + elif node.op == "call_function": + out_spec = node_sharding.output_spec + # check if there's misaligned sharding, insert reshard if there is + expected_input_specs = node_sharding.input_specs + for idx, input_arg in enumerate(node.all_input_nodes): + input_arg_sharding = input_arg.meta["sharding"] + input_arg_spec = input_arg_sharding.output_spec + desired_spec = ( + out_spec + if expected_input_specs is None + else expected_input_specs[idx] + ) + if input_arg_spec != desired_spec: + _insert_reshard_gm( + gm, node, input_arg, input_arg_spec, desired_spec + ) + # convert output val to its local component + output_val = node.meta["val"] + node.meta["val"] = _partition_val(output_val, out_spec) + elif node.op == "output": + for input_arg in node.all_input_nodes: + # input args of output should be Replicate, otherwise redistribution is needed. + input_args_to_check: Sequence[Node] = ( + input_arg if isinstance(input_arg, Sequence) else [input_arg] + ) + for arg in input_args_to_check: + arg_sharding = arg.meta["sharding"] + arg_spec = arg_sharding.output_spec + desired_spec = copy.copy(arg_spec) + desired_spec.placements = (Replicate(),) + if arg_spec != desired_spec: + _insert_reshard_gm(gm, node, arg, arg_spec, desired_spec) + else: + raise RuntimeError(f"op code {node} not supported") + + _clean_up_graph_metadata(gm) + gm.graph.lint() + gm.recompile() + return gm + + +def _partition_val(val: Any, spec: DTensorSpec) -> Any: + """ + util function to convert a full tensor val to its local component + """ + if isinstance(val, torch.Tensor): + local_shard = val + if val.ndim == 0: + # If it's already a scalar tensor, it is already local, we don't + # need to do anything + return local_shard + + for idx, placement in enumerate(spec.placements): + if placement.is_shard(): + placement = cast(Shard, placement) + num_chunks = spec.mesh.size(mesh_dim=idx) + my_coord = spec.mesh.get_coordinate() + assert my_coord is not None, "current rank not in mesh!" 
+ my_coord_on_mesh_dim = my_coord[idx] + local_shard = placement._split_tensor( + local_shard, num_chunks, with_padding=False, contiguous=True + )[0][my_coord_on_mesh_dim] + return local_shard + elif isinstance(val, (list, tuple)): + return val.__class__(_partition_val(v, spec) for v in val) + else: + raise RuntimeError(f"val type {type(val)} not supported") + + +def _insert_reshard_gm( + gm: torch.fx.GraphModule, + node: Node, + input_arg: Node, + input_arg_spec: DTensorSpec, + desired_spec: DTensorSpec, +) -> None: + """ + Transform the graph for tensor redistribution. + """ + input_arg_spec.tensor_meta = input_arg.meta["tensor_meta"] + desired_spec.tensor_meta = input_arg.meta["tensor_meta"] + input_arg_tensor = input_arg.meta["val"] + + # insert reshard operation + def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor: + return redistribute_local_tensor( + local_tensor, + input_arg_spec, + desired_spec, + ) + + reshard_gm = make_fx(reshard_fn)(input_arg_tensor) + reshard_gm_nodes = list(reshard_gm.graph.nodes) + input_node = reshard_gm_nodes[0] + with gm.graph.inserting_before(node): + output_node = gm.graph.graph_copy( + reshard_gm.graph, + val_map={ + input_node: input_arg, + }, + ) + node.replace_input_with(input_arg, output_node) + + +def _clean_up_graph_metadata(gm: torch.fx.GraphModule) -> None: + """ + Clean up the graph by removing sharding and partitioning related metadata + """ + for node in gm.graph.nodes: + if "sharding" in node.meta: + del node.meta["sharding"] + if "val" in node.meta and isinstance(node.meta["val"], torch.Tensor): + local_tensor_meta = _extract_tensor_metadata(node.meta["val"]) + node.meta["tensor_meta"] = local_tensor_meta + + +def _get_input_node_specs( + node: Node, placement_strategies: Dict[Node, PlacementStrategy] +) -> Tuple[DTensorSpec, ...]: + """ + Get the input specs of a node. + """ + input_specs_list: List[DTensorSpec] = [] + for input_arg in node.all_input_nodes: + if input_arg in placement_strategies: + output_spec = placement_strategies[input_arg].output_specs + assert isinstance(output_spec, DTensorSpec) + input_specs_list.append(output_spec) + else: + raise ValueError(f"{input_arg} does not have output_spec populated.") + return tuple(input_specs_list) + + +def _get_op_schema( + node: Node, placement_strategies: Dict[Node, PlacementStrategy] +) -> OpSchema: + """ + Util function to construct the operator schema of a node. 
+ """ + args_schema_list = pytree.tree_map_only( + Node, lambda arg: placement_strategies[arg].output_specs, node.args + ) + op_schema = OpSchema( + op=cast(torch._ops.OpOverload, node.target), + args_schema=tuple(args_schema_list), + kwargs_schema=cast(Dict[str, object], node.kwargs), + ) + return op_schema + + +def _shard_state_dict( + state_dict: Dict[str, torch.Tensor], + placement_strategies: Dict[Node, PlacementStrategy], + graph_signature: ExportGraphSignature, + mesh: DeviceMesh, +) -> None: + """ + Inplace partition the weights based on the placement strategy + """ + for node, placement_strategy in placement_strategies.items(): + if node.op != "placeholder": + continue + if node.name in graph_signature.inputs_to_parameters: + fqn = graph_signature.inputs_to_parameters[node.name] + elif node.name in graph_signature.inputs_to_buffers: + fqn = graph_signature.inputs_to_buffers[node.name] + else: + continue + assert fqn in state_dict, f"{fqn} not found in state dict: {state_dict.keys()}" + + original_param = state_dict[fqn] + dtensor_param = distribute_tensor( + original_param, + mesh, + placement_strategy.output_spec.placements, + ) + local_param = dtensor_param.to_local() + state_dict[fqn] = ( + torch.nn.Parameter(local_param) + if isinstance(original_param, torch.nn.Parameter) + else local_param + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..ff8125f000432257e2b5e59e034bd529cd0bc2e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py @@ -0,0 +1,427 @@ +from dataclasses import dataclass +from functools import cached_property +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +from torch._ops import OpOverload +from torch.distributed._tensor.placement_types import DTensorSpec +from torch.distributed.device_mesh import DeviceMesh + +try: + from torch.utils._cxx_pytree import tree_map_only, TreeSpec +except ImportError: + from torch.utils._pytree import ( # type: ignore[no-redef, assignment] + tree_map_only, + TreeSpec, + ) + + +# Common type aliases +ArgsType = Tuple[object, ...] +KwargsType = Dict[str, object] +# ATen op schemas could have Tensor, Tuple[Tensor] and List[Tensor], so output type sould +# be the same set of possibilities. +OutputSpecType = Optional[Union[DTensorSpec, Sequence[Optional[DTensorSpec]]]] + + +def _rebuild_tensor_from_dtensor_meta(arg) -> object: + """ + This is used to propagate tensor metadata, must be under fake mode + """ + assert arg.tensor_meta is not None, "DTensorSpec does not contain tensor_meta." + return torch.empty_strided( + arg.tensor_meta.shape, + arg.tensor_meta.stride, + dtype=arg.tensor_meta.dtype, + ) + + +def _is_inplace_op(op: OpOverload): + # simple analysis of function schema to determine + # if this is an inplace variant, it might not + # be entirely correct, but it's good enough for now. + return op._schema.name[-1] == "_" + + +def _is_out_variant_op(op: OpOverload): + # simple analysis of function schema to determine + # if this is an out variant, it might not + # be entirely correct, but it's good enough for now. 
+ return "out" in op._schema.overload_name + + +def _pretty_print_spec(spec: object) -> str: + if spec is None: + return "None" + elif isinstance(spec, DTensorSpec): + return "".join([str(p) for p in spec.placements]) + elif isinstance(spec, Sequence): + return "(" + ", ".join([_pretty_print_spec(s) for s in spec]) + ")" + else: + raise RuntimeError(f"Unknown spec type to print: spec={spec}") + + +@dataclass +class PlacementStrategy: + """ + A placement strategy describes acceptable sharding placements of the output + and the tensor arguments of an operation. + + note: when the op return value is a single DTensor object, output_specs is + DTensorSpec; when the return value is a tuple of Optional[DTensor], + output_specs is a tuple of Optional[DTensorSpec]. + """ + + output_specs: Union[DTensorSpec, Tuple[Optional[DTensorSpec], ...]] + input_specs: Optional[Sequence[DTensorSpec]] = None + + # redistribute costs for this op placement strategy + # we need a nested list to record the cost for each + # operand of this operator, and for each operand of + # this operator it might have multiple placement strategies + redistribute_cost: Optional[List[List[float]]] = None + + @cached_property + def output_spec(self) -> DTensorSpec: + """ + This function requires that the strategy have exactly one DTensorSpec as the + output spec. If the output_specs is a tuple, we throw an exception. + """ + if isinstance(self.output_specs, DTensorSpec): + return self.output_specs + else: + raise ValueError( + f"function output_spec expects a single DTensorSpec but got: {self.output_specs}" + ) + + def input_spec(self, index: int = 0) -> DTensorSpec: + assert self.input_specs is not None, "input_specs of PlacementStrategy is None!" + assert len(self.input_specs) > index, ( + f"Invalid index {index} for input_specs of length " + f"{len(self.input_specs)}: {self.input_specs}" + ) + return self.input_specs[index] + + def __str__(self) -> str: + input_specs_str = _pretty_print_spec(self.input_specs) + output_spec_str = _pretty_print_spec(self.output_specs) + return f"{input_specs_str} -> {output_spec_str}" + + +class StrategyType: + """ + Base class type for op strategy, We have two StrategyType: + OpStrategy and TupleStrategy + """ + + pass + + +class OpStrategy(StrategyType): + """ + OpStrategy that consists of a list of placement strategies associated with the op + """ + + def __init__(self, strategies: List[PlacementStrategy]) -> None: + super().__init__() + self.strategies: List[PlacementStrategy] = strategies + + def __str__(self) -> str: + strategy_list_str = ", ".join([str(strategy) for strategy in self.strategies]) + mesh_shape = self.output_mesh_shape + return f"OpStrategy:[{strategy_list_str}] @ mesh: {mesh_shape}" + + def max_num_shards(self) -> int: + """ + Returns the max number of shards across all placement strategies + """ + return max([strategy.output_spec.num_shards for strategy in self.strategies]) + + @property + def output_mesh_shape(self): + output_spec = self.strategies[0].output_specs + if isinstance(output_spec, DTensorSpec): + return output_spec.mesh.shape + else: + assert isinstance( + output_spec, tuple + ), "found no DTensorSpec in the OpStrategy!" 
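# Pretty-printing sketch for PlacementStrategy above (assuming a 1-D mesh; `mesh` is
# hypothetical and Shard comes from placement_types):
#   spec = DTensorSpec(mesh, (Shard(0),))
#   strategy = PlacementStrategy(output_specs=spec, input_specs=[spec])
#   str(strategy)   # "(S(0)) -> S(0)" via _pretty_print_spec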
+ assert output_spec[0] is not None + return output_spec[0].mesh.shape + + @property + def output_ndim(self): + return self.strategies[0].output_spec.ndim + + @property + def output_shape(self): + return self.strategies[0].output_spec.shape + + +class TupleStrategy(StrategyType): + """ + TupleStrategy represents the output strategy of this op is a tuple + of strategy, i.e. If the output of this op is a tuple of tensors or list of tensors + with possibly different placement strategies, we should return a TupleStrategy that + contains a tuple of OpStrategy, where each child represents the sharding strategy + of "each element" of the tuple/list of tensors the op returns. + + NOTE: if the output of the op is a List[Tensor] and they share the same placement + strategy, then we should return a single OpStrategy instead of a TupleStrategy + """ + + def __init__(self, childs: Sequence[StrategyType]) -> None: + super().__init__() + self.childs: Sequence[StrategyType] = childs + + def __str__(self) -> str: + child_strategies_str = ", ".join( + [f"{str(strat)}" for idx, strat in enumerate(self.childs)] + ) + return f"TupleStrategy({child_strategies_str})" + + +@dataclass +class RuntimeSchemaInfo: + """ + RuntimeSchemaInfo stores the operator schema related information for runtime (eager) + execution. This is mainly used for two ways: 1. to generate hash for args to determine + whether to re-run sharding prop or not 2. to determine if we need pytree + """ + + # This static_argnum records static arg "starting index" for ops that have non-tensor + # args/kwargs which would affect sharding propagation results. All args starting from + # this index would be hashed to our sharding cache. + # Note that only a few ops need this information, e.g. view, transpose, var.dim, etc. + static_argnum: int = 100 + # This static_kwargkey records static kwarg names which would affect sharding prop + static_kwargkey: Optional[List[str]] = None + # each op can decide if it wants to use pytree flatten/unflatten during operator + # eager execution, by default we don't need to do flatten/unflatten, only if the + # op indicate it needs to, this is to accelate eager performance. + needs_pytree: bool = False + + +@dataclass +class OpSchema: + """ + OpSchema is a data class that describes an operator input schemas, it + includes DTensor DTensorSpecs and non-tensor args/kwargs (positional order + preserved). It is mainly used by the dispatching logic below to run things like + sharding propagation. + + NOTE: this should be used as a read only data class + TODO: make this a frozen dataclass + + Args: + op: the operator overload we are intercepting + args_schema: contains args except that the DTensor args have been replaced + with its DTensorSpec + kwargs_schema: contains kwargs except that the DTensor kwargs have been replaced + with its DTensorSpec + """ + + op: OpOverload + args_schema: ArgsType + kwargs_schema: KwargsType + + schema_info: Optional[RuntimeSchemaInfo] = None + + @property + def args_spec(self) -> Tuple[DTensorSpec, ...]: + """ + args_spec: Tuple[DTensorSpec, ...]: contains a clean list of args spec list + with NO non-DTensor positional arguments (i.e. 
int/float/tuple, etc) + mainly used by sharding propagation to propagate the output spec + """ + # filter out non-relevant values from args schema to get a clean spec list + # this would mainly be used by sharding propagation rules + return tuple(item for item in self.args_schema if isinstance(item, DTensorSpec)) + + def __repr__(self) -> str: + return ( + f"OpSchema(op={self.op}," + f" args_schema={self.args_schema}," + f" kwargs_schema={self.kwargs_schema})" + ) + + def __str__(self) -> str: + args_sharding: List[str] = [] + mesh_shape = None + for arg in self.args_schema: + if isinstance(arg, DTensorSpec): + args_sharding.append(str(arg)) + mesh_shape = arg.mesh.shape + elif isinstance(arg, OpStrategy): + assert len(arg.strategies) == 1 + args_sharding.append(_pretty_print_spec(arg.strategies[0].output_specs)) + mesh_shape = arg.output_mesh_shape + elif isinstance(arg, TupleStrategy): + first_op_strtgy = arg.childs[0] + assert isinstance(first_op_strtgy, OpStrategy) + mesh_shape = first_op_strtgy.output_mesh_shape + args_sharding.append(str(arg)) + else: + args_sharding.append(str(arg)) + return f"Op(op={self.op}, args_sharding={', '.join(args_sharding)} @ mesh: {mesh_shape})" + + def __post_init__(self) -> None: + has_symints = False + for a in self.args_schema: + if isinstance(a, DTensorSpec) and a.tensor_meta is not None: + if any(isinstance(s, torch.SymInt) for s in a.tensor_meta.shape): + has_symints = True + break + self.has_symints = has_symints + + def arg_type_tensor_or_tensor_list_like(self, arg_idx: int) -> bool: + arg = self.args_schema[arg_idx] + is_tensor = isinstance(arg, DTensorSpec) + if is_tensor: + return True + + if not isinstance(arg, list): + return False + + return all(isinstance(e, DTensorSpec) or e is None for e in arg) + + def return_type_tuple_tensor_like(self) -> bool: + # all dispatch ops could only return Tuple[Tensor] or have None/ints/floats + # in the tuple, but the first element must be a Tensor, so this check is enough + return_types = self.op._schema.returns + return len(return_types) > 1 and isinstance( + return_types[0].type, torch.TensorType + ) + + def return_type_tensor(self) -> bool: + return_types = self.op._schema.returns + # all dispatch ops only return Tensor or Tuple[Tensor] for tensor like + # return types, so this check is enough for tensor like types + return isinstance(return_types[0].type, torch.TensorType) + + def __hash__(self) -> int: + # Only hash args and kwargs that op indicates to hash + if not self.schema_info: + static_argnum = len(self.args_schema) + static_kwargkey = None + else: + static_argnum = self.schema_info.static_argnum + static_kwargkey = self.schema_info.static_kwargkey + + args_to_hash = tuple( + tuple(e) if isinstance(e, list) else e + for i, e in enumerate(self.args_schema) + if self.arg_type_tensor_or_tensor_list_like(i) or i >= static_argnum + ) + if static_kwargkey is not None: + kwargs_to_hash = tuple( + self.kwargs_schema.get(k, None) for k in static_kwargkey + ) + return hash((self.op, args_to_hash, kwargs_to_hash)) + else: + return hash((self.op, args_to_hash)) + + def __eq__(self, other: object) -> bool: + # early return checks + if not isinstance(other, OpSchema): + return False + + if self.op != other.op: + return False + + if len(self.args_schema) != len(other.args_schema): + return False + + # compare each element and early return if any of them is different + if not self.schema_info: + static_argnum = len(self.args_schema) + static_kwargkey = None + else: + static_argnum = 
self.schema_info.static_argnum + static_kwargkey = self.schema_info.static_kwargkey + + for i, (self_arg, other_arg) in enumerate( + zip(self.args_schema, other.args_schema) + ): + if isinstance(self_arg, DTensorSpec) and self_arg != other_arg: + return False + elif i >= static_argnum and self_arg != other_arg: + return False + + # check kwarg equality when there's a static kwarg key + if static_kwargkey: + for key in static_kwargkey: + if self.kwargs_schema.get(key, None) != other.kwargs_schema.get( + key, None + ): + return False + + return True + + def gen_fake_args(self) -> ArgsType: + """ + gen_fake_args: generate fake args for the operator, this is mainly used + by sharding propagation rules to generate fake args for the operator + to run the local tensor operator and get the output spec. + """ + return tree_map_only( + DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.args_schema + ) + + def gen_fake_kwargs(self) -> KwargsType: + """ + gen_fake_kwargs: generate fake kwargs for the operator, this is mainly used + by sharding propagation rules to generate fake kwargs for the operator + to run the local tensor operator and get the output spec. + """ + return tree_map_only( + DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.kwargs_schema + ) + + def _inplace_rewrap_schema_suggestion(self, origin_schema: "OpSchema") -> None: + suggestion_args_spec = self.args_spec + new_arg_schema: List[object] = [] + idx_of_args_spec = 0 + for arg in origin_schema.args_schema: + if isinstance(arg, DTensorSpec): + new_arg_schema.append(suggestion_args_spec[idx_of_args_spec]) + idx_of_args_spec += 1 + else: + new_arg_schema.append(arg) + self.args_schema = tuple(new_arg_schema) + self.kwargs_schema = origin_schema.kwargs_schema + + +@dataclass +class OutputSharding: + """ + OutputSharding is a data class that is used by the sharding propagation + rules, it could set the output_spec upon successful propagation, and if + it failed, output_spec would become None and sharding propagation rules + could give a list of suggestions for inputs to reshard. + + NOTE: the schema_suggestion generated by sharding propagation should be + exactly the same as the operator OpSchema, except the DTensor DTensorSpecs + """ + + output_spec: OutputSpecType + schema_suggestions: Optional[List[OpSchema]] = None + failed_reason: Optional[str] = None + needs_redistribute: bool = False + + +@dataclass +class OpInfo: + """ + All Runtime Op execution info are packed here + """ + + mesh: DeviceMesh + schema: OpSchema + flat_args_schema: List[object] + local_args: Sequence[object] + local_kwargs: Dict[str, object] + args_tree_spec: Optional[TreeSpec] = None + + # the output sharding info + output_sharding: Optional[OutputSharding] = None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py new file mode 100644 index 0000000000000000000000000000000000000000..8d88d064e8fb69454c5f0d572bad09a93960a6aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py @@ -0,0 +1,620 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +from dataclasses import dataclass +from typing import Any, cast, List, NamedTuple, Optional, Tuple + +import torch +import torch.distributed._functional_collectives as funcol +import torch.distributed.distributed_c10d as c10d + +from torch.distributed._tensor._collective_utils import mesh_broadcast, mesh_scatter +from torch.distributed.device_mesh import DeviceMesh + + +class Placement: + # base class Placement type + + # convenient utils to check for placement types + def is_shard(self, dim: Optional[int] = None) -> bool: + is_shard_instance = isinstance(self, Shard) + if dim is not None and is_shard_instance: + return cast(Shard, self).dim == dim + else: + return is_shard_instance + + def is_replicate(self) -> bool: + return isinstance(self, Replicate) + + def is_partial(self) -> bool: + return isinstance(self, _Partial) + + +@dataclass(frozen=True) +class Shard(Placement): + # shard placement, shard on a dim + dim: int + + def _split_tensor( + self, + tensor: torch.Tensor, + num_chunks: int, + *, + with_padding: bool = True, + contiguous: bool = True, + ) -> Tuple[List[torch.Tensor], List[int]]: + """ + This function uses torch.chunk to split a tensor into num_chunks shards along + the Shard placement dimension, and return a list of shards with their pad sizes. + + Keyword args: + with_padding (bool, optional): when True, we pad the tensor on the last + few ranks before calling the collectives (i.e. scatter/all_gather, etc.). + This is because collectives usually require equal size tensor inputs + """ + assert ( + self.dim <= tensor.ndim + ), f"Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}" + + # chunk tensor over dimension `dim` into n slices with padding if necessary + tensor_list = list(torch.chunk(tensor, num_chunks, dim=self.dim)) + # compute the chunk size inline with ``torch.chunk`` + full_chunk_size = (tensor.size(self.dim) + num_chunks - 1) // num_chunks + + # Compute chunk size for each chunk for ``self.dim`` + chunk_sizes = [ + tensor_list[idx].size(self.dim) if idx < len(tensor_list) else 0 + for idx in range(num_chunks) + ] + # Compute pad size on each chunk + pad_sizes = [full_chunk_size - chunk_size for chunk_size in chunk_sizes] + + # Reuse tensor to fill empty chunk with empty tensor + num_empty_tensors = num_chunks - len(tensor_list) + tensor_size = list(tensor_list[0].size()) + tensor_size = [ + size if idx != self.dim else 0 for idx, size in enumerate(tensor_size) + ] + tensor = tensor.new_zeros(tensor_size) + for _ in range(num_empty_tensors): + tensor_list.append(tensor) + + if with_padding or contiguous: + shard_list = [] + for shard, pad_size in zip(tensor_list, pad_sizes): + # Fill the empty tensor with zeroes with padding. 
+ if with_padding and pad_size > 0: + shard = self._pad_tensor(shard, pad_size) + shard = shard.contiguous() if contiguous else shard + shard_list.append(shard) + return shard_list, pad_sizes + else: + return tensor_list, pad_sizes + + def _pad_tensor( + self, + tensor: torch.Tensor, + pad_size: int, + ) -> torch.Tensor: + if pad_size == 0: + return tensor + pad = [0, 0] * (tensor.ndim - self.dim) + pad[-1] = pad_size + return torch.nn.functional.pad(tensor, pad) + + def _unpad_tensor( + self, + tensor: torch.Tensor, + pad_size: int, + ) -> torch.Tensor: + if pad_size == 0: + return tensor + return tensor.narrow( + self.dim, + start=0, + length=tensor.size(self.dim) - pad_size, + ) + + @staticmethod + def _local_shard_size_on_dim( + size_on_dim: int, + num_chunks: int, + rank: int, + return_offset: bool = False, + ) -> Tuple[int, int]: + """ + returns the local shard size and offset on a given tensor dim + """ + # Compute the chunk size inline with ``torch.chunk`` + if size_on_dim % num_chunks == 0: + full_chunk_size = size_on_dim // num_chunks + return full_chunk_size, full_chunk_size * rank if return_offset else -1 + + # uneven sharding case + full_chunk_size = (size_on_dim + num_chunks - 1) // num_chunks + shard_starting_idx = full_chunk_size * rank + + if size_on_dim < shard_starting_idx: + return 0, size_on_dim if return_offset else -1 + else: + local_shard_size = ( + min(size_on_dim, shard_starting_idx + full_chunk_size) + - shard_starting_idx + ) + return local_shard_size, shard_starting_idx if return_offset else -1 + + def _shard_tensor( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + """ + shard and scatter a tensor on a mesh dimension (use coordinate + 0 on the mesh dimension as source of truth) + """ + my_coordinate = mesh.get_coordinate() + num_chunks = mesh.size(mesh_dim=mesh_dim) + + if my_coordinate is None: + # if rank is not part of mesh, we simply return an empty tensor + return tensor.new_empty(0, requires_grad=tensor.requires_grad) + + scatter_list, pad_sizes = self._split_tensor( + tensor, num_chunks, with_padding=True, contiguous=True + ) + + output = torch.empty_like(scatter_list[my_coordinate[mesh_dim]]) + mesh_scatter(output, scatter_list, mesh, mesh_dim=mesh_dim) + + # Only unpad if the local_tensor was padded on the dimension. 
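# Worked example for _local_shard_size_on_dim above (a sketch): with size_on_dim=5
# and num_chunks=4, ranks 0..3 get (size, offset) = (2, 0), (2, 2), (1, 4), (0, 5)
# when return_offset=True, matching the torch.chunk-style split performed above.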
+ pad_size = pad_sizes[my_coordinate[mesh_dim]] + if pad_size > 0: + output = self._unpad_tensor(output, pad_size) + return output + + def _reduce_shard_tensor( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + reduce_op: c10d.ReduceOp.RedOpType, + mesh_dim: int, + ) -> torch.Tensor: + """ + reduce and scatter a tensor on a mesh dimension + """ + my_coordinate = mesh.get_coordinate() + num_chunks = mesh.size(mesh_dim=mesh_dim) + + if my_coordinate is None: + # if rank is not part of mesh, we simply return local_tensor, + # which should be an empty tensor + return tensor + + is_padded = tensor.size(self.dim) % num_chunks != 0 + if is_padded: + scattered_list, pad_sizes = self._split_tensor( + tensor, num_chunks, with_padding=True, contiguous=True + ) + tensor = torch.cat(scattered_list, dim=self.dim) + elif not tensor.is_contiguous(): + tensor = tensor.contiguous() + + output = funcol.reduce_scatter_tensor( + tensor, reduce_op.name, scatter_dim=self.dim, group=(mesh, mesh_dim) + ) + + if is_padded: + output = self._unpad_tensor(output, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined] + return output + + def _to_replicate_tensor( + self, + local_tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + current_logical_shape: List[int], + ) -> torch.Tensor: + """ + This function all_gather all shards and return a tensor that + is replicated on the previously sharded mesh dimension + """ + num_chunks = mesh.size(mesh_dim=mesh_dim) + # check if it's uneven, so we need to pad input tensor before all_gather + local_shape = list(local_tensor.size()) + + logical_dim_size = current_logical_shape[self.dim] + is_padded = logical_dim_size % num_chunks != 0 + + if is_padded: + full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks + pad_size = full_chunk_size - local_shape[self.dim] + local_tensor = self._pad_tensor(local_tensor, pad_size) + + if not local_tensor.is_contiguous(): + local_tensor = local_tensor.contiguous() + + result = funcol.all_gather_tensor( + local_tensor, + gather_dim=self.dim, + group=(mesh, mesh_dim), + ) + if is_padded: + unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined] + result = self._unpad_tensor(result, unpad_size) + return result + + def _replicate_to_shard( + self, + local_tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_index: int, + ) -> torch.Tensor: + """ + transform from replicated tensor to a sharded tensor on + the current rank, which would perform a local chunk + """ + num_chunks = mesh.size(mesh_dim=mesh_dim) + shards, _ = self._split_tensor( + local_tensor, + num_chunks, + with_padding=False, + contiguous=False, + ) + return shards[shard_index].clone() + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Shard): + return False + return self.dim == other.dim + + def __hash__(self) -> int: + return hash(self.dim) + + def __repr__(self) -> str: + """ + machine readable representation of the Shard placement + """ + return f"Shard(dim={self.dim})" + + def __str__(self) -> str: + """human readable representation of the Shard placement""" + return f"S({self.dim})" + + +@dataclass(frozen=True) +class Replicate(Placement): + # replicate placement + def __eq__(self, other: object) -> bool: + if not isinstance(other, Replicate): + return False + return True + + def __hash__(self) -> int: + # every replicate placement is the same + return -1 + + def __repr__(self) -> str: + """ + machine readable representation of the Replicate placement + """ + return 
"Replicate()" + + def __str__(self) -> str: + """ + human readable representation of the Replicate placement + """ + return "R" + + def _replicate_tensor( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + """ + Replicate (broadcast) a torch.Tensor on a mesh dimension (use + the first coordinate on the mesh dimension as source of truth) + """ + my_coordinate = mesh.get_coordinate() + if my_coordinate is None: + # if rank is not part of mesh, we simply return an empty tensor + return tensor.new_empty(0, requires_grad=tensor.requires_grad) + + tensor = tensor.contiguous() + mesh_broadcast(tensor, mesh, mesh_dim=mesh_dim) + return tensor + + +@dataclass(frozen=True) +class _Partial(Placement): + # This is a default _Partial placement with element-wise reduce op + # _Partial define three contracts: + # 1. _reduce_value: reduce the value of the tensor on the mesh dimension + # 2. _reduce_shard_value: reduce_scatter the value of the tensor on the mesh dimension + # 3. _partition_value: partition the value of a replicated tensor on the mesh dimension + # We can implement custom reductions as needed by subclassing this + # class and override those contracts. + reduce_op: c10d.ReduceOp.RedOpType = c10d.ReduceOp.SUM + + def _reduce_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + return funcol.all_reduce( + tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim) + ) + + def _reduce_shard_value( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_spec: Placement, + ) -> torch.Tensor: + # by default call reduce_shard_tensor of the shard_spec. + shard_spec = cast(Shard, shard_spec) + return shard_spec._reduce_shard_tensor(tensor, mesh, self.reduce_op, mesh_dim) + + def _partition_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + # _partition_value is the conjugate operation of _reduce_value + # - i.e. _partition_value on a sum reduce op is just a divison operation + # - the _reduce_value on a sum reduce op would just be a sum(allreduce) operation + # TODO: if the reduce_op is min/max, etc. the _partition_value should be a + # different operation + assert ( + self.reduce_op == c10d.ReduceOp.SUM + ), "only support replicate to PartialSUM for now!" + num_chunks = mesh.size(mesh_dim=mesh_dim) + return tensor / num_chunks + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _Partial): + return False + return self.reduce_op == other.reduce_op + + def __hash__(self) -> int: + return 1 + hash(self.reduce_op) + + def __repr__(self) -> str: + """ + machine readable representation of the Partial placement + """ + return f"_Partial(reduce_op={self.reduce_op})" + + def __str__(self) -> str: + """ + human readable representation of the Partial placement + """ + return "P" + + +class TensorMeta(NamedTuple): + # simple named tuple to represent tensor metadata + # intentionally to stay simple only for sharding + # propagation purposes. + shape: torch.Size + stride: Tuple[int, ...] + dtype: torch.dtype + + +# used internally to propagate the placements +@dataclass +class DTensorSpec: + mesh: DeviceMesh + placements: Tuple[Placement, ...] 
+ + # tensor meta will only be set during sharding propagation + tensor_meta: Optional[TensorMeta] = None + + def __post_init__(self): + if not isinstance(self.placements, tuple): + self.placements = tuple(self.placements) + self._hash: Optional[int] = None + + def __setattr__(self, attr: str, value: Any): + super().__setattr__(attr, value) + # Make sure to recompute the hash in case any of the hashed attributes + # change (though we do not expect `mesh` or `placements` to change) + if hasattr(self, "_hash") and attr in ("mesh", "placements", "tensor_meta"): + self._hash = None + + def _hash_impl(self) -> int: + # hashing and equality check for DTensorSpec are used to cache the sharding + # propagation results. We only need to consider the mesh, placements, shape + # dtype and stride. + # Caveat: we need to keep this in mind and sync hash and eq if we add more + # fields to them. + if self.tensor_meta is not None: + return hash( + ( + self.mesh, + self.placements, + self.tensor_meta.shape, + self.tensor_meta.stride, + self.tensor_meta.dtype, + ) + ) + return hash((self.mesh, self.placements)) + + def __hash__(self) -> int: + # We lazily cache the spec to avoid recomputing the hash upon each + # use, where we make sure to update the hash when the `tensor_meta` + # changes by overriding `__setattr__`. This must be lazy so that Dynamo + # does not try to hash non-singleton `SymInt`s for the stride. + if self._hash is None: + self._hash = self._hash_impl() + return self._hash + + def __eq__(self, __o: object) -> bool: + if not ( + isinstance(__o, DTensorSpec) + and self.mesh == __o.mesh + and self.placements == __o.placements + ): + return False + if self.tensor_meta is None or __o.tensor_meta is None: + return self.tensor_meta == __o.tensor_meta + + return ( + self.tensor_meta.shape == __o.tensor_meta.shape # type: ignore[union-attr] + and self.tensor_meta.stride == __o.tensor_meta.stride # type: ignore[union-attr] + and self.tensor_meta.dtype == __o.tensor_meta.dtype # type: ignore[union-attr] + ) + + def __str__(self) -> str: + """ + human readable representation of the DTensorSpec + """ + if len(self.placements) == 1: + placement_str = str(self.placements[0]) + else: + placement_str = str(self.placements) + + if self.tensor_meta is not None: + tensor_shape = str(tuple(self.tensor_meta.shape)) + else: + tensor_shape = "unknown shape" + + return f"Spec({placement_str} on {tensor_shape})" + + @property + def shape(self) -> torch.Size: + if self.tensor_meta is None: + raise ValueError("tensor_meta is not set") + return self.tensor_meta.shape + + @property + def stride(self) -> Tuple[int, ...]: + if self.tensor_meta is None: + raise ValueError("tensor_meta is not set") + return self.tensor_meta.stride + + @property + def ndim(self) -> int: + if self.tensor_meta is None: + raise ValueError("tensor_meta is not set") + return len(self.tensor_meta.shape) + + @property + def num_shards(self) -> int: + num_shards = 1 + for i, placement in enumerate(self.placements): + if placement.is_shard(): + num_shards *= self.mesh.size(i) + return num_shards + + @property + def device_mesh(self) -> DeviceMesh: + # simple aliasing for the mesh field, make some + # checks that mixes DTensor/DTensorSpec easier + return self.mesh + + @property + def dim_map(self) -> List[int]: + """ + dim_map is a property we derive from `placements` of + the distributed tensor. 
It simply return a list of ints + where dim_map[i] denotes the sharding mapping to the mesh + dimension, and len(dim_map) == dist_tensor.ndim + dim_map[i] = -1: means tensor dim i replicate on mesh + dim_map[i] = j: means tensor dim i shard on mesh dim j + + For example, we have a dist tensor that have the shape of + [18, 20, 30], and device_mesh([0, 1, 2, 3]), placements: + [Shard(1)], the dim_map of this placement would be: + [-1, 0, -1]. This representation is pretty helpful during + sharding propagation where we could know exactly each + tensor dimension is sharded or not. + + Note that if placements contains `_Partial`, we have to + explicitly deal with it, so that when we create a DTensorSpec + with dim_map, we could properly record the pending sums. + """ + # dims mapping of dist tensor sharding + # return size of tensor ndim, -1 represent replicate + # and int >=0 represent shard on that device mesh dim + r = [-1] * self.ndim + for i, placement in enumerate(self.placements): + if placement.is_shard(): + shard_dim = cast(Shard, placement).dim + if r[shard_dim] > -1: + raise ValueError( + f"Tensor dim {shard_dim} is already sharded on mesh dim {r[shard_dim]}," + " DTensor operator implementation does not support things like hybrid" + " sharding strategies yet (i.e. [Shard(0), Shard(0)])" + ) + r[shard_dim] = i + return r + + @property + def sums(self) -> List[int]: + """ + sums is a property we derive from `placements` of the + distributed tensor. It simply return a list of ints where + sums[i] denotes the pending sum (partial) on mesh dim i + """ + return [ + idx + for idx, placement in enumerate(self.placements) + if placement.is_partial() + ] + + @classmethod + def from_dim_map( + cls, + mesh: DeviceMesh, + dim_map: List[int], + sums: List[int], + tensor_meta: Optional[TensorMeta] = None, + ) -> "DTensorSpec": + """ + Construct a DTensorSpec from dim_map list and pending sum. + + Args: + mesh (class:`DeviceMesh`): device mesh to be used in the DTensorSpec + dim_map (List[int]): a list of integer that represents sharding on each + tensor dimension, see `dim_map` property doc for details + sums (List[int]): a list of integer that represents the dist tensor have + pending sum on which device mesh dimension. + tensor meta (TensorMeta): DTensor metadata + + Return: + a class:`DTensorSpec` object + """ + # by default replicate on device mesh dims + placements: List[Placement] = [Replicate() for _ in range(mesh.ndim)] + + # find all mesh dims that need pending reductions + for s in sums: + placements[s] = _Partial() + + for i, m in enumerate(dim_map): + if m >= 0: + placement = placements[m] + if placement.is_shard(): + placement = cast(Shard, placement) + raise RuntimeError( + f"DeviceMesh dimension cann't be mapped to two dimension of the same tensor: {i} and {placement.dim}" + ) + elif placement.is_partial(): + raise RuntimeError( + f"DeviceMesh dimension {m} cannot be both shard and partial!" + ) + placements[m] = Shard(i) + + return cls(mesh, tuple(placements), tensor_meta=tensor_meta) + + def is_replicated(self): + """ + return True if the current DTensorSpec replicates on all mesh dims (devices) + """ + return all(placement.is_replicate() for placement in self.placements) + + def shallow_copy_with_tensor_meta( + self, tensor_meta: Optional[TensorMeta] + ) -> "DTensorSpec": + """ + Shallow copy the DTensorSpec with a new tensor_meta. + """ + assert tensor_meta is not None, "shallow copy with no tensor_meta!" 
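# Worked example for the dim_map / from_dim_map pair above (a sketch, hypothetical
# 2-D mesh): a rank-3 tensor with placements (Shard(1), Replicate()) has
# dim_map == [-1, 0, -1] and sums == []; DTensorSpec.from_dim_map(mesh, [-1, 0, -1], [])
# rebuilds exactly (Shard(1), Replicate()).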
+ return DTensorSpec( + self.mesh, + self.placements, + tensor_meta=tensor_meta, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/random.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/random.py new file mode 100644 index 0000000000000000000000000000000000000000..3fdc81a791d3b88a97ced9a433d5380a4fc00fe0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/random.py @@ -0,0 +1,372 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import contextlib +import warnings +from typing import Dict, List, Optional + +import torch +import torch.distributed as dist + +from torch import Tensor +from torch.distributed._tensor.placement_types import DTensorSpec, Shard +from torch.distributed.device_mesh import _get_device_handle, DeviceMesh + + +_rng_tracker: Optional["RNGStateTracker"] = None + + +def is_rng_supported_mesh(device_mesh: DeviceMesh) -> bool: + """Checks if the current device of `device_mesh` supports DTensor's random APIs. + Currently DTensor Random APIs only supports cuda/cuda-like devices. We suggest + users call this API to test the availability before using our random APIs. + + Args: + device_mesh (:class:`DeviceMesh`): The device mesh on which we check if the + random ops APIs are supported. + + Returns: + A bool value. True if `device_mesh` supports DTensor Random APIs; False otherwise. + + .. warning:: + Currently we only support correct RNG on cuda/cuda-like devices. + """ + device_handle = _get_device_handle(device_mesh.device_type) + if device_handle and hasattr(device_handle, "set_rng_state"): + return True + else: + warnings.warn( + f"DTensor random operators may not have complete support on {device_mesh.device_type} device mesh" + ) + return False + + +def manual_seed(seed: int, device_mesh: DeviceMesh) -> None: + """Sets the seed for generating random numbers for the calling rank. + + Args: + seed (int): The desired seed. + device_mesh (:class:`DeviceMesh`): The device mesh to set the seed. + + Returns: + None + + .. warning:: + When calling this function, :func:`manual_seed` must be called from all ranks of the + default `ProcessGroup` even if some ranks may not be a part of the `device_mesh`, + with the same `seed` value. + If ``device_mesh`` is a sub-mesh and the calling rank is not a part of it, + `manual_seed` will not set its GPU device's generator seed. + Current implementation only supports a GPU device mesh. + """ + device_handle = _get_device_handle(device_mesh.device_type) + if not device_handle: + raise NotImplementedError( + f"DTensor randomness only supports cuda/cuda-like device type, but got {device_mesh.device_type}" + ) + + # allgather the seed over the default PG + object_list = [seed] * dist.get_world_size() + dist.all_gather_object(object_list, seed) + for rank, object in enumerate(object_list): + if seed != int(object): + raise RuntimeError( + f"calling manual_seed function over {device_mesh} but received different seed values on ranks:", + f"seed on rank {dist.get_rank()} is {seed}, and seed on rank {rank} is {object}!", + ) + # instantiate a RNG tracker if haven't. By default DTensor uses an + # OffsetBasedRNGTracker to perform random operators. 
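# Minimal usage sketch (an assumption-laden example: torch.distributed initialized
# with 2 CUDA ranks, init_device_mesh imported from torch.distributed.device_mesh;
# every rank must pass the same seed or the all_gather check above raises):
#   mesh = init_device_mesh("cuda", (2,))
#   manual_seed(1234, mesh)
#   # subsequent DTensor random ops on `mesh` now draw from rank-coordinated offsets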
+ global _rng_tracker + if not _rng_tracker: + _rng_tracker = OffsetBasedRNGTracker(device_mesh.device_type) + + # the current rank is in mesh + if device_mesh.get_coordinate() is not None: + if isinstance(_rng_tracker, TensorParallelRNGTracker): + _rng_tracker._manual_seed(device_mesh, seed) + elif isinstance(_rng_tracker, OffsetBasedRNGTracker): + _rng_tracker._manual_seed(seed) + else: + raise RuntimeError( + f"Unknown type of cuda RNG state tracker: _rng_tracker = {_rng_tracker}" + ) + + +class RNGStateTracker: + """ + RNGStateTracker stores Random Number Generator (RNG) state (a ByteTensor object) + in a dict, mapping from a corresponding tag to each state tensor. It also provides + a set of convenient utility methods to help access/modify the state tensors. The most + important interface is _distribute_region which will be used when DTensor executes + a random op (an operator that calls RNG). + """ + + def __init__(self, device_type: str = "cuda"): + self._device_type = device_type + self._device_handle = _get_device_handle(device_type) + if not (self._device_handle and self._device_handle.is_available()): + raise RuntimeError( + f"{self.__class__.__name__} instantiation requires the presence of CUDA/CUDA-like device" + ) + + self._states: Dict[str, Tensor] = {} + self._devices = [self._device_handle.current_device()] + self._use_distribute_region = True + + @property + def rng_states(self) -> Dict[str, Tensor]: + return self._states + + @property + def distribute_region_enabled(self) -> bool: + return self._use_distribute_region + + @distribute_region_enabled.setter + def distribute_region_enabled(self, value) -> None: + self._use_distribute_region = value + + def rng_state_is_sync(self, name) -> bool: + return name in self.rng_states + + def get_seed(self, name: str) -> int: + if name not in self.rng_states: + raise RuntimeError( + f"{self.__class__.__name__} does not have random state for {name}" + ) + + seed_tensor = (self.rng_states[name])[0:8].view(dtype=torch.int64) + return int(seed_tensor.item()) + + def set_seed(self, name: str, seed: int) -> None: + seed_tensor = torch.tensor([seed]).view(torch.uint8) + offset_tensor = torch.tensor([0]).view(torch.uint8) + self.rng_states[name] = torch.cat([seed_tensor, offset_tensor]) + + def _distribute_region(self, spec: DTensorSpec): + pass + + +class OffsetBasedRNGTracker(RNGStateTracker): + """ + This subclass of `RNGStateTracker` defines the default policy of how RNG states + should be shared and synchronized among all ranks to respect the semantics of DTensor + random operators. + """ + + def __init__(self, device_type: str = "cuda"): + super().__init__(device_type) + # synchronize RNG state using rank 0's current one + rng_state = self._device_handle.get_rng_state().to(device_type) + dist.broadcast(rng_state, 0) + self.rng_states["parallel-rng"] = rng_state.to("cpu") + + def _manual_seed(self, parallel_seed: int) -> None: + self.set_seed("parallel-rng", parallel_seed) + + @contextlib.contextmanager + def _distribute_region(self, spec: DTensorSpec): + # check if the parallel rng state has been synchronized or not + if not self.rng_state_is_sync("parallel-rng"): + raise RuntimeError( + "OffsetBasedRNGTracker requires the random state to be synchronized " + "before entering into a distribute region!" 
+ ) + + if self.distribute_region_enabled: + old_offset = self.get_offset("parallel-rng") + self._set_pre_op_offset(spec) + with torch.random.fork_rng(self._devices, device_type=self._device_type): + self._device_handle.set_rng_state(self.rng_states["parallel-rng"]) + try: + yield # execute the region code + finally: + # update offset to synchronize among ranks + self._set_post_op_offset(spec, old_offset) + else: + yield + + def get_offset(self, name: str) -> int: + if name not in self.rng_states: + raise RuntimeError( + f"{self.__class__.__name__} does not have random state for {name}" + ) + + offset_tensor = (self.rng_states[name])[8:].view(dtype=torch.int64) + return int(offset_tensor.item()) + + def set_offset(self, name: str, offset: int) -> None: + if name not in self.rng_states: + raise RuntimeError( + f"{self.__class__.__name__} does not have random state for {name}" + ) + + seed_tensor = (self.rng_states[name])[0:8] + offset_tensor = torch.tensor([offset]).view(torch.uint8) + self.rng_states[name] = torch.cat([seed_tensor, offset_tensor]) + + def _set_pre_op_offset(self, spec: DTensorSpec) -> None: + """Set the starting RNG offset for current device's local shard before actual + op execution. The pre_op_offset value should start from the current RNG offset + and increment by the size of local shard until it reaches the size of the whole + DTensor. For different ranks that hold the same DTensor shard, their pre_op_offset + will be the same. + + Args: + spec (:class:`DTensorSpec`): the spec of the DTensor object on which + we prepare the offset for running random ops. + + Returns: + None + + .. warning:: + Note that, current implementation does not consider DTensor's continguity. + + Example: + take a DTensor of shape [8, 16] as an example. Assume that the DTensor + is placed on a device mesh with placements ([Shard(1), Replicate(), Shard(0)]), + and the mesh is: + [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + ``spec.mesh.get_coordinate()`` provides the coordinate of the current rank + in the mesh. For example, the coordinate of rank 5 is (1, 0, 1). + + Another concept to introduce besides rank coordinate is shard coordinate. + Each rank holds a local shard of the DTensor. In the example, the DTensor + is partitioned into 4 [4, 8] shards. The first shard has 2 replicas and + rank 0 (coord (0, 0, 0)) and rank 2 (coord (0, 1, 0)) have 1 replica each. + That being said, the local shard on rank 0 and rank 2 correspond to the same + shard of the DTensor. To denote each DTensor shard, we use a shard coordinate + (in the example, it will be a tuple (i, j) where shard (i, j) has the slice + DTensor[4 * i : 4 * (i + 1), 8 * j : 8 * (j + 1)], 0 <= i < 2, 0 <= j < 2). + + Once we have rank coordinate and shard coordinate, we can calculate on each rank + what shard of the DTensor the rank holds, with the help of dim_map. The dim_map + of the above DTensor is [2, 0] so the shard coordinate of a rank with rank coord + (x, y, z) is simply (z, x) by taking(rank_coord[dim_map[0]],rank_coord[dim_map[1]]). + Following this calculation, + rank 0 and rank 2 holds the shard of coord (0, 0); + rank 1 and rank 3 holds the shard of coord (0, 1); + rank 4 and rank 6 holds the shard of coord (1, 0); + rank 5 and rank 7 holds the shard of coord (1, 1); + + The last value to calculate before obtaining the starting offset is the shard linear index. + The starting offset for each rank will be its shard_linear_index * local_tensor_numel. 
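# Worked arithmetic for the docstring example above (a sketch): each [4, 8] local
# shard holds 32 elements, so with a starting offset of 0 the four distinct shards
# get pre-op offsets 0, 32, 64 and 96 (already multiples of 4, so the rounding below
# is a no-op), and _set_post_op_offset later advances every rank to old_offset + 128.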
+ """ + dtensor_shape = spec.shape + mesh = spec.mesh + dim_map = spec.dim_map + + # Compute shard coordinate: + # The coordinate on each tensor dim is a tuple (idx, range) + # If a DTensor is partitioned on its dim i into n shards, and the current rank + # holds the j-th, then its shard coordinate will be (idx=j, range=n) on dim i + coordinate = mesh.get_coordinate() + assert coordinate is not None + shard_coord = [ + coordinate[mesh_dim] if mesh_dim >= 0 else 0 for mesh_dim in dim_map + ] + shard_size = [ + mesh.size(mesh_dim) if mesh_dim >= 0 else 1 for mesh_dim in dim_map + ] + + # compute shard linear index + shard_linear_idx = self._calc_shard_linear_idx(shard_coord, shard_size) + + # compute starting offset using the first shard's size + local_size_on_rank_0 = list(dtensor_shape) + for idx, placement in enumerate(spec.placements): + if isinstance(placement, Shard): + mesh_dim_size = mesh.size(idx) + shard_dim = placement.dim + local_size_on_rank_0[shard_dim] = placement._local_shard_size_on_dim( + dtensor_shape[shard_dim], + mesh_dim_size, + 0, + return_offset=False, + )[0] + + from torch.distributed._tensor.ops.utils import prod + + local_size = prod(local_size_on_rank_0) + + # get current RNG offset + current_offset = self.get_offset("parallel-rng") + + # pytorch: offset must be multiple of 4 + # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp + offset_incr = (shard_linear_idx * local_size + 3) // 4 * 4 + self.set_offset("parallel-rng", current_offset + offset_incr) + + def _set_post_op_offset(self, spec: DTensorSpec, old_offset: int) -> None: + """Sets the RNG to a synchronized state after running the local random op. Every + rank should set its RNG offset to `old_offset + DTensor.numel()` where old_offset is + the offset before calling `set_pre_op_offset` i.e. the offset before running DTensor + random ops. + + Args: + spec (:class:`DTensorSpec`): the spec of the DTensor object on which + we post-process the offset for running random ops. 
+ + Returns: + None + """ + dtensor_shape = spec.shape + + from torch.distributed._tensor.ops.utils import prod + + numel = prod(dtensor_shape) + # pytorch: offset must be multiple of 4 + # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp + numel = (numel + 3) // 4 * 4 + self.set_offset("parallel-rng", old_offset + numel) + + def _calc_shard_linear_idx( + self, shard_coord: List[int], shard_size: List[int] + ) -> int: + # compute shard linear index + shard_linear_idx = 0 + shard_coord_stride = 1 + for idx, size in zip(reversed(shard_coord), reversed(shard_size)): + shard_linear_idx += idx * shard_coord_stride + shard_coord_stride *= size + + return shard_linear_idx + + +class TensorParallelRNGTracker(RNGStateTracker): + def __init__(self, device_type: str = "cuda"): + super().__init__(device_type) + # copy the default RNG state + self.rng_states["tensor-parallel-rng"] = self._device_handle.get_rng_state() + + def _manual_seed( + self, + tp_mesh: DeviceMesh, + base_seed: int = 1234, + ): + tensor_parallel_rank = tp_mesh.get_local_rank() + # this magic number 2718 comes from Megatron's code + # (https://github.com/NVIDIA/Megatron-LM/blob/060415572f4365a2e895f8036c4e37dad0efbdf5/megatron/core/tensor_parallel/random.py#L162-L163) + MegatronMagicNum = 2718 + tensor_parallel_seed = base_seed + MegatronMagicNum + tensor_parallel_rank + self.set_seed("tensor-parallel-rng", tensor_parallel_seed) + + @contextlib.contextmanager + def _distribute_region(self, spec: DTensorSpec): + # check if the tensor parallel rng state has been synchronized or not + if not self.rng_state_is_sync("tensor-parallel-rng"): + raise RuntimeError( + "TensorParallelRNGTracker requires the random state to be synchronized " + "before entering into a distribute region!" + ) + + if self.distribute_region_enabled: + with torch.random.fork_rng(self._devices, device_type=self._device_type): + self._device_handle.set_rng_state( + self.rng_states["tensor-parallel-rng"] + ) + try: + yield + finally: + self.rng_states[ + "tensor-parallel-rng" + ] = self._device_handle.get_rng_state() + else: + yield diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py new file mode 100644 index 0000000000000000000000000000000000000000..58f3f42c94b70f129899ba33dafbf8b4e349a900 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py @@ -0,0 +1,337 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from functools import lru_cache +from typing import cast, Dict, List, NamedTuple, Tuple + +import torch +import torch.distributed._functional_collectives as funcol +import torch.distributed._tensor.api as dtensor +from torch.distributed._tensor.device_mesh import DeviceMesh +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + + +class _TransformInfo(NamedTuple): + mesh_dim: int + src_dst_placements: Tuple[Placement, Placement] + # logical_shape on this mesh dimension + logical_shape: List[int] + + +def _replicate_then_shard(val: _TransformInfo) -> int: + """ + This is a helper function to allow reordering _TransformInfo list. The high level + idea is that we want to reorder the sharding redistributions so that the DTensor + redistribution is consistent with its full tensor. This is built on top of two simple + assumptions: + 1. Replication happens from inner to outer dimension. i.e. Shard -> Replicate + 2. 
Sharding happens from outer to inner dimension, i.e. Replicate -> Shard + + So we always put the replication first and put sharding later. + """ + mesh_dim = val.mesh_dim + src, dst = val.src_dst_placements + if (dst.is_replicate() or dst.is_partial()) and src.is_shard(): + return -mesh_dim + elif (src.is_replicate() or src.is_partial()) and dst.is_shard(): + return mesh_dim + else: + return 0 + + +@lru_cache(maxsize=None) +def _gen_transform_infos( + src_spec: DTensorSpec, + dst_spec: DTensorSpec, +) -> List[_TransformInfo]: + """ + Generate the transform infos from the source placements to the target placements, to + transform from source to target placement it might have multipl steps, i.e. it might + decompose Si -> Sj into Si -> R -> Sj. + This would detects if there're mis-aligned shardings between src/dst placements. + i.e. (Shard(0), Shard(0)) -> (Replicate(), Shard(0)), in this case Shard(0) -> Shard(0) + for mesh dimension 1 actually needs reshard, because in the first case it's a sub-sharding + of an already tensor dimension 0, and in the second case, it's the first sharding on tensor + dimension 0. + + Note that we also currently handles sharding on different tensor dimensions, e.g. + Shard(0) -> Shard(1) in this pass + """ + src_dim_counts: Dict[int, int] = {} + dst_dim_counts: Dict[int, int] = {} + transform_infos: List[_TransformInfo] = [] + + src_placements = src_spec.placements + dst_placements = dst_spec.placements + device_mesh = src_spec.device_mesh + my_coordinate = device_mesh.get_coordinate() + assert my_coordinate is not None + + # logical shape records the logic tensor shape on the mesh dimension + # this is useful to ensure uneven sharding gets correct output shape + initial_logical_shape = list(src_spec.shape) + mesh_dims_to_logical_shape = [initial_logical_shape] + mesh_ndim = len(src_placements) + + for i, (src, dst) in enumerate(zip(src_placements, dst_placements)): + # detect mis-aligned sharding and build logical shapes + current_logical_shape = mesh_dims_to_logical_shape[i] + if isinstance(src, Shard): + src_dim_counts[src.dim] = src_dim_counts.get(src.dim, 0) + 1 + + if i < mesh_ndim - 1: + # calculate and save the logical shape for this sharding + mesh_dim_size = device_mesh.size(mesh_dim=i) + local_shard_size, _ = src._local_shard_size_on_dim( + current_logical_shape[src.dim], + mesh_dim_size, + my_coordinate[i], + ) + new_logical_shape = list(current_logical_shape) + new_logical_shape[src.dim] = local_shard_size + mesh_dims_to_logical_shape.append(new_logical_shape) + else: + mesh_dims_to_logical_shape.append(current_logical_shape) + + if isinstance(dst, Shard): + dst_dim_counts[dst.dim] = dst_dim_counts.get(dst.dim, 0) + 1 + + if ( + isinstance(src, Shard) + and isinstance(dst, Shard) + and ( + src.dim != dst.dim or src_dim_counts[src.dim] != dst_dim_counts[dst.dim] + ) + ): + # decompose Shard(i) -> Shard(j) into Shard(i) -> Replicate() -> Shard(j) + transform_infos.append( + _TransformInfo( + mesh_dim=i, + src_dst_placements=(src, Replicate()), + logical_shape=mesh_dims_to_logical_shape[i], + ) + ) + transform_infos.append( + _TransformInfo( + mesh_dim=i, + src_dst_placements=(Replicate(), dst), + logical_shape=mesh_dims_to_logical_shape[i], + ) + ) + else: + transform_infos.append( + _TransformInfo( + mesh_dim=i, + src_dst_placements=(src, dst), + logical_shape=mesh_dims_to_logical_shape[i], + ) + ) + + # sort the pairs by first perform replication then sharding + transform_infos.sort(key=_replicate_then_shard) + return transform_infos + + +def 
redistribute_local_tensor( + local_tensor: torch.Tensor, + current_spec: DTensorSpec, + target_spec: DTensorSpec, + *, + async_op: bool = False, + is_backward: bool = False, +) -> torch.Tensor: + """ + This redistribute the local tensor (torch.Tensor) from the current DTensorSpec to + the target DTensorSpec, which involves the necessary collective calls to transform + the local shard of the DTensor from its current spec to the target spec. + """ + + if current_spec.mesh != target_spec.mesh: + # TODO: alltoall/permute reshuffling to change device_mesh if they are not the same + raise NotImplementedError("Cross device mesh comm not supported yet!") + + new_local_tensor = None + device_mesh = current_spec.mesh + + my_coordinate = device_mesh.get_coordinate() + + if my_coordinate is None: + # if rank is not part of mesh, we skip redistribute and simply return local_tensor, + # which should be an empty tensor + return local_tensor + + transform_infos = _gen_transform_infos(current_spec, target_spec) + + for transform_info in transform_infos: + i = transform_info.mesh_dim + current, target = transform_info.src_dst_placements + num_chunks = device_mesh.size(mesh_dim=i) + + if current == target: + # short cut, just use the original local tensor + new_local_tensor = local_tensor + continue + + if target.is_replicate(): + # Case 1: target is Replicate + if current.is_partial(): + partial_spec = cast(_Partial, current) + new_local_tensor = partial_spec._reduce_value( + local_tensor, device_mesh, i + ) + elif current.is_shard(): + current_placement = cast(Shard, current) + new_local_tensor = current_placement._to_replicate_tensor( + local_tensor, device_mesh, i, transform_info.logical_shape + ) + else: + raise RuntimeError( + f"redistribute from {current} to {target} not supported yet" + ) + elif target.is_shard(): + # Case 2: target is Shard + target_placement = cast(Shard, target) + target_dim = target_placement.dim + if current.is_partial(): + partial_spec = cast(_Partial, current) + new_local_tensor = partial_spec._reduce_shard_value( + local_tensor, device_mesh, i, target_placement + ) + elif current.is_replicate(): + # split the tensor and return the corresponding cloned local shard + new_local_tensor = target_placement._replicate_to_shard( + local_tensor, device_mesh, i, my_coordinate[i] + ) + else: + # NOTE: we don't support this case efficiently yet, the fallback path we are going here is + # to decompose Shard(0) -> Shard(1) into Shard(0) -> Replicate -> Shard(1) + # TODO: enable this with all_to_all + assert ( + current.is_shard() + ), f"Current placement should be shard but found {current}" + shard_spec = cast(Shard, current) + if shard_spec.dim != target_placement.dim: + new_local_tensor = shard_spec._to_replicate_tensor( + local_tensor, device_mesh, i, transform_info.logical_shape + ) + shards, _ = target_placement._split_tensor( + new_local_tensor, + num_chunks, + with_padding=False, + contiguous=False, + ) + new_local_tensor = shards[my_coordinate[i]] + elif target.is_partial(): + if current.is_replicate(): + partial_spec = cast(_Partial, target) + # skip the replicate to partial transformation when we are in backward pass + # In this case we keep the grad as replicate, this is because we don't + # want to convert the replicated gradients back to partial, although + # that's logically conform with the same layout, converting the gradients + # back to partial is actually useless as you would have to do reduce later + # which would be more expensive than keeping it replicate! 
For this reason, + # we keep the replicate grad here. + new_local_tensor = ( + partial_spec._partition_value(local_tensor, device_mesh, i) + if not is_backward + else local_tensor + ) + elif current.is_shard(): + if not is_backward: + raise RuntimeError( + f"redistribute from {current} to {target} not supported yet" + ) + # for backward shard -> partial, we just need to convert the shard to replicate + current_placement = cast(Shard, current) + new_local_tensor = current_placement._to_replicate_tensor( + local_tensor, device_mesh, i, transform_info.logical_shape + ) + else: + # partial -> partial no op, should never hit + new_local_tensor = local_tensor + + assert new_local_tensor is not None + local_tensor = new_local_tensor + + assert new_local_tensor is not None, "redistribute failed!" + + if not async_op and isinstance(new_local_tensor, funcol.AsyncCollectiveTensor): + new_local_tensor = new_local_tensor.wait() + + return new_local_tensor + + +class Redistribute(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + # pyre-fixme[2]: Parameter must be annotated. + ctx, + input: "dtensor.DTensor", + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + async_op: bool = False, + ): + current_spec = input._spec + ctx.current_spec = current_spec + ctx.async_op = async_op + target_spec = DTensorSpec( + device_mesh, placements, tensor_meta=input._spec.tensor_meta + ) + + local_tensor = input._local_tensor + output = redistribute_local_tensor( + local_tensor, current_spec, target_spec, async_op=async_op + ) + + return dtensor.DTensor( + output, + device_mesh, + target_spec.placements, + shape=input.shape, + dtype=input.dtype, + requires_grad=input.requires_grad, + stride=input.stride(), + ) + + @staticmethod + def backward(ctx, grad_output: "dtensor.DTensor"): # type: ignore[override] + previous_spec = ctx.current_spec + current_spec = grad_output._spec + async_op = ctx.async_op + + local_tensor = grad_output._local_tensor + output = redistribute_local_tensor( + local_tensor, + current_spec, + previous_spec, + async_op=async_op, + is_backward=True, + ) + # normalize the target placement to replicate if it is partial + normalized_placements: List[Placement] = [] + for previous_placement in previous_spec.placements: + if previous_placement.is_partial(): + # keep target placement to replicate instead of partial in this case + normalized_placements.append(Replicate()) + else: + normalized_placements.append(previous_placement) + output_dtensor = dtensor.DTensor( + output, + previous_spec.mesh, + tuple(normalized_placements), + shape=grad_output.shape, + dtype=grad_output.dtype, + requires_grad=grad_output.requires_grad, + stride=grad_output.stride(), + ) + + return ( + output_dtensor, + None, + None, + None, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py new file mode 100644 index 0000000000000000000000000000000000000000..c2cf784e8c02dad72325a4883267d3ac70a9616e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py @@ -0,0 +1,410 @@ +from functools import lru_cache +from itertools import chain +from typing import Callable, cast, Dict, List, Optional, Sequence, Union + +import torch +from torch._ops import OpOverload +from torch._subclasses import FakeTensorMode +from torch.distributed._tensor._utils import try_find_mesh_from_args +from torch.distributed._tensor.op_schema import ( + DTensorSpec, 
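# Usage sketch for the redistribute machinery above (assumes a 1-D, 2-rank mesh and
# that the snippet runs on every rank; everything except the public DTensor API is
# hypothetical):
#   dt = distribute_tensor(torch.arange(8.0).reshape(4, 2), mesh, [Shard(0)])
#   replicated = dt.redistribute(mesh, [Replicate()])   # all_gather on mesh dim 0
# In the backward pass, Redistribute keeps gradients Replicate() rather than
# re-partitioning them to _Partial, as explained in the comment above.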
+ OpInfo, + OpSchema, + OpStrategy, + OutputSharding, + OutputSpecType, + PlacementStrategy, + RuntimeSchemaInfo, + StrategyType, + TupleStrategy, +) +from torch.distributed._tensor.placement_types import TensorMeta +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +def _length(obj) -> int: + if obj is None: + return 0 + if not isinstance(obj, Sequence): + return 1 + return len(obj) + + +class ShardingPropagator: + def __init__(self) -> None: + self.op_to_rules: Dict[OpOverload, Callable[[OpSchema], OutputSharding]] = {} + self.op_strategy_funcs: Dict[ + OpOverload, + Callable[[DeviceMesh, OpSchema], StrategyType], + ] = {} + # op map to save static argnum to decide to reuse sharding prop cache or re-run sharding prop + self.op_to_schema_info: Dict[OpOverload, RuntimeSchemaInfo] = {} + self.propagate_op_sharding = lru_cache(None)(self.propagate_op_sharding_non_cached) # type: ignore[method-assign] + + def register_sharding_prop_rule( + self, + op_overload: OpOverload, + rule_func: Callable[[OpSchema], OutputSharding], + schema_info: Optional[RuntimeSchemaInfo] = None, + ): + """ + Register a sharding propagation rule for an operator. + """ + self.op_to_rules[op_overload] = rule_func + if schema_info is not None: + self.op_to_schema_info[op_overload] = schema_info + + def register_op_strategy( + self, + op_overload: OpOverload, + strategy_func: Callable[[DeviceMesh, OpSchema], StrategyType], + schema_info: Optional[RuntimeSchemaInfo] = None, + ): + """ + Register a sharding strategy generator for an operator. + """ + self.op_strategy_funcs[op_overload] = strategy_func + if schema_info is not None: + self.op_to_schema_info[op_overload] = schema_info + + @lru_cache + def _propagate_tensor_meta( + self, op_schema: OpSchema + ) -> Union[None, TensorMeta, Sequence[Optional[TensorMeta]]]: + """ + Propagate the tensor metadata, it could either return a TensorMeta + or a list/tuple of TensorMetas + """ + if op_schema.op == aten.equal.default: + # data dependent ops can't be used for fake propagation + return None + + # NOTE: We must call the tracing in fake tensor mode so that it + # avoids materializing memory + with FakeTensorMode(): + fake_args = op_schema.gen_fake_args() + fake_kwargs = op_schema.gen_fake_kwargs() + fake_out = op_schema.op(*fake_args, **fake_kwargs) + + if isinstance(fake_out, torch.Tensor): + return TensorMeta( + shape=fake_out.shape, stride=fake_out.stride(), dtype=fake_out.dtype + ) + + elif isinstance(fake_out, (tuple, list)): + tensor_meta_list: List[Optional[TensorMeta]] = [] + for fake_out_item in fake_out: + if isinstance(fake_out_item, torch.Tensor): + tensor_meta_list.append( + TensorMeta( + shape=fake_out_item.shape, + stride=fake_out_item.stride(), + dtype=fake_out_item.dtype, + ) + ) + else: + tensor_meta_list.append(None) + return ( + tuple(tensor_meta_list) + if isinstance(fake_out, tuple) + else tensor_meta_list + ) + else: + # if fake is not a tensor or tuple of tensor, return as none + return None + + def _wrap_output_spec_tensor_meta( + self, + op: OpOverload, + output_specs: OutputSpecType, + output_tensor_meta: Union[None, TensorMeta, Sequence[Optional[TensorMeta]]], + ) -> None: + """ + Wrap the output_specs with the tensor metadata from the output. 
+ """ + + if isinstance(output_specs, DTensorSpec): + if not isinstance(output_tensor_meta, TensorMeta): + # Either error due to ShardingPropagator or due to incorrect OutputSpec + if not isinstance(output_tensor_meta, (tuple, list)): + raise ValueError( + "ShardingPropagator error: output does not have an associated TensorMeta" + ) + raise ValueError( + f"For the op {op.name()}, `output_specs` has 1 output which does not equal the " + f"number of op outputs: {len(output_tensor_meta)}." + ) + output_specs.tensor_meta = output_tensor_meta + elif isinstance(output_specs, (tuple, list)): + if not isinstance(output_tensor_meta, (tuple, list)) or len( + output_specs + ) != len(output_tensor_meta): + raise ValueError( + f"For the op {op.name()}, `output_specs` has {len(output_specs)} outputs which does not equal the " + f"number of op outputs {_length(output_tensor_meta)}." + ) + for i, spec in enumerate(output_specs): + if isinstance(spec, DTensorSpec): + output_tensor_meta_i = output_tensor_meta[i] + if not isinstance(output_tensor_meta_i, TensorMeta): + raise ValueError( + f"ShardingPropagator error: output {i} does not have an associated TensorMeta" + ) + spec.tensor_meta = output_tensor_meta_i + + def propagate(self, op_info: OpInfo) -> None: + # We cannot use an lru cache if we know that inputs will have dynamic shapes, + # because SymInts are not hashable. + # This is generally ok because this only happens during tracing in torch.compile, + # and tracing does not need to be as fast as eagermode DTensor usages. + if op_info.schema.has_symints: + output_sharding = self.propagate_op_sharding_non_cached(op_info.schema) + else: + output_sharding = self.propagate_op_sharding(op_info.schema) + op_info.output_sharding = output_sharding + + def propagate_op_sharding_non_cached(self, op_schema: OpSchema) -> OutputSharding: + """ + Propagate the sharding for an operator given the op_schema. + """ + # special case op, we don't need to propagate for local + # scalar. TODO: figure out a better way to handle this + if op_schema.op is aten._local_scalar_dense.default: + return OutputSharding(None, [op_schema]) + + out_tensor_meta = self._propagate_tensor_meta(op_schema) + + def spec_to_strategy(spec: object) -> object: + if isinstance(spec, DTensorSpec): + return OpStrategy([PlacementStrategy(spec)]) + elif ( + isinstance(spec, (list, tuple)) + and len(spec) > 0 + and isinstance(spec[0], DTensorSpec) + ): + # tensor list create tuple strategy + tuple_strategy = [spec_to_strategy(s) for s in spec] + tuple_strategy = cast(Sequence[StrategyType], tuple_strategy) + return TupleStrategy( + tuple(tuple_strategy) if isinstance(spec, tuple) else tuple_strategy + ) + else: + return spec + + if op_schema.op in self.op_strategy_funcs: + # generate op strategy for the op. 
+ mesh = try_find_mesh_from_args(op_schema.op, op_schema.args_schema) + # swap the args spec with args strategies + args_op_strategy = [spec_to_strategy(i) for i in op_schema.args_schema] + + kwargs_op_strategy = { + k: spec_to_strategy(v) for k, v in op_schema.kwargs_schema.items() + } + + # construct a new OpSchema on args for strategy based propagation + strategy_schema: OpSchema = OpSchema( + op=op_schema.op, + args_schema=tuple(args_op_strategy), + kwargs_schema=kwargs_op_strategy, + ) + + op_strategy = self.op_strategy_funcs[op_schema.op](mesh, strategy_schema) + + if isinstance(op_strategy, OpStrategy): + # single Op strategy + output_strategy = self._select_strategy(op_strategy) + + # check if we need to redistribute the input + needs_redistribute = False + expected_input_specs = [] + + # in case where the op does not specify input_specs and output_specs + # is a DTensorSpec, we use output_specs as the spec for each DTensor + # input arg. + if output_strategy.input_specs is None: + assert isinstance(output_strategy.output_specs, DTensorSpec) + + for idx, input_spec in enumerate(op_schema.args_spec): + desired_spec = ( + output_strategy.output_spec + if output_strategy.input_specs is None + else output_strategy.input_specs[idx] + ) + expected_input_specs.append(desired_spec) + if input_spec.placements != desired_spec.placements: + needs_redistribute = True + + suggestion_schema = None + if needs_redistribute: + reshard_schema = OpSchema( + op_schema.op, tuple(expected_input_specs), {} + ) + reshard_schema._inplace_rewrap_schema_suggestion(op_schema) + suggestion_schema = [reshard_schema] + + # construct output spec for the op + if op_schema.return_type_tuple_tensor_like(): + # for ops that return multiple tensors and the output_specs is not + # a tuple, we use a tuple of that single output spec as the new + # output_specs + output_specs: OutputSpecType = output_strategy.output_specs + if isinstance(output_specs, DTensorSpec): + output_specs = tuple( + [ + # create a new DTensorSpec with the same placement as the + # output_specs in output_strategy + DTensorSpec( + mesh=output_specs.mesh, + placements=output_specs.placements, + tensor_meta=output_specs.tensor_meta, + ) + for _ in range(len(op_schema.op._schema.returns)) + ] + ) + elif op_schema.return_type_tensor(): + output_specs = output_strategy.output_specs + else: + output_specs = None + + output_sharding = OutputSharding( + output_specs, + suggestion_schema, + needs_redistribute=needs_redistribute, + ) + elif isinstance(op_strategy, TupleStrategy): + # tuple strategy output sharding processing + # runtime selected placement strategy for each TupleStrategy input arg + selected_strategies: List[PlacementStrategy] = [] + out_spec_list: List[DTensorSpec] = [] + for strategy in op_strategy.childs: + assert isinstance(strategy, OpStrategy) + selected_strategy = self._select_strategy(strategy) + selected_strategies.append(selected_strategy) + out_spec_list.append(selected_strategy.output_spec) + + needs_redistribute = False + suggestion_args: List[object] = [] + for arg_idx, arg in enumerate(op_schema.args_schema): + if isinstance(arg, (list, tuple)) and isinstance( + arg[0], DTensorSpec + ): + expected_input_spec_list: List[DTensorSpec] = [] + for idx, arg_spec in enumerate(arg): + expected_input_spec = selected_strategies[idx].input_spec( + arg_idx + ) + expected_input_spec = ( + expected_input_spec.shallow_copy_with_tensor_meta( + arg_spec.tensor_meta + ) + ) + if arg_spec.placements != expected_input_spec.placements: + 
needs_redistribute = True + expected_input_spec_list.append(expected_input_spec) + suggestion_args.append( + tuple(expected_input_spec_list) + if isinstance(arg, tuple) + else expected_input_spec_list + ) + elif isinstance(arg, DTensorSpec): + expected_input_spec = selected_strategies[0].input_spec(arg_idx) + expected_input_spec = ( + expected_input_spec.shallow_copy_with_tensor_meta( + arg.tensor_meta + ) + ) + if arg.placements != expected_input_spec.placements: + needs_redistribute = True + suggestion_args.append(expected_input_spec) + else: + suggestion_args.append(arg) + + suggestion_schema = None + if needs_redistribute: + reshard_schema = OpSchema( + op_schema.op, tuple(suggestion_args), op_schema.kwargs_schema + ) + suggestion_schema = [reshard_schema] + + output_sharding = OutputSharding( + tuple(out_spec_list) if out_tensor_meta is not None else None, + suggestion_schema, + needs_redistribute=needs_redistribute, + ) + else: + raise ValueError("Unsupported op strategy type") + + # associate the output sharding with the output tensor metadata + self._wrap_output_spec_tensor_meta( + op_schema.op, output_sharding.output_spec, out_tensor_meta + ) + return output_sharding + elif op_schema.op in self.op_to_rules: + # propagate the sharding with rule + sharding_prop_func = self.op_to_rules[op_schema.op] + + # step 1. there's sharding propagation rule, run + # sharding propagation to get the output sharding + try: + output_sharding = sharding_prop_func(op_schema) + except NotImplementedError as e: + raise e + except Exception as e: + raise RuntimeError( + f"Sharding propagation failed on op {op_schema}.\n" f"Error: {e}" + ) from e + + # step 2. if can't get output_spec from sharding + # propagation (i.e. no rules apply for input + # placements), we return the output sharding + # with schema suggestions, which can be used to + # decide how to do redistribute on inputs + if output_sharding.output_spec is None: + if output_sharding.schema_suggestions is None: + if output_sharding.failed_reason is not None: + raise RuntimeError( + f"Sharding propagation failed on op {op_schema}!" + f"Failed reason: {output_sharding.failed_reason}" + ) + else: + # we do auto redistribute on inputs if necessary + # to get an eligible input, which we will pick a + # schema suggestion base on the redistribute cost. + # For now we simply pick the first suggestion. + suggested_input_schema = output_sharding.schema_suggestions[0] + # run sharding propagation again with suggested schema + propagation_res = sharding_prop_func(suggested_input_schema) + # we set the output sharding with the new propagation result + # so that dispatching know both output_spec and schema_suggestions + # exist, which indicates a reshard is needed + output_sharding.output_spec = propagation_res.output_spec + output_sharding.needs_redistribute = True + + # associate the output sharding with the output tensor metadata + self._wrap_output_spec_tensor_meta( + op_schema.op, output_sharding.output_spec, out_tensor_meta + ) + + return output_sharding + else: + raise NotImplementedError( + f"Operator {op_schema.op} does not have a sharding strategy registered." + ) + + def _select_strategy(self, strategy: OpStrategy) -> PlacementStrategy: + if len(strategy.strategies) == 1: + # short cut with only one possible strategy + return strategy.strategies[0] + + strategy_costs: List[float] = [] + for strtg in strategy.strategies: + assert ( + strtg.redistribute_cost is not None + ), "must set redistribute cost each strategy!" 
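+            # redistribute_cost is a nested list of costs; the next line flattens and
+            # sums it into one scalar per candidate strategy. For example, a cost
+            # table of [[0.0, 4.0], [2.0, 0.0]] contributes a total cost of 6.0.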
+ redistribute_cost = sum(chain.from_iterable(strtg.redistribute_cost)) + strategy_costs.append(redistribute_cost) + + # for eager execution, we just select the one with the minimal redistribute cost + return strategy.strategies[strategy_costs.index(min(strategy_costs))] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..ebcc981d2c93ac24e5c9fd7414af1038939076ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py @@ -0,0 +1,277 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +# implement matrix related ops for distributed tensor +from typing import cast, Dict, List, Tuple + +import torch +import torch.distributed as dist +import torch.distributed._tensor.api as dtensor + +aten = torch.ops.aten + + +def _requires_data_exchange(padding): + # TODO: whether there requires data exchange is currently determined by padding + return padding[1] != 0 + + +def _is_supported(input_size, kernel_size, stride, padding, dilation): + if dilation[1] != 1: + raise RuntimeError("Dilation must be 1 for tensor parallel convolution.") + if padding[1] != 0: + if stride[1] != 1: + raise RuntimeError( + "Stride must be 1 when there is padding for tensor parallel convolution." + ) + if kernel_size[3] // 2 > input_size[3]: + raise RuntimeError( + "kernel_size[3] // 2 should be less than or equal to input_size[3] for tensor parallel convolution." + ) + else: + if not (input_size[3] % stride[1] == 0 and stride[1] == kernel_size[3]): + raise RuntimeError( + "It requires that input_size[3] is divisible by stride[1] and stride[1] equals kernel_size[3] " + "when there is padding for tensor parallel convolution." 
+ ) + return True + + +def _ring_send_recv_construct(in_tensor, d1, d2, left, right, rank, size): + # dist comms and reconstruct local input tensor + send_to_right = in_tensor[:, :, :, -d1:].contiguous() + send_to_left = in_tensor[:, :, :, :d2].contiguous() + recv_from_right = torch.zeros_like(send_to_left) + recv_from_left = torch.zeros_like(send_to_right) + + send_op_right = dist.P2POp(dist.isend, send_to_right, right) + send_op_left = dist.P2POp(dist.isend, send_to_left, left) + recv_op_right = dist.P2POp(dist.irecv, recv_from_right, right) + recv_op_left = dist.P2POp(dist.irecv, recv_from_left, left) + + reqs = dist.batch_isend_irecv( + [send_op_right, send_op_left, recv_op_left, recv_op_right] + ) + for req in reqs: + req.wait() + + if rank == 0: + in_tensor = torch.cat([in_tensor, recv_from_right], dim=-1) + elif rank == size - 1: + in_tensor = torch.cat([recv_from_left, in_tensor], dim=-1) + else: + in_tensor = torch.cat([recv_from_left, in_tensor, recv_from_right], dim=-1) + + return in_tensor + + +def _ring_send_recv_aggregate(grad_in_tensor, d1, d2, left, right, rank, size): + # dist comms and aggregate gradients for edge pixels + send_to_right = grad_in_tensor[:, :, :, -d2:].contiguous() + send_to_left = grad_in_tensor[:, :, :, :d1].contiguous() + recv_from_right = torch.zeros_like(send_to_left) + recv_from_left = torch.zeros_like(send_to_right) + + send_op_right = dist.P2POp(dist.isend, send_to_right, right) + send_op_left = dist.P2POp(dist.isend, send_to_left, left) + recv_op_right = dist.P2POp(dist.irecv, recv_from_right, right) + recv_op_left = dist.P2POp(dist.irecv, recv_from_left, left) + + reqs = dist.batch_isend_irecv( + [send_op_right, send_op_left, recv_op_left, recv_op_right] + ) + for req in reqs: + req.wait() + + if rank == 0: + grad_in_tensor = grad_in_tensor[:, :, :, :-d2] + grad_in_tensor[:, :, :, -d1:] = torch.add( + grad_in_tensor[:, :, :, -d1:], recv_from_right + ) + elif rank == size - 1: + grad_in_tensor = grad_in_tensor[:, :, :, d1:] + grad_in_tensor[:, :, :, :d2] = torch.add( + grad_in_tensor[:, :, :, :d2], recv_from_left + ) + else: + grad_in_tensor = grad_in_tensor[:, :, :, d1:-d2] + grad_in_tensor[:, :, :, -d1:] = torch.add( + grad_in_tensor[:, :, :, -d1:], recv_from_right + ) + grad_in_tensor[:, :, :, :d2] = torch.add( + grad_in_tensor[:, :, :, :d2], recv_from_left + ) + + +def tp_convolution( + op_call: torch._ops.OpOverload, + local_tensor_args: Tuple[object, ...], + local_tensor_kwargs: Dict[str, object], +) -> object: + assert op_call == aten.convolution.default + assert len(local_tensor_args) == 9 + + rank = dist.get_rank() + size = dist.get_world_size() + in_tensor = cast(torch.Tensor, local_tensor_args[0]) + weight = cast(torch.Tensor, local_tensor_args[1]) + stride, padding, dilation = local_tensor_args[3:6] + + assert _is_supported(in_tensor.shape, weight.shape, stride, padding, dilation) + assert isinstance(padding, List) + + if not _requires_data_exchange(padding): + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + return local_results + else: + # step 0 compute the overlap pixels of the input tensor + d = weight.shape[3] - 1 + d1 = d // 2 + d2 = d - d1 + assert d1 + d2 == d + right = (rank + 1) % size + left = (rank - 1 + size) % size + + # step1 reconstruct local input tensor + in_tensor = _ring_send_recv_construct( + in_tensor, d1, d2, left, right, rank, size + ) + + # step2 feed local input tensor to op_call + local_tensor_args_list = list(local_tensor_args) + local_tensor_args_list[0] = in_tensor + local_tensor_args 
= cast(Tuple[object, ...], local_tensor_args_list) + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + + # step3 remove extra outputs from the results + padding_w = padding[1] + w = local_results.size(3) + if rank == 0: + local_results = local_results[:, :, :, : w - padding_w] + elif rank == size - 1: + local_results = local_results[:, :, :, padding_w:] + else: + local_results = local_results[:, :, :, padding_w : w - padding_w] + + return local_results + + +def tp_convolution_backward( + op_call: torch._ops.OpOverload, + local_tensor_args: Tuple[object, ...], + local_tensor_kwargs: Dict[str, object], +) -> object: + assert op_call == aten.convolution_backward.default + assert len(local_tensor_args) == 11 + + rank = dist.get_rank() + size = dist.get_world_size() + grad_out_tensor = cast(torch.Tensor, local_tensor_args[0]) + in_tensor = cast(torch.Tensor, local_tensor_args[1]) + weight = cast(torch.Tensor, local_tensor_args[2]) + stride, padding, dilation = local_tensor_args[4:7] + + assert _is_supported(in_tensor.shape, weight.shape, stride, padding, dilation) + assert isinstance(padding, List) + + if not _requires_data_exchange(padding): + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + return local_results + else: + # step 0 compute the overlap pixels of the input tensor + d = weight.shape[3] - 1 + d1 = d // 2 + d2 = d - d1 + assert d1 + d2 == d + right = (rank + 1) % size + left = (rank - 1 + size) % size + + # step1 reconstruct local input tensor + in_tensor = _ring_send_recv_construct( + in_tensor, d1, d2, left, right, rank, size + ) + + # step2 reconstruct local gradient output tensor + N, C_out, H_out, _ = grad_out_tensor.shape + padding_w = padding[1] + if rank == 0: + grad_out_tensor = torch.nn.functional.pad( + grad_out_tensor, (0, padding_w), "constant", 0 + ) + elif rank == size - 1: + grad_out_tensor = torch.nn.functional.pad( + grad_out_tensor, (padding_w, 0), "constant", 0 + ) + else: + grad_out_tensor = torch.nn.functional.pad( + grad_out_tensor, (padding_w, padding_w), "constant", 0 + ) + + # step3 feed local input tensor to op_call + local_tensor_args_list = list(local_tensor_args) + local_tensor_args_list[0] = grad_out_tensor + local_tensor_args_list[1] = in_tensor + local_tensor_args = cast(Tuple[object, ...], local_tensor_args_list) + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + + # step4 aggregate gradients for edge pixels + grad_in_tensor = local_results[0] + grad_in_tensor = _ring_send_recv_aggregate( + grad_in_tensor, d1, d2, left, right, rank, size + ) + + local_results = list(local_results) + local_results[0] = grad_in_tensor + local_results = cast(Tuple[object, ...], local_results) + + return local_results + + +def convolution_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + # extract local tensor and sharding infos to a OpInfo + op_info = dtensor.DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs) + + # sharding propagation + dtensor.DTensor._op_dispatcher.sharding_propagator.propagate(op_info) + output_sharding = op_info.output_sharding + assert output_sharding is not None, "output sharding should not be None" + + # local propagation + local_results = tp_convolution( + op_call, tuple(op_info.local_args), op_info.local_kwargs + ) + + return dtensor.DTensor._op_dispatcher.wrap( + local_results, output_sharding.output_spec + ) + + +def convolution_backward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, 
...], + kwargs: Dict[str, object], +) -> object: + # Redistribute grad_output tensor to the same placement as input tensor + args = list(args) + assert isinstance(args[0], dtensor.DTensor) and isinstance(args[1], dtensor.DTensor) + args[0] = args[0].redistribute(args[1].device_mesh, args[1].placements) + args = tuple(args) + + # extract local tensor and sharding infos to a OpInfo + op_info = dtensor.DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs) + + # sharding propagation + dtensor.DTensor._op_dispatcher.sharding_propagator.propagate(op_info) + output_sharding = op_info.output_sharding + assert output_sharding is not None, "output sharding should not be None" + + # local propagation + local_results = tp_convolution_backward( + op_call, tuple(op_info.local_args), op_info.local_kwargs + ) + + return dtensor.DTensor._op_dispatcher.wrap( + local_results, output_sharding.output_spec + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c46b4759b413fd2ec98273134d61d759d943d11e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75a80c5db0f9f5d622d58950d09cc2a14f6779db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+"""A Pipe implementation in PyTorch.""" +from .checkpoint import is_checkpointing, is_recomputing +from .pipe import Pipe, WithDevice +from .microbatch import NoChunk + +__all__ = ["Pipe", "is_checkpointing", "is_recomputing"] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4845230c2c37a6ba210aaadc645b89755534e0f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e78a6f5d6a069f1b851815791ca572a86216aa76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6e2da4ed4bfd8227c06910713b4032c487ee159 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/checkpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..567dd4af241aea728a804ee18eab8f68f756db37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/copy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..608e2a3866fc10550258016dc7065b9e9634bdf7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/dependency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58f9031a6389e53786f086196a3588bceee60067 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64d6674f115c1cddf4587b5f998be4005888bf85 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/phony.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88a1620c4182649adb3567b1295a68d350a95d91 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba6bec2b868ea0efd905ccb5aea1a559757e3b95 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1578429943e00aa66f8319e10ae0043b2ee2fa5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4370d142af9b96e9fecd087e1b3ae328192d5c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..862f067dede12360dff056a87ee297351179b303 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffc657896d87b69cbd548878164e08b29be25e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__init__.py @@ -0,0 +1,164 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""A helper to roughly balance a sequential module. + +Usage:: + + import torch + from torch.distributed.pipeline.sync import Pipe + from torch.distributed.pipeline.sync.balance import balance_by_time + + sample = torch.empty(128, 3, 224, 224) + balance = balance_by_time(torch.cuda.device_count(), model, sample) + + pipe = Pipe(model, balance, chunks=8) + +""" +from typing import Any, List, Union, Sequence + +import torch +from torch import Tensor +import torch.nn as nn + +from . 
import blockpartition +from .profile import profile_sizes, profile_times + +__all__ = ["balance_by_time", "balance_by_size"] + + +Device = Union[torch.device, int, str] + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + + +def balance_cost(cost: List[int], partitions: int) -> List[int]: + partitioned = blockpartition.solve(cost, partitions) + return [len(p) for p in partitioned] + + +def balance_by_time( + partitions: int, + module: nn.Sequential, + sample: Union[List[Any], Tensor], + *, + timeout: float = 1.0, + device: Device = torch.device("cuda"), +) -> List[int]: + """Naive automatic balancing by elapsed time per layer. + :: + + sample = torch.empty(128, 3, 224, 224) + balance = balance_by_time(torch.cuda.device_count(), model, sample) + pipe = Pipe(model, balance, chunks=8) + + Args: + partitions (int): + intended number of partitions + module (torch.nn.Sequential): + sequential module to be partitioned + sample (torch.Tensor): + example input with arbitrary batch size + + Keyword Args: + timeout (float): + profiling iterates again if the timeout (in second) is not exceeded + (default: ``1.0``) + device ('cpu' or 'cuda' device): + CPU or CUDA device where each layer is profiled (default: the + current CUDA device) + + Returns: + A list of number of layers in each partition. Use it for the `balance` + parameter of :class:`~torchpipe.Pipe`. + + .. note:: + `module` and `sample` must be placed on the same device. + + """ + times = profile_times(module, sample, timeout, torch.device(device)) + return balance_cost(times, partitions) + + +def balance_by_size( + partitions: int, + module: nn.Sequential, + input: Union[List[Any], Tensor], + *, + chunks: int = 1, + param_scale: float = 2.0, + device: Device = torch.device("cuda"), +) -> List[int]: + """Naive automatic balancing by CUDA memory usage per layer. + + During training, required memory for parameters depends on which optimizer + is used. Optimizers may use buffers for each parameter to track + optimization statistics internally, such as momentum buffer in SGD. + + To get more reliable size based balance, you should specify `param_scale` + with regard to your optimizer. The default `param_scale` is 2 instead of 1 + due to gradient accumulation which is necessary for every optimizer. 
+ + Follow this guide to choose correct `param_scale` for typical optimizers: + + ========= ============= ========================================= + Optimizer `param_scale` Internal State + ========= ============= ========================================= + SGD 2--3 (momentum_buffer) + Adam 4--5 exp_avg, exp_avg_sq, (max_exp_avg_sq) + Adadelta 4 square_avg, acc_delta + Adagrad 3 sum + RMSprop 3--5 square_avg, (momentum_buffer), (grad_avg) + ========= ============= ========================================= + + Here's a simple example with the Adam optimizer:: + + balance = balance_by_size( + torch.cuda.device_count(), + model, + + # Same size with mini-batch to train + torch.empty(1024, 3, 224, 224), + + # Number of micro-batches to train with Pipe + chunks=8, + + # 4 for Adam + param_scale=4.0, + ) + + pipe = Pipe(model, balance, chunks=8) + adam = Adam(pipe.parameters()) + + Args: + partitions (int): + intended number of partitions + module (torch.nn.Sequential): + sequential module to be partitioned + input (torch.Tensor): + example mini-batch with the same size to train + + Keyword Args: + chunks (int): + number of micro-batches will be used to train (default: ``1``) + param_scale (float): + how many copies of parameters would be allocated for training. It + depends on optimizer. See the above guide. (default: ``2.0``) + device ('cuda' device): + CUDA device where each layer is profiled (default: the current CUDA + device) + + Returns: + A list of number of layers in each partition. Use it for the `balance` + parameter of :class:`~torchpipe.Pipe`. + + .. note:: + `module` and `input` must be placed on the same CUDA device. + + """ + sizes = profile_sizes(module, input, chunks, param_scale, torch.device(device)) + return balance_cost(sizes, partitions) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd6e629cd1f7de342729e93de750e694c9c8c66c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/blockpartition.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/blockpartition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d25c67658150598647499472fc0e310213501df1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/blockpartition.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/profile.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/profile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01d02c7ab60179d948eb1308ed7f8e9cc56f0301 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/profile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py new file mode 100644 index 
0000000000000000000000000000000000000000..7afe782f6ac8c7c0585ae1f93e3ccfa7e25fce78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/blockpartition.py @@ -0,0 +1,95 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Implements "Block Partitions of Sequences" by Imre Bárány et al. + +Paper: https://arxiv.org/pdf/1308.2452.pdf + +""" +from typing import Iterator, List, Tuple + +__all__ = ["solve"] + + +def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]: + """Splits a sequence into several partitions to minimize variance for each + partition. + + The result might not be optimal. However, it can be done only in O(kn³), + where k is the number of partitions and n is the length of the sequence. + + """ + if partitions < 1: + raise ValueError(f"partitions must be a positive integer ({partitions} < 1)") + + n = len(sequence) + if n < partitions: + raise ValueError(f"sequence is shorter than intended partitions ({n} < {partitions})") + + # Normalize the sequence in [0, 1]. + minimum = min(sequence) + maximum = max(sequence) - minimum + + normal_sequence: List[float] + if maximum == 0: + normal_sequence = [0 for _ in sequence] + else: + normal_sequence = [(x - minimum) / maximum for x in sequence] + + splits = [n // partitions * (x + 1) for x in range(partitions - 1)] + [n] + + def block_size(i: int) -> float: + start = splits[i - 1] if i > 0 else 0 + stop = splits[i] + return sum(normal_sequence[start:stop]) + + def leaderboard() -> Iterator[Tuple[float, int]]: + return ((block_size(i), i) for i in range(partitions)) + + while True: + """ + (1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P. + """ + # max_size: M(P) + max_size, p = max(leaderboard()) + + while True: + """ + (2) If M(P) ≤ m(P) + 1, then stop. + """ + # min_size: m(P) + min_size, q = min(leaderboard()) + + if max_size <= min_size + 1: + return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)] + + """ + (3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is + closest to p (ties broken arbitrarily). Thus Bq is a minimal block + of P. Let Bh be the block next to Bq between Bp and Bq. (Note that + Bh is a non-empty block: if it were, then m(P) = 0 and we should + have chosen Bh instead of Bq.) + """ + if p < q: + """ + So either p < q and then h = q−1 and we define P ∗ by moving + the last element from Bh = Bq−1 to Bq, + """ + h = q - 1 + splits[h] -= 1 + else: + """ + or q < p, and then h = q + 1 and P ∗ is obtained by moving the + first element of Bh = Bq+1 to Bq. + """ + h = q + 1 + splits[q] += 1 + + """ + Set P = P ∗ . If p = h, then go to (1), else go to (2). + """ + if p == h: + break diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py new file mode 100644 index 0000000000000000000000000000000000000000..fa1a0c06a8e3ac580d42cc0b34fb093126bc6333 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py @@ -0,0 +1,116 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+"""Per-layer profilers.""" +import copy +import time +from typing import Any, Generator, List, Union, Sequence + +import torch +from torch import Tensor +import torch.nn as nn + +from ..microbatch import Batch + +__all__: List[str] = [] + + +Device = Union[torch.device, int, str] + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + + +def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]: + """Copies layers for ease to profile. It doesn't modify the given + module. + """ + for layer in module: + layer_copy = copy.deepcopy(layer) + layer_copy.to(device) + layer_copy.train() + yield layer_copy + + +def detach(batch: Batch) -> None: + """Detaches from autograd graph.""" + for i, x in enumerate(batch): + batch[i] = x.detach().requires_grad_(x.requires_grad) + + +def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]: + """Profiles elapsed times per layer.""" + if any(p.grad is not None for p in module.parameters()): + raise ValueError("some parameter already has gradient") + + _batch = Batch(sample) + for i, x in enumerate(_batch): + _batch[i] = x.detach().to(device).requires_grad_(x.requires_grad) + + time_bufs: List[List[float]] = [[] for _ in module] + begun_at = time.time() + + while time.time() - begun_at < timeout: + batch = _batch + + for i, layer in enumerate(layerwise_sandbox(module, device)): + detach(batch) + + if device.type == "cuda": + torch.cuda.synchronize(device) + tick = time.time() + + # Forward + batch = batch.call(layer) + + # Backward + backward_tensors = tuple(y for y in batch if y.requires_grad) + if backward_tensors: + torch.autograd.backward(backward_tensors, backward_tensors) + + if device.type == "cuda": + torch.cuda.synchronize(device) + tock = time.time() + + time_bufs[i].append(tock - tick) + + us = 1_000_000 + return [sum(int(t * us) for t in buf) for buf in time_bufs] + + +def profile_sizes( + module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device, +) -> List[int]: + """Profiles CUDA memory usage per layer.""" + if device.type != "cuda": + raise ValueError("size profiler supports only CUDA device") + + batch = Batch(input) + sizes: List[int] = [] + + latent_scale = batch[0].size(0) / chunks + for i, x in enumerate(batch): + batch[i] = x[:1].detach().to(device).requires_grad_(x.requires_grad) + + for layer in layerwise_sandbox(module, device): + detach(batch) + + # Detect memory usage at forward. + torch._C._cuda_clearCublasWorkspaces() + memory_before = torch.cuda.memory_allocated(device) + batch = batch.call(layer) + torch._C._cuda_clearCublasWorkspaces() + memory_after = torch.cuda.memory_allocated(device) + latent_size = memory_after - memory_before + + # Analyze size of parameters. + param_size = sum(p._typed_storage()._nbytes() for p in layer.parameters()) + + # Combine size of parameters and activations with normalize scales. 
+ size = latent_size * latent_scale + param_size * param_scale + sizes.append(int(size)) + + return sizes diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..ad375f893318ec130c4b7777c7f557a6697f0091 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py @@ -0,0 +1,159 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Tracks the running statistics per mini-batch instead of micro-batch.""" +from typing import TypeVar, cast + +import torch +from torch import Tensor, nn +from torch.nn.functional import batch_norm +from torch.nn.modules.batchnorm import _BatchNorm + +from .checkpoint import is_recomputing + +__all__ = ["DeferredBatchNorm"] + + +TModule = TypeVar("TModule", bound=nn.Module) + + +class DeferredBatchNorm(_BatchNorm): + """A BatchNorm layer tracks multiple micro-batches to update running statistics per mini-batch.""" + + sum: Tensor + sum_squares: Tensor + running_mean: Tensor + running_var: Tensor + num_batches_tracked: Tensor + + def __init__( + self, + num_features: int, + eps: float = 1e-5, + momentum: float = 0.1, + affine: bool = True, + chunks: int = 1, + ) -> None: + super().__init__(num_features, eps, momentum, affine, track_running_stats=True) + + self.register_buffer("sum", torch.zeros_like(self.running_mean)) + self.register_buffer("sum_squares", torch.zeros_like(self.running_var)) + + self.counter = 0 + self.tracked = 0 + self.chunks = chunks + + def _check_input_dim(self, input: Tensor) -> None: + # It's the typical _check_input_dim() implementation in PyTorch. + if input.dim() <= 2: + raise ValueError("expected at least 3D input (got %dD input)" % input.dim()) + + def _track(self, input: Tensor) -> bool: + """Tracks statistics of a micro-batch.""" + # Dimensions except channel. For example, (0, 2, 3) is for BatchNorm2d. + dim = [0] + dim.extend(range(2, input.dim())) + + with torch.no_grad(): + self.sum += input.sum(dim) + self.sum_squares += (input ** 2).sum(dim) + + size = input.size().numel() // input.size(1) + self.counter += size + self.tracked += 1 + + return self.tracked == self.chunks + + def _commit(self) -> None: + """Update the running statistics of a mini-batch.""" + exponential_average_factor = 0.0 + self.num_batches_tracked += 1 + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + mean = self.sum / self.counter + var = self.sum_squares / self.counter - mean ** 2 + + # Calculate the exponential moving average here. + m = exponential_average_factor + + self.running_mean *= 1 - m + self.running_mean += mean * m + + self.running_var *= 1 - m + self.running_var += var * m + + self.sum.zero_() + self.sum_squares.zero_() + self.counter = 0 + self.tracked = 0 + + def forward(self, input: Tensor) -> Tensor: + if not self.training: + # Don't train parameters on the evaluation mode. 
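+            # Evaluation path: normalize with the committed running statistics
+            # as-is (training=False, momentum=0.0), so nothing is updated here.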
+ return batch_norm( + input, + running_mean=self.running_mean, + running_var=self.running_var, + weight=self.weight, + bias=self.bias, + training=False, + momentum=0.0, + eps=self.eps, + ) + + if not is_recomputing(): + # Track a micro-batch on the training mode + # but not under a recomputation. + tracked_enough = self._track(input) + + # Update the running statistics for a mini-batch + # if it has tracked enough micro-batches. + if tracked_enough: + self._commit() + + # Normalize a micro-batch and train the parameters. + return batch_norm( + input, + running_mean=None, + running_var=None, + weight=self.weight, + bias=self.bias, + training=True, + momentum=0.0, + eps=self.eps, + ) + + @classmethod + def convert_deferred_batch_norm(cls, module: TModule, chunks: int = 1) -> TModule: + """Converts a :class:`nn.BatchNorm` or underlying :class:`nn.BatchNorm`s into :class:`DeferredBatchNorm`:: + + from torchvision.models.resnet import resnet101 + from torchpipe.batchnorm import DeferredBatchNorm + model = resnet101() + model = DeferredBatchNorm.convert_deferred_batch_norm(model) + + """ + if isinstance(module, DeferredBatchNorm) and module.chunks is chunks: + return cast(TModule, module) + + module_output: nn.Module = module + + if isinstance(module, _BatchNorm) and module.track_running_stats: + module_output = DeferredBatchNorm(module.num_features, module.eps, module.momentum, module.affine, chunks) + if module.affine: + module_output.register_parameter("weight", module.weight) + module_output.register_parameter("bias", module.bias) + module_output.register_buffer("running_mean", module.running_mean) + module_output.register_buffer("running_var", module.running_var) + module_output.register_buffer("num_batches_tracked", module.num_batches_tracked) + + for name, child in module.named_children(): + module_output.add_module(name, cls.convert_deferred_batch_norm(child, chunks)) + + return cast(TModule, module_output) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..e67da2499d573e9e796a9b5241187e8b0fe6d0c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py @@ -0,0 +1,364 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Checkpointing with preceding recomputation. + +PyTorch already provides the official checkpointing utilities in +:mod:`torch.utils.checkpoint`. The official checkpointing combines +recomputation and recursive backpropagation into one autograd function named +``CheckpointFunction``. Hence, the recomputation can be started only when the +gradients arrive to the function. In Pipe, the recomputation needs to precede +the gradient arrival to minimize the GPU idle time. + +We solve this problem by introducing separate autograd functions named +:class:`Recompute` and :class:`Checkpoint`. Each function represents +recomputation and recursive backpropagation, respectively. We can manipulate +the control flow in aspect of both the autograd engine and CUDA with a pair of +the functions. + +Specifically, we place CUDA stream synchronization between :class:`Recompute` +and :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is +copied entirely. 
+ +""" +from collections import deque +from contextlib import contextmanager +import threading +from typing import ( + Any, + Deque, + Generator, + List, + Optional, + Protocol, + Union, + Sequence, + Tuple +) + +import torch +from torch import Tensor +import torch.autograd + +from .dependency import fork, join +from .microbatch import Batch +from .phony import get_phony + +__all__ = ["Function", "checkpoint", "Checkpointing", "ThreadLocal", "enable_checkpointing", + "enable_recomputing", "is_checkpointing", "is_recomputing", "Context", "save_rng_states", + "restore_rng_states", "Checkpoint", "Recompute"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +# Types for shared memory between Checkpoint and Recompute. +Recomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf) +RNGStates = Tuple[Tensor, Optional[Tensor]] # (cpu_rng_state, gpu_rng_state) + + +# Protocol with __call__ instead of Callable can be used as an attribute type. +# See: https://github.com/python/mypy/issues/708#issuecomment-561735949 +class Function(Protocol): + def __call__(self, input: TensorOrTensors) -> TensorOrTensors: + ... + + +def checkpoint(function: Function, input): + """Make a checkpoint with a simple interface like + :func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug + :class:`Checkpoint` and :class:`Recompute` without boilerplate. + """ + batch = Batch(input) + + chk = Checkpointing(function, batch) + batch = chk.checkpoint() + chk.recompute(batch) + + return batch.values + + +class Checkpointing: + """Generates a pair of :class:`Checkpoint` and :class:`Recompute`.""" + + def __init__(self, function: Function, batch: Batch) -> None: + self.function = function + self.batch = batch + + # Shared memory between Checkpoint and Recompute. 1-length deque is + # used for mutability and length limitation. + self.recomputed: Deque[Recomputed] = deque(maxlen=1) + self.rng_states: Deque[RNGStates] = deque(maxlen=1) + + def checkpoint(self) -> Batch: + """Return a batch applied by :class:`Checkpoint`.""" + input_atomic = self.batch.atomic + inputs = tuple(self.batch) + + # Use a phony which requires grad to ensure that Checkpoint can be + # tracked by the autograd engine even when none of the input tensors + # require grad. + phony = get_phony(self.batch.get_device(), requires_grad=True) + + output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs) + + # Gradients are only supported for float Tensors. + if isinstance(output, tuple): + output = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in output]) + + return Batch(output) + + def recompute(self, batch: Batch) -> None: + """Apply :class:`Recompute` to the batch in place.""" + input_atomic = self.batch.atomic + inputs = tuple(self.batch) + + # Use a tensor in the batch to tie together fork-join + tensor_idx = batch.find_tensor_idx() + # batch[tensor_idx] is always requiring grad, because it has been passed + # checkpoint with a phony requiring grad. 
+ batch[tensor_idx], phony = fork(batch[tensor_idx]) + phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs) + batch[tensor_idx] = join(batch[tensor_idx], phony) + + +class ThreadLocal(threading.local): + def __init__(self) -> None: + self.is_checkpointing = False + self.is_recomputing = False + + +thread_local = ThreadLocal() + + +@contextmanager +def enable_checkpointing() -> Generator[None, None, None]: + """Make :func:`is_checkpointing` return :data:`True` within a context.""" + orig = thread_local.is_checkpointing + thread_local.is_checkpointing = True + try: + yield + finally: + thread_local.is_checkpointing = orig + + +@contextmanager +def enable_recomputing() -> Generator[None, None, None]: + """Makes :func:`is_recomputing` return :data:`True` within a context.""" + orig = thread_local.is_recomputing + thread_local.is_recomputing = True + try: + yield + finally: + thread_local.is_recomputing = orig + + +def is_checkpointing() -> bool: + """Whether the current forward propagation is under checkpointing. + + Returns: + bool: :data:`True` if it's under checkpointing. + + """ + return thread_local.is_checkpointing + + +def is_recomputing() -> bool: + """Whether the current forward propagation is under checkpoint recomputation. + + Use this to prevent duplicated side-effects at forward + propagation:: + + class Counter(nn.Module): + def __init__(self): + super().__init__() + self.counter = 0 + + def forward(self, input): + if not is_recomputing(): + self.counter += 1 + return input + + Returns: + bool: :data:`True` if it's under checkpoint recomputation. + + .. seealso:: :ref:`Detecting Recomputation` + + """ + return thread_local.is_recomputing + + +class Context: + """The common interface between the :class:`Checkpoint` and :class:`Recompute` context.""" + + recomputed: Deque[Recomputed] + rng_states: Deque[RNGStates] + function: Function + input_atomic: bool + inputs: Sequence[Any] + + saved_tensors: Tuple[Tensor, ...] + + def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover + pass + + +def save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None: + """: + Capture the current random number generator states. + + meth:`Checkpoint.forward` captures the current PyTorch's random number + generator states at CPU and GPU to reuse in :meth:`Recompute.backward`. + + .. seealso:: :ref:`Referential Transparency` + + """ + cpu_rng_state = torch.get_rng_state() + + gpu_rng_state: Optional[Tensor] + if device.type == "cuda": + gpu_rng_state = torch.cuda.get_rng_state(device) + else: + gpu_rng_state = None + + rng_states.append((cpu_rng_state, gpu_rng_state)) + + +@contextmanager +def restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]: + """: + Restore the random number generator state. + + meth:`Recompute.backward` restores the random number generator states + captured by :func:`save_rng_states` within its context. + + .. 
seealso:: :ref:`Referential Transparency` + + """ + cpu_rng_state, gpu_rng_state = rng_states.pop() + + gpu_devices: List[torch.device] = [] + if device.type == "cuda": + gpu_devices.append(device) + + with torch.random.fork_rng(gpu_devices): + torch.set_rng_state(cpu_rng_state) + if gpu_rng_state is not None: + torch.cuda.set_rng_state(gpu_rng_state, device) + yield + + +class Checkpoint(torch.autograd.Function): + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, + phony: Tensor, + recomputed: Deque[Recomputed], + rng_states: Deque[RNGStates], + function: Function, + input_atomic: bool, + *inputs, + ): + ctx.recomputed = recomputed + ctx.rng_states = rng_states + + save_rng_states(phony.device, ctx.rng_states) + + ctx.function = function + ctx.input_atomic = input_atomic + if input_atomic: + tensors = [inputs[0]] + else: + tensors = [] + for input in inputs: + if torch.is_tensor(input): + tensors.append(input) + + ctx.save_for_backward(*tensors) + + with torch.no_grad(), enable_checkpointing(): + if input_atomic: + assert len(inputs) == 1 + output = function(inputs[0]) + else: + output = function(*inputs) + return output + + @staticmethod + def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: # pragma: no cover + output, input_leaf = ctx.recomputed.pop() + + if isinstance(output, tuple): + outputs = output + else: + outputs = (output,) + if any(torch.is_tensor(y) and y.requires_grad for y in outputs): + tensors = tuple([x for x in outputs if torch.is_tensor(x) and x.requires_grad]) + torch.autograd.backward(tensors, grad_output) + + grad_input: List[Optional[Tensor]] = [None, None, None, None, None] + grad_input.extend(x.grad if torch.is_tensor(x) else None for x in input_leaf) + return tuple(grad_input) + + +class Recompute(torch.autograd.Function): + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, + phony: Tensor, + recomputed: Deque[Recomputed], + rng_states: Deque[RNGStates], + function: Function, + input_atomic: bool, + *inputs, + ) -> Tensor: + ctx.recomputed = recomputed + ctx.rng_states = rng_states + + ctx.function = function + ctx.input_atomic = input_atomic + ctx.inputs = inputs + if input_atomic: + tensors = [inputs[0]] + else: + tensors = [] + for input in inputs: + if torch.is_tensor(input): + tensors.append(input) + ctx.save_for_backward(*tensors) + + return phony + + @staticmethod + def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover + inputs = ctx.inputs + inputs_leaf = tuple(x.detach().requires_grad_(x.requires_grad) if torch.is_tensor(x) else x for x in inputs) + + # Get the device for the inputs from a tensor + device = None + for input in inputs: + if torch.is_tensor(input): + device = input.device + break + + if device is None: + raise RuntimeError(f'No tensors found in {inputs}') + + with restore_rng_states(device, ctx.rng_states): + with torch.enable_grad(), enable_recomputing(): + if ctx.input_atomic: + assert len(inputs_leaf) == 1 + output = ctx.function(inputs_leaf[0]) + else: + output = ctx.function(*inputs_leaf) + + ctx.recomputed.append((output, inputs_leaf)) + + grad_input: List[None] = [None, None, None, None, None] + grad_input.extend(None for _ in ctx.inputs) + return tuple(grad_input) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py new file mode 100644 index 
0000000000000000000000000000000000000000..b717f0c2932c607ec398f52adca7f820704a55e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py @@ -0,0 +1,108 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Autograd functions for stream-aware CUDA copy. + +It is used to overlap copy and computation on the same GPU. +""" +from collections import deque +from typing import Deque, List, Optional, Tuple, Sequence + +import torch +from torch import Tensor + +from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream + +__all__: List[str] = ["Context", "Copy", "Wait"] + + +Tensors = Sequence[Tensor] + + +# Common interface between :class:`Copy` and :class:`Wait`. +class Context: + prev_stream: AbstractStream + next_stream: AbstractStream + + +class Copy(torch.autograd.Function): + """Copies tensors on specific streams.""" + + @staticmethod + # type: ignore[override] + def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input,) -> Tensors: + ctx.prev_stream = prev_stream + ctx.next_stream = next_stream + + output = [] + output_stream = current_stream(get_device(next_stream)) + + with use_stream(prev_stream), use_stream(next_stream): + for x in input: + if torch.is_tensor(x): + y = x.to(get_device(next_stream), non_blocking=True) + output.append(y) + + # 'prev_stream' is not where 'x' has been allocated. + record_stream(x, prev_stream) + # 'y' has been allocated on 'next_stream'. + # It might be used on the current stream captured as 'output_stream'. + record_stream(y, output_stream) + else: + output.append(x) + + return tuple(output) + + @staticmethod + def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: + prev_stream = ctx.prev_stream + next_stream = ctx.next_stream + + grad_input: Deque[Tensor] = deque(maxlen=len(grad_output)) + input_stream = current_stream(get_device(prev_stream)) + + with use_stream(prev_stream), use_stream(next_stream): + for x in reversed(grad_output): + y = x.to(get_device(prev_stream), non_blocking=True) + grad_input.appendleft(y) + + # 'next_stream' is not where 'x' has been allocated. + record_stream(x, next_stream) + # 'y' has been allocated on 'prev_stream'. + # It might be used on the current stream captured as 'input_stream'. + record_stream(y, input_stream) + + grad_streams: Tuple[Optional[Tensor], ...] = (None, None) + return grad_streams + tuple(grad_input) + + +class Wait(torch.autograd.Function): + """Synchronizes a stream to another stream. + + Place it just before you want to start an operation on the next stream, + provided that all operations on the previous stream are done. + + """ + + @staticmethod + # type: ignore[override] + def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input) -> Tensors: + ctx.prev_stream = prev_stream + ctx.next_stream = next_stream + + wait_stream(next_stream, prev_stream) + + return tuple(x.detach() if torch.is_tensor(x) else x for x in input) + + @staticmethod + def backward(ctx: Context, *grad_input: Tensor,) -> Tuple[Optional[Tensor], ...]: + prev_stream = ctx.prev_stream + next_stream = ctx.next_stream + + wait_stream(prev_stream, next_stream) + + grad_streams: Tuple[Optional[Tensor], ...] 
= (None, None) + return grad_streams + grad_input diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5c69e388fe4412a13c5ac3b1850ef13087e6e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py @@ -0,0 +1,54 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Arbitrary dependency between two autograd lanes.""" +from typing import List, Tuple + +import torch +from torch import Tensor + +from .phony import get_phony + +__all__: List[str] = ["fork", "Fork", "join", "Join"] + + +def fork(input: Tensor) -> Tuple[Tensor, Tensor]: + """Branches out from an autograd lane of the given tensor.""" + if torch.is_grad_enabled() and input.requires_grad: + input, phony = Fork.apply(input) + else: + phony = get_phony(input.device, requires_grad=False) + + return input, phony + + +class Fork(torch.autograd.Function): + @staticmethod + def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]: # type: ignore[override] + phony = get_phony(input.device, requires_grad=False) + return input.detach(), phony.detach() + + @staticmethod + def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor: # type: ignore[override] + return grad_input + + +def join(input: Tensor, phony: Tensor) -> Tensor: + """Merge two autograd lanes.""" + if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad): + input = Join.apply(input, phony) + + return input + + +class Join(torch.autograd.Function): + @staticmethod + def forward(ctx: "Join", input: Tensor, phony: Tensor) -> Tensor: # type: ignore[override] + return input.detach() + + @staticmethod + def backward(ctx: "Join", grad_input: Tensor) -> Tuple[Tensor, None]: # type: ignore[override] + return grad_input, None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py new file mode 100644 index 0000000000000000000000000000000000000000..5b8aca25754808eb586a65543153bef0cba877c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py @@ -0,0 +1,234 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Manipulation of micro-batches.""" +import typing +from typing import Any, Callable, List, Union, cast, Sequence + +import torch +from torch import Tensor +import torch.cuda.comm + +__all__: List[str] = ["NoChunk", "Batch", "check", "scatter", "gather"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] +Function = Callable[[TensorOrTensors], Union[List[Any], Tensor]] + + +class NoChunk: + """ + Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor + should not be chunked on the batch dimension and instead be replicated + as-is across all micro-batches. This is useful for tensors which might + not have any 'batch' semantics for the model. 
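The NoChunk wrapper described above is easiest to see in action through the scatter and gather helpers defined later in this same file. The following is a minimal sketch and not part of the diff: it assumes the vendored module path torch.distributed.pipeline.sync.microbatch is importable, and the tensors x and mask are invented for illustration.

import torch
from torch.distributed.pipeline.sync.microbatch import NoChunk, scatter, gather

x = torch.arange(8.0).reshape(4, 2)   # batch dimension of size 4
mask = torch.tensor([1.0, 0.0])       # no batch semantics: replicate as-is

batches = scatter(x, NoChunk(mask), chunks=2)
assert len(batches) == 2
assert torch.equal(batches[0][0], x[:2])   # first micro-batch slice of x
assert torch.equal(batches[0][1], mask)    # mask replicated, not chunked

merged = gather(batches)                   # concatenates tensors position by position
assert torch.equal(merged[0], x)
assert torch.equal(merged[1], torch.cat([mask, mask]))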
+ """ + def __init__(self, inp: Tensor): + if not torch.is_tensor(inp): + raise TypeError(f'NoChunk only supported for tensors, found: {inp}') + self._tensor = inp + + @property + def tensor(self): + return self._tensor + + +class Batch: + """ + An abstraction representing a microbatch in the pipeline. + """ + + def __init__(self, values: Union[List[Any], Tensor]) -> None: + self._values = values + self.atomic = torch.is_tensor(values) + + # Verify at least on tensor + if not self.atomic: + if not any(torch.is_tensor(value) for value in self._values): + raise TypeError(f'No tensors found in batch: {self._values}') + + @property + def tensor(self) -> Tensor: + """Retrieves the underlying tensor.""" + if not self.atomic: + raise AttributeError("not atomic batch") + return cast(Tensor, self._values) + + @property + def values(self): + """Retrieves the underlying values for the batch""" + return self._values + + def find_tensor_idx(self): + """ + Retrieves the index of first tensor found. + """ + if self.atomic: + return 0 + for i, value in enumerate(self._values): + if torch.is_tensor(value): + return i + + raise TypeError("No tensor found!") + + def get_device(self): + """ + Retrieves the device for this microbatch. + """ + if self.atomic: + return self._values.device # type: ignore[union-attr] + + for value in self._values: + if torch.is_tensor(value): + return value.device + + def call(self, function: Function) -> "Batch": + """Calls a function on the microbatch. It also wraps + the output with :class:`Batch`. + """ + if self.atomic: + return Batch(function(self._values)) + else: + return Batch(function(*self._values)) + + def __repr__(self) -> str: + return f"Batch[atomic={self.atomic!r}]({self._values!r})" + + def __iter__(self): + if self.atomic: + yield self._values + else: + yield from self._values + + def __len__(self) -> int: + return 1 if self.atomic else len(self._values) + + def __getitem__(self, index: int): + if not self.atomic: + return self._values[index] + + if index != 0: + raise IndexError("atomic batch allows index 0 only") + + return self._values + + # NOTE(sublee): pyflakes can't detect "overload" instead of "typing.overload". + @typing.overload + def __setitem__(self, index: int, value: Tensor) -> None: + ... + + @typing.overload + def __setitem__(self, index: slice, value: Tensors) -> None: + ... + + def __setitem__(self, index: Union[int, slice], value) -> None: + if isinstance(index, int): + self._setitem_by_index(index, value) + else: + self._setitem_by_slice(index, value) + + def _setitem_by_index(self, index: int, value) -> None: + if not self.atomic: + i = index + self._values = self._values[:i] + (value,) + self._values[i + 1 :] # type: ignore[operator] + return + + if index != 0: + raise IndexError("atomic batch allows index 0 only") + + self._values = value + + def _setitem_by_slice(self, index: slice, value) -> None: + if not (index.start is index.stop is index.step is None): # noqa: E714 + raise NotImplementedError("only slice [:] supported") + + if not self.atomic: + self._values = value + return + + if len(value) != 1: + raise IndexError("atomic batch cannot be replaced with multiple tensors") + + self._values = value[0] + + +def check(first_device, *inputs) -> None: + """ + Checks whether the input contains at least one tensor and each tensor is + on the same device as the first partition. 
+ + Raises: + ValueError: input does not contain at least one tensor + + """ + + if not any(torch.is_tensor(input) for input in inputs): + raise TypeError(f'inputs do not have any tensors: {inputs}') + if any(torch.is_tensor(input) and input.device != first_device for input in inputs): + raise ValueError('All inputs should be on the same device as the first partition') + + +def scatter(*inputs, chunks: int) -> List[Batch]: + """Splits an input mini-batch into multiple micro-batches.""" + if len(inputs) == 1 and isinstance(inputs[0], Tensor): + return [Batch(x) for x in inputs[0].chunk(chunks)] + + batches: List[Any] = [[] for _ in range(chunks)] + # Actual number of chunks produced + num_chunks = -1 + for input in inputs: + if torch.is_tensor(input): + # Chunk only tensors. + tensors = input.chunk(chunks) + + # Validate number of chunks equal across all inputs. + if num_chunks != -1 and num_chunks != len(tensors): + raise RuntimeError(f'Found different number of chunks produced for inputs: {num_chunks} and {len(tensors)}') + num_chunks = len(tensors) + + for i, tensor in enumerate(tensors): + batches[i].append(tensor) + else: + # Replicate non-tensors or tensors wrapped with 'NoChunk'. + for i in range(chunks): + if isinstance(input, NoChunk): + # Extract the tensor out. + batches[i].append(input.tensor) + else: + batches[i].append(input) + + # Truncate to actual number of chunks + batches = batches[:num_chunks] + + return [Batch(x) for x in batches] + + +def gather(outputs: List[Batch]): + """Concatenates output micro-batches into a mini-batch.""" + output: Any + + if outputs[0].atomic: + tensors = tuple(b.tensor for b in outputs) + output = torch.cat(tensors) + else: + output_buf: List[Any] = [] + for i in range(len(outputs[0])): + output_type = type(outputs[0][i]) + current_outputs = [] + for batch in outputs: + if output_type != type(batch[i]): + raise TypeError(f'Types for microbatch outputs do not match, found: {output_type} and {type(batch[i])}') + current_outputs.append(batch[i]) + + if torch.is_tensor(outputs[0][i]): + output_buf.append(torch.cat(current_outputs)) + else: + output_buf.append(current_outputs) + + output = tuple(output_buf) + + return output diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py new file mode 100644 index 0000000000000000000000000000000000000000..012926699cfbc53d85b4dd8e2bdeb14658506cb3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py @@ -0,0 +1,50 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Provides phony for arbitrary dependency in a autograd graph.""" +from typing import Dict, List, Tuple + +import torch +from torch import Tensor + +from .stream import default_stream, use_stream + +__all__: List[str] = ["get_phony"] + + +_phonies: Dict[Tuple[torch.device, bool], Tensor] = {} + + +def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor: + """Get a phony. Phony is tensor without space. + + It is useful to make arbitrary dependency in a autograd graph because it doesn't require any + gradient accumulation. + + .. note:: + + Phonies for each device are cached. If an autograd function gets a phony + internally, the phony must be detached to be returned. 
Otherwise, the + autograd engine will mutate the cached phony in-place:: + + class Phonify(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + phony = get_phony(input.device, requires_grad=False) + return phony.detach() # detach() is necessary. + + """ + key = (device, requires_grad) + + try: + phony = _phonies[key] + except KeyError: + with use_stream(default_stream(device)): + phony = torch.empty(0, device=device, requires_grad=requires_grad) + + _phonies[key] = phony + + return phony diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..5e61341d9ad9f36199ead474245f81eaaa95ef6f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py @@ -0,0 +1,490 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""The Pipe interface.""" +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Union, Sequence, Tuple, cast + +import torch +from torch import Tensor, nn +from torch.distributed.rpc import RRef +import torch.autograd +import torch.cuda + +from . import microbatch +from .batchnorm import DeferredBatchNorm +from .pipeline import Pipeline +from .skip.layout import inspect_skip_layout +from .skip.skippable import verify_skippables +from .stream import AbstractStream, new_stream + +__all__ = ["Pipe", "BalanceError", "PipeSequential", "WithDevice"] + + +Device = Union[torch.device, int, str] +Devices = Union[Iterable[Device], List[Device]] + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +if TYPE_CHECKING: + # Typechecking: nn.Module is not a Generic + Module = nn.Module[TensorOrTensors] # type: ignore[type-arg] + NamedModules = OrderedDict[str, Module] +else: + Module = nn.Module + NamedModules = OrderedDict + + +def _recommend_auto_balance(message: str) -> str: + """Expands a message with recommendation to :mod:`torchpipe.balance`.""" + return f"""{message} + +If your model is still under development, its optimal balance would change +frequently. In this case, we highly recommend 'torch.distributed.pipeline.sync.balance' for +naive automatic balancing: + + from torch.distributed.pipeline.sync import Pipe + from torch.distributed.pipeline.sync.balance import balance_by_time + + partitions = torch.cuda.device_count() + sample = torch.empty(...) + balance = balance_by_time(partitions, model, sample) + + model = Pipe(model, balance, ...) 
+""" + + +def _verify_module(module: nn.Sequential) -> None: + if not isinstance(module, nn.Sequential): + raise TypeError("module must be nn.Sequential to be partitioned") + + named_children = list(module.named_children()) + if len(named_children) != len(module): + raise ValueError("module with duplicate children is not supported") + + +def _verify_splitting( + module: nn.Sequential, partitions: List[nn.Sequential], devices: List[torch.device] +) -> None: + num_parameters = len(list(module.parameters())) + num_child_parameters = sum(len(list(child.parameters())) for child in module.children()) + if num_parameters == num_child_parameters: + return + + for i in range(len(partitions)): + for j in range(i + 1, len(partitions)): + parti = partitions[i] + partj = partitions[j] + if devices[i] == devices[j]: + continue + for p in parti.parameters(): + for q in partj.parameters(): + if p is q: + raise ValueError("module with duplicate parameters on distinct devices is not supported") + + +class BalanceError(ValueError): + pass + + +def _retrieve_device(module: nn.Module) -> torch.device: + """Validates all parameters in the Module have the same device and returns + the appropriate device. + + Args: + An ``nn.Module`` to process. + + Returns: + ``torch.Device`` for the entire module. + + Raises: + ValueError: + If devices for ``nn.Module`` parameters are not all same. + """ + + device = None + for parameter in module.parameters(): + if device is None: + device = parameter.device + elif device != parameter.device: + raise ValueError( + f'nn.Module: {module}, should have all parameters on a single device,' + ' please use .to() to place the module on a single device') + + return device if device is not None else torch.device("cpu") + + +class PipeSequential(nn.Sequential): + """ + Pipe variant of ``nn.Sequential`` which supports multiple inputs. + """ + + def forward(self, *inputs): + for module in self: + if isinstance(inputs, Tuple): # type: ignore[arg-type] + inputs = module(*inputs) + else: + # Don't expand single variables (ex: lists/Tensor) + inputs = module(inputs) + return inputs + + +class WithDevice(nn.Module): + """ + Wraps an ``nn.Module`` which is part of ``nn.Sequential`` passed into :class:`Pipe` + that overrides the device for that module. In cases where :class:`Pipe` + can't implicitly determine the device for the module and places it on CPU, + this wrapper can be used to override the implicit behavior and explicitly + specify which device a module should run on. + + The provided module is also moved to the given device via ``.to(device)`` + by :class:`Pipe` + + Args: + module(:class:`torch.nn.Module`): The module to be wrapped. + device(:class:`torch.device`): The device to run the module on. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> fc1 = nn.Linear(16, 8).cuda(0) + >>> fc2 = nn.Linear(8, 4).cuda(1) + >>> dropout = nn.Dropout() + >>> + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1) + >>> # Dropout does not have any parameters/buffers, but we want to + >>> # run it on cuda:1 to avoid any GPU to CPU transfers. 
+ >>> model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1')) + >>> # xdoctest: +SKIP("Needs RPC framework init") + >>> model = Pipe(model, chunks=8) + """ + def __init__(self, module: nn.Module, device: torch.device): + super().__init__() + self._module = module + self._device = torch.device(device) + + def forward(self, *args, **kwargs): + return self._module(*args, **kwargs) + + @property + def module(self): + return self._module + + @property + def device(self): + return self._device + + +def _assemble_partition(modules: List[nn.Module]): + modules_list: List[nn.Module] = [] + for module in modules: + if isinstance(module, nn.Sequential): + modules_list.extend(module.children()) + else: + modules_list.append(module) + return PipeSequential(*modules_list) + + +def _split_module(modules: nn.Sequential) -> Tuple[List[nn.Sequential], List[torch.device]]: + partitions = [] + devices = [] + + current_partition = [] + current_device = None + for name, module in modules.named_children(): + if isinstance(module, WithDevice): + # Process device override and move module to appropriate device. + device = module.device + module = module.module + module.to(device) + else: + device = _retrieve_device(module) + if current_device is not None and (current_device != device or device.type == 'cpu'): + partitions.append(_assemble_partition(current_partition)) + devices.append(current_device) + current_partition = [] + current_device = device + current_partition.append(module) + + if current_device is not None: + partitions.append(_assemble_partition(current_partition)) + devices.append(current_device) + + partitions = cast(List[nn.Sequential], nn.ModuleList(partitions)) + + return partitions, devices + + +MOVING_DENIED = TypeError("denied to move parameters and buffers, because Pipe should manage device placement") + + +class Pipe(Module): + """Wraps an arbitrary :class:`nn.Sequential ` module + to train on using synchronous pipeline parallelism. If the module requires + lots of memory and doesn't fit on a single GPU, pipeline parallelism is a + useful technique to employ for training. + + The implementation is based on the torchgpipe_ paper. + + .. _torchgpipe: https://arxiv.org/abs/2004.09910 + + Pipe combines pipeline parallelism with checkpointing to reduce peak + memory required to train while minimizing device under-utilization. + + You should place all the modules on the appropriate devices and wrap them + into an :class:`nn.Sequential ` module defining the + desired order of execution. If a module does not contain any + parameters/buffers, it is assumed this module should be executed on CPU + and appropriate input tensors to the module are moved to CPU before + execution. This behavior can be overridden by the :class:`WithDevice` + wrapper which can be used to explicitly specify which device a module + should run on. + + Args: + module (:class:`nn.Sequential `): + sequential module to be parallelized using pipelining. Each module + in the sequence has to have all of its parameters on a single + device. Each module in the sequence has to either be an nn.Module + or :class:`nn.Sequential ` (to combine multiple + sequential modules on a single device) + chunks (int): + number of micro-batches (default: ``1``) + checkpoint (str): + when to enable checkpointing, one of ``'always'``, + ``'except_last'``, or ``'never'`` (default: ``'except_last'``). 
+ ``'never'`` disables checkpointing completely, ``'except_last'`` + enables checkpointing for all micro-batches except the last one + and ``'always'`` enables checkpointing for all micro-batches. + deferred_batch_norm (bool): + whether to use deferred ``BatchNorm`` moving statistics (default: + :data:`False`). If set to :data:`True`, we track statistics across + multiple micro-batches to update the running statistics per + mini-batch. + + Raises: + TypeError: + the module is not a :class:`nn.Sequential `. + ValueError: + invalid arguments + + Example:: + Pipeline of two FC layers across GPUs 0 and 1. + + >>> # Need to initialize RPC framework first. + >>> # xdoctest: +SKIP + >>> os.environ['MASTER_ADDR'] = 'localhost' + >>> os.environ['MASTER_PORT'] = '29500' + >>> torch.distributed.rpc.init_rpc('worker', rank=0, world_size=1) + >>> + >>> # Build pipe. + >>> fc1 = nn.Linear(16, 8).cuda(0) + >>> fc2 = nn.Linear(8, 4).cuda(1) + >>> model = nn.Sequential(fc1, fc2) + >>> model = Pipe(model, chunks=8) + >>> input = torch.rand(16, 16).cuda(0) + >>> output_rref = model(input) + + .. note:: + You can wrap a :class:`Pipe` model with + :class:`torch.nn.parallel.DistributedDataParallel` only when the + checkpoint parameter of :class:`Pipe` is ``'never'``. + + .. note:: + :class:`Pipe` only supports intra-node pipelining currently, but + will be expanded to support inter-node pipelining in the future. + The forward function returns an :class:`~torch.distributed.rpc.RRef` + to allow for inter-node pipelining in the future, where the output + might be on a remote host. For intra-node pipelining you can use + :meth:`~torch.distributed.rpc.RRef.local_value` to retrieve the + output locally. + + .. warning:: + :class:`Pipe` is experimental and subject to change. + """ + + def __init__( + self, + module: nn.Sequential, + chunks: int = 1, + checkpoint: str = "except_last", + deferred_batch_norm: bool = False, + ) -> None: + super().__init__() + + # Check if RPC framework is initialized. + if not torch.distributed.rpc._is_current_rpc_agent_set(): + raise RuntimeError( + 'Please initialize RPC framework for Pipe using ' + 'torch.distributed.rpc.init_rpc') + + chunks = int(chunks) + checkpoint = str(checkpoint) + + if chunks <= 0: + raise ValueError("number of chunks must be positive integer") + if checkpoint not in ["always", "except_last", "never"]: + raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'") + + _verify_module(module) + + # Verify if the underlying skippable modules satisfy integrity. The + # integrity can be verified before forward() because it is static. + verify_skippables(module) + + self.chunks = chunks + self.checkpoint = checkpoint + + if deferred_batch_norm: + module = DeferredBatchNorm.convert_deferred_batch_norm(module, chunks) + + self.partitions, self.devices = _split_module(module) + _verify_splitting(module, self.partitions, self.devices) + + self._copy_streams: List[List[AbstractStream]] = [] + self._skip_layout = inspect_skip_layout(self.partitions) + + # Separate CUDA streams for copy. + copy_streams = self._ensure_copy_streams() + + # The micro-batch index where the checkpointing stops. 
+ checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint] + + self.pipeline = Pipeline(self.partitions, self.devices, copy_streams, self._skip_layout, checkpoint_stop) + + def __len__(self) -> int: + """Counts the length of the underlying sequential module.""" + return sum(len(p) for p in self.partitions) + + def __getitem__(self, index: int) -> nn.Module: + """Gets a layer in the underlying sequential module.""" + partitions = self.partitions + if index < 0: + partitions = partitions[::-1] + + for partition in partitions: + try: + return partition[index] + except IndexError: + pass + + shift = len(partition) + + if index < 0: + index += shift + else: + index -= shift + + raise IndexError + + def __iter__(self) -> Iterator[nn.Module]: + """Iterates over children of the underlying sequential module.""" + for partition in self.partitions: + yield from partition + + # Pipe should manage the device of each partition. + # Deny cuda(), cpu(), and to() with device, by TypeError. + def cuda(self, device: Optional[Device] = None) -> "Pipe": + raise MOVING_DENIED + + def cpu(self) -> "Pipe": + raise MOVING_DENIED + + def to(self, *args: Any, **kwargs: Any) -> "Pipe": + # Deny these usages: + # + # - to(device[, dtype, non_blocking]) + # - to(tensor[, non_blocking]) + # + # But allow this: + # + # - to(dtype[, non_blocking]) + # + if "device" in kwargs or "tensor" in kwargs: + raise MOVING_DENIED + + if args: + if isinstance(args[0], (torch.device, int, str)): + raise MOVING_DENIED + if torch.is_tensor(args[0]): + raise MOVING_DENIED + + return super().to(*args, **kwargs) + + def _ensure_copy_streams(self) -> List[List[AbstractStream]]: + """Ensures that :class:`Pipe` caches CUDA streams for copy. + + It's worth to cache CUDA streams although PyTorch already manages a + pool of pre-allocated CUDA streams, because it may reduce GPU memory + fragmentation when the number of micro-batches is small. + + """ + if not self._copy_streams: + for device in self.devices: + self._copy_streams.append([new_stream(device) for _ in range(self.chunks)]) + + return self._copy_streams + + def forward(self, *inputs) -> RRef: + """ + Processes a single input mini-batch through the pipe and returns an + :class:`~torch.distributed.rpc.RRef` pointing to the output. + :class:`Pipe` is a fairly transparent module wrapper. It doesn't + modify the input and output signature of the underlying module. But + there's type restriction. Input and output have to contain at least one + tensor. This restriction is applied at partition boundaries too. + + The sequence of inputs are fed into the first stage of the pipeline as + ``*inputs``. As a result the positional args for this function should + match the positional args for the first stage of the pipeline. The same + condition applies for output of one stage of the pipeline which is the + input for the next stage. + + The input tensor is split into multiple micro-batches based on the + ``chunks`` parameter used to initialize :class:`Pipe`. The batch size + is assumed to be the first dimension of the tensor and if the batch + size is less than ``chunks``, the number of micro-batches is equal to + the batch size. + + Only tensors are split into multiple micro-batches, non-Tensor inputs + are just replicated as-is in each micro-batch. For non-Tensor outputs + in the last stage of the pipeline, they are aggregated as a ``List`` + and returned the user. 
For example, if you have 2 micro-batches + returning the integer 5, the user would receive the consolidated + output of `[5, 5]` + + All the input tensors need to be on the same device as the first + partition of the pipeline. + + If a tensor is wrapped with the :class:`NoChunk` wrapper, the tensor + is not split across micro-batches and is replicated as-is similar to + non-tensors. + + Args: + inputs: input mini-batch + + Returns: + :class:`~torch.distributed.rpc.RRef` to the output of the mini-batch + + Raises: + TypeError: input doesn't contain at least one tensor + + """ + first_partition_device = self.devices[0] if len(self.devices) != 0 else torch.device("cpu") + microbatch.check(first_partition_device, *inputs) + + if not self.devices: + # Empty sequential module is not illegal. + return RRef(*inputs) + + # Divide a mini-batch into micro-batches. + batches = microbatch.scatter(*inputs, chunks=self.chunks) + + # Run pipeline parallelism. + self.pipeline.run(batches) + + # Merge the micro-batches into one mini-batch. + output = microbatch.gather(batches) + return RRef(output) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..8eccc68183fa947248baaa30dc7d2fa722789157 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py @@ -0,0 +1,255 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""The pipeline parallelism of Pipe.""" +from queue import Queue +from types import TracebackType +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Type, Union, cast, Sequence + +import torch +from torch import Tensor, nn +from torch.autograd.profiler import record_function + +from .checkpoint import Checkpointing +from .copy import Copy, Wait +from .dependency import fork, join +from .microbatch import Batch +from .skip.layout import SkipLayout +from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker +from .stream import AbstractStream, current_stream, use_device +from .worker import Task, create_workers + +__all__: List[str] = ["Pipeline"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType] + +# Queue is generic only in stubs. +# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime +if TYPE_CHECKING: + InQueue = Queue[Optional["Task"]] + OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]] +else: + InQueue = Queue + OutQueue = Queue + + +def _depend(fork_from: Batch, join_to: Batch) -> None: + fork_from_idx = fork_from.find_tensor_idx() + join_to_idx = join_to.find_tensor_idx() + + fork_from[fork_from_idx], phony = fork(fork_from[fork_from_idx]) + join_to[join_to_idx] = join(join_to[join_to_idx], phony) + + +def _copy(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None: + batch[:] = Copy.apply(prev_stream, next_stream, *batch) + # Gradients are only supported for float Tensors. 
+ batch[:] = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch]) + + +def _wait(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None: + batch[:] = Wait.apply(prev_stream, next_stream, *batch) + # Gradients are only supported for float Tensors. + batch[:] = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch]) + + +def _clock_cycles(m: int, n: int) -> Iterable[List[Tuple[int, int]]]: + """Generate schedules for each clock cycle.""" + # m: number of micro-batches + # n: number of partitions + # i: index of micro-batch + # j: index of partition + # k: clock number + # + # k (i,j) (i,j) (i,j) + # - ----- ----- ----- + # 0 (0,0) + # 1 (1,0) (0,1) + # 2 (2,0) (1,1) (0,2) + # 3 (2,1) (1,2) + # 4 (2,2) + for k in range(m + n - 1): + yield [(k - j, j) for j in range(max(1 + k - m, 0), min(1 + k, n))] + + +class Pipeline: + """The pipeline parallelism for Pipe.""" + + def __init__( + self, + partitions: List[nn.Sequential], + devices: List[torch.device], + copy_streams: List[List[AbstractStream]], + skip_layout: SkipLayout, + checkpoint_stop: int, + ) -> None: + self.partitions = partitions + self.devices = devices + self.copy_streams = copy_streams + self.skip_layout = skip_layout + self.checkpoint_stop = checkpoint_stop + (self.in_queues, self.out_queues) = create_workers(devices) + + def run(self, batches: List[Batch]) -> None: + """Runs pipeline parallelism. + + It modifies the given batches in place. + + """ + partitions = self.partitions + devices = self.devices + skip_layout = self.skip_layout + + m = len(batches) + n = len(partitions) + + skip_trackers = [SkipTrackerThroughPotals(skip_layout) for _ in batches] + + for schedule in _clock_cycles(m, n): + self.fence(batches, schedule, skip_trackers) + self.compute(batches, schedule, skip_trackers) + + def fence( + self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals], + ) -> None: + """Copy micro-batches after computation for the previous micro-batches.""" + copy_streams = self.copy_streams + skip_layout = self.skip_layout + + for i, j in schedule: + # Ensure that batches[i-1] is executed after batches[i] in + # backpropagation by an explicit dependency. + if i != 0 and j != 0: + _depend(batches[i - 1], batches[i]) + + next_stream = copy_streams[j][i] + + for prev_j, ns, name in skip_layout.copy_policy(j): + prev_stream = copy_streams[prev_j][i] + skip_trackers[i].copy(batches[i], prev_stream, next_stream, ns, name) + + if j != 0: + prev_stream = copy_streams[j - 1][i] + _copy(batches[i], prev_stream, next_stream) + + def compute( + self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals], + ) -> None: + """Run tasks with synchronization to copy streams.""" + partitions = self.partitions + devices = self.devices + copy_streams = self.copy_streams + checkpoint_stop = self.checkpoint_stop + + # Disable checkpointing if in eval mode. + if not self.partitions[0].training: + checkpoint_stop = 0 + + n = len(partitions) + streams = [current_stream(d) for d in devices] + exc_info: Optional[ExcInfo] = None + + # With checkpointing, the autograd graph looks like this diagram: + # ┌─────┸──────┐ + # │ Copy │ + # └─────┰──────┘ (fence) + # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─ + # ┃ (compute) + # ┌─────┸──────┐ + # │ Wait │ [1] Synchronize the current stream with the copy stream. 
+ # └─────┰──────┘ + # ┌─────┸──────┐ + # │ Checkpoint │ [2] Compute a partition within checkpointing. + # └─────┰──────┘ + # ┌─────┸──────┐ + # │ Wait │ [3] Synchronize the copy stream with the current stream. + # └─────┰──────┘ + # ┠ ─ ─ ─ ┐ + # ┃ ┌─────┴─────┐ + # ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation. + # ┃ └─────┬─────┘ + # ┠ ─ ─ ─ ┘ + # ┃ + # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─ + # ┌─────┸──────┐ (fence) + # │ Copy │ + # └─────┰──────┘ + for i, j in schedule: + batch = batches[i] + partition = partitions[j] + + # Synchronize with the copied input. ([1] in the diagram) + if j != 0: + _wait(batch, copy_streams[j][i], streams[j]) + + # Determine whether checkpointing or not. + checkpoint = i < checkpoint_stop + if checkpoint: + + def function( + *inputs, + partition: nn.Module = partition, + skip_tracker: SkipTrackerThroughPotals = skip_trackers[i], + chunk_id: int = i, + part_id: int = j, + ) -> TensorOrTensors: + with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)): + return partition(*inputs) + + chk = Checkpointing(function, batch) # type: ignore[arg-type] + task = Task(streams[j], compute=chk.checkpoint, finalize=chk.recompute) + del function, chk + + else: + + def compute( + batch: Batch = batch, + partition: nn.Module = partition, + skip_tracker: SkipTrackerThroughPotals = skip_trackers[i], + chunk_id: int = i, + part_id: int = j, + ) -> Batch: + with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)): + return batch.call(partition) + + task = Task(streams[j], compute=compute, finalize=None) + del compute + + # Compute tasks in parallel. ([2] in the diagram) + self.in_queues[j].put(task) + + for i, j in schedule: + ok, payload = self.out_queues[j].get() + + # Hold the first exception. + if exc_info is not None: + continue + elif not ok: + exc_info = cast(ExcInfo, payload) + continue + + task, batch = cast(Tuple[Task, Batch], payload) + + # The copy stream synchronizes to copy the output. ([3] in the + # diagram) + if j != n - 1: + _wait(batch, streams[j], copy_streams[j][i]) + + # Finalize tasks. If checkpointing is enabled, here the + # recomputation is scheduled at backpropagation. ([4] in the + # diagram) + with use_device(devices[j]): + task.finalize(batch) + + batches[i] = batch + + # Fail at the first exception. + if exc_info is not None: + raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bdcb913867a735374cb1df625bbacfc2802b5c1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py @@ -0,0 +1,11 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
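To make the schedule consumed by Pipeline.run concrete, the sketch below prints the clock cycles for 3 micro-batches over 2 partitions. It imports the private _clock_cycles helper from the pipeline module added above, so treat it as an illustration of the scheduling rule rather than a supported API.

from torch.distributed.pipeline.sync.pipeline import _clock_cycles

# Each pair is (micro-batch index i, partition index j) executed in that clock tick.
for clock, schedule in enumerate(_clock_cycles(3, 2)):
    print(clock, schedule)
# 0 [(0, 0)]
# 1 [(1, 0), (0, 1)]
# 2 [(2, 0), (1, 1)]
# 3 [(2, 1)]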
+"""Supports efficiency with skip connections.""" +from .namespace import Namespace +from .skippable import pop, skippable, stash, verify_skippables + +__all__ = ["skippable", "stash", "pop", "verify_skippables", "Namespace"] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f19fcfa269c77ba9a603ebcba86fc1240bea186 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d81b9ce13ed8aa263565713d57a9cb159888048a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4f69be61743cea5d396d0a533602fafa2da559a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f52b75d9ce1ae52d35a690078a3828aaf4cb440e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c3d78c63f122b1b09cc79337bef3b2fed50e0e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558e6de2af3a1084afeb3b6f451b8d37c242e140 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py new file mode 100644 index 0000000000000000000000000000000000000000..04d76d34ea16640c94e3d377e3f7ba70ab1689bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py @@ -0,0 +1,92 @@ +# Copyright 2019 
Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Static skip connection layout of ``@skippable`` modules.""" +from typing import Dict, Iterable, List, Tuple + +from torch import nn + +from .namespace import Namespace + +__all__: List[str] = [] + + +class SkipLayout: + """Represents a skip connection layout across partitions.""" + + # Skip routes indexed by 'ns, name': {(ns, name): (prev_j, next_j), ...} + by_ns_name: Dict[Tuple[Namespace, str], Tuple[int, int]] + + # Skip routes indexed by partition number 'j': [[next_j]: [(prev_j, ns, name), ...], ...] + by_partition: List[List[Tuple[int, Namespace, str]]] + + def __init__(self, num_partitions: int, skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]],) -> None: + # The skip routes are already indexed by 'ns, name'. + self.by_ns_name = skip_routes + + # Index skip routes by partition number 'j'. + self.by_partition = [[] for _ in range(num_partitions)] + + for (ns, name), (prev_j, next_j) in skip_routes.items(): + self.by_partition[next_j].append((prev_j, ns, name)) + + for p in self.by_partition: + p.sort() + + def copy_policy(self, next_j: int) -> Iterable[Tuple[int, Namespace, str]]: + """Generates skip routes for the given destination partition number. + The skip routes are sorted by source partition number in ascending + order. + + Yields: + Each tuple of (source partition number, namespace, name). + + """ + for prev_j, ns, name in self.by_partition[next_j]: + if prev_j == next_j: + # This skip tensor will be popped at the same partition where + # it is stashed. In this case, copy is not required. + continue + + yield (prev_j, ns, name) + + def requires_copy(self, ns: Namespace, name: str) -> bool: + """Whether the given namespace and name requires partition-to-partition + copy or not. + """ + prev_j, next_j = self.by_ns_name.get((ns, name), (-1, -1)) + return prev_j != next_j + + +def inspect_skip_layout(partitions: List[nn.Sequential]) -> SkipLayout: + """Inspects the skip connection layout in the given partitions.""" + # NOTE(sublee): Hide circular import inside this subroutine. Circular + # import is not ideal but placing this logic near to SkipLayout may + # increase cohesion of code. + from .skippable import Skippable + + skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]] = {} + stashed_at: Dict[Tuple[Namespace, str], int] = {} + + for j, partition in enumerate(partitions): + def inspect_layer(layer): + if not isinstance(layer, Skippable): + return + + for ns, name in layer.stashable(): + stashed_at[(ns, name)] = j + + for ns, name in layer.poppable(): + prev_j = stashed_at.pop((ns, name)) + skip_routes[(ns, name)] = (prev_j, j) + + if isinstance(partition, nn.Sequential): + for layer in partition: + inspect_layer(layer) + else: + inspect_layer(partition) + + return SkipLayout(len(partitions), skip_routes) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py new file mode 100644 index 0000000000000000000000000000000000000000..67218c3678e418df5f1ab9851d9e4e918ec308b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py @@ -0,0 +1,50 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
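Before moving on to namespaces, a minimal sketch of how the SkipLayout above routes a skip tensor between partitions; the name 'hidden', the partition numbers, and the route are invented for illustration, and None stands in for the default namespace (the namespace module below registers None as a valid Namespace).

from torch.distributed.pipeline.sync.skip.layout import SkipLayout

# One route: skip tensor 'hidden' is stashed in partition 0 and popped in partition 2.
layout = SkipLayout(num_partitions=3, skip_routes={(None, "hidden"): (0, 2)})

assert layout.requires_copy(None, "hidden")              # crosses partition boundaries
assert list(layout.copy_policy(2)) == [(0, None, "hidden")]
assert list(layout.copy_policy(1)) == []                 # nothing is routed into partition 1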
+# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Provides isolated namespace of skip tensors.""" +import abc +from functools import total_ordering +from typing import Any +import uuid + +__all__ = ["Namespace"] + + +@total_ordering +class Namespace(metaclass=abc.ABCMeta): + """Namespace for isolating skip tensors used by :meth:`isolate() + `. + """ + + __slots__ = ("id",) + + def __init__(self) -> None: + self.id = uuid.uuid4() + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash(self.id) + + # Namespaces should support ordering, since SkipLayout will sort tuples + # including a namespace. But actual order between namespaces is not + # important. That's why they are ordered by version 4 UUID which generates + # random numbers. + def __lt__(self, other: Any) -> bool: + if isinstance(other, Namespace): + return self.id < other.id + return False + + def __eq__(self, other: object) -> bool: + if isinstance(other, Namespace): + return self.id == other.id + return False + + +# 'None' is the default namespace, +# which means that 'isinstance(None, Namespace)' is 'True'. +Namespace.register(type(None)) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py new file mode 100644 index 0000000000000000000000000000000000000000..f3484a1b69d57b087787badb2915b5efc94adeb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py @@ -0,0 +1,231 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Portal keeps a tensor in the pocket plane. The tensor becomes hidden to the +autograd engine. The shared context of three functions (:class:`PortalBlue`, +:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is +one of the most important feature of :mod:`torchpipe.skip`. + +The metaphor is inspired by Portal™ from Valve. + +""" +from typing import List, Optional, Tuple + +import torch +from torch import Tensor + +from ..copy import Context as CopyContext +from ..copy import Copy +from ..phony import get_phony +from ..stream import AbstractStream, get_device + +__all__: List[str] = [] + + +class Portal: + """A portal for a tensor.""" + + def __init__(self, tensor: Optional[Tensor], tensor_life: int) -> None: + self.put_tensor(tensor, tensor_life) + self.grad: Optional[Tensor] = None + + def blue(self) -> Tensor: + """Creates a :class:`PortalBlue` which hides the underlying tensor from + the autograd engine. + + Join the returning phony to the main lane of the autograd graph to + assure the correct backpropagation:: + + PortalBlue --+ + | + ---------- Join -- + + """ + tensor = self.use_tensor() + + if tensor is None: + return get_phony(torch.device("cpu"), requires_grad=False) + + return PortalBlue.apply(self, tensor) + + def orange(self, phony: Tensor) -> Optional[Tensor]: + """Creates a :class:`PortalOrange` which retrieves the hidden tensor + without losing ability of backpropagation. 
+ + Give a phony forked from the main lane of an autograd graph:: + + +-- PortalOrange --+ + | | + -- Fork --------- f(a, b) -- + + """ + self.check_tensor_life() + + if self.tensor is None: + return self.use_tensor() + + return PortalOrange.apply(self, phony) + + def copy(self, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor,) -> Tensor: + """Copies the hidden tensor by a :class:`PortalCopy`. + + Give a phony and use the returning phony to keep backpropagation:: + + +-- PortalCopy --+ + | | + -- Fork ---------- Join -- + + """ + if self.tensor is None: + return get_phony(torch.device("cpu"), requires_grad=False) + + return PortalCopy.apply(self, prev_stream, next_stream, phony) + + def check_tensor_life(self) -> None: + if self.tensor_life <= 0: + raise RuntimeError("tensor in portal has been removed") + + def put_tensor(self, tensor: Optional[Tensor], tensor_life: int) -> None: + """Stores a tensor into this portal.""" + # [Life of Tensor through Portal] + # + # The tensor can be retrieved by use_tensor() up to 'tensor_life' + # times. When the life becomes 0, the tensor will be deleted for + # deallocation in CUDA memory. + # + # The below events participate in a tensor through a portal. + # Note that [x] denotes the events which call use_tensor(): + # + # 1. [x] blue() + # 2. [ ] PortalBlue.forward + # 3. [ ] copy() + # 4. [ ] PortalCopy.forward + # 5. [ ] orange() + # 6. [x] PortalOrange.forward + # - - - - - - - - - - - - - - - - - - - - - - - - - - - + # 7. [ ] orange() (recomputed) + # 8. [x] PortalOrange.forward (recomputed) + # 9. [ ] PortalOrange.backward + # 10. [ ] PortalCopy.backward + # 11. [x] blue() (recomputed) + # 12. [ ] PortalBlue.forward (recomputed) + # 13. [ ] PortalBlue.backward + # + self.tensor_life = tensor_life + + if tensor_life > 0: + self.tensor = tensor + else: + self.tensor = None + + def use_tensor(self) -> Optional[Tensor]: + """Retrieves the underlying tensor and decreases the tensor life. When + the life becomes 0, it the tensor will be removed. + """ + self.check_tensor_life() + + tensor = self.tensor + + self.tensor_life -= 1 + + if self.tensor_life <= 0: + self.tensor = None + + return tensor + + def put_grad(self, grad: Tensor) -> None: + """Stores a gradient into this portal.""" + self.grad = grad + + def use_grad(self) -> Tensor: + """Retrieves and removes the underlying gradient. The gradient is + always ephemeral. + """ + if self.grad is None: + raise RuntimeError("grad in portal has been removed or never set") + + grad = self.grad + self.grad = None + return grad + + +# Common interface between :class:`PortalBlue`, :class:`PortalOrange`, and +# :class:`PortalCopy`. +class Context(CopyContext): + portal: Portal + + +class PortalBlue(torch.autograd.Function): + """Hides a tensor from the autograd engine by a :class:`Portal`.""" + + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, + portal: Portal, + # This tensor must be retrieved by portal.use_tensor(). + tensor: Tensor, + ) -> Tensor: + ctx.portal = portal + + phony = get_phony(tensor.device, requires_grad=False) + return phony.detach() + + @staticmethod + # type: ignore[override] + def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, Tensor]: + # The paired PortalOrange should keep the gradient. 
+ grad = ctx.portal.use_grad() + return None, grad + + +class PortalOrange(torch.autograd.Function): + """Retrieves the hidden tensor from a :class:`Portal`.""" + + @staticmethod + # type: ignore[override] + def forward(ctx: Context, portal: Portal, phony: Tensor) -> Tensor: + ctx.portal = portal + + tensor = portal.use_tensor() + assert tensor is not None + + return tensor.detach() + + @staticmethod + def backward(ctx: Context, grad: Tensor) -> Tuple[None, None]: # type: ignore[override] + # The paired PortalBlue will use the gradient. + ctx.portal.put_grad(grad) + return None, None + + +class PortalCopy(torch.autograd.Function): + """Copies the hidden tensor in a :class:`Portal`. It replaces the hidden + tensor with copied one. + """ + + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, portal: Portal, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor, + ) -> Tensor: + ctx.portal = portal + + assert portal.tensor is not None + (portal.tensor,) = Copy.forward(ctx, prev_stream, next_stream, portal.tensor) + + phony = get_phony(get_device(next_stream), requires_grad=False) + return phony.detach() + + @staticmethod + # type: ignore[override] + def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, None, None, None]: + portal = ctx.portal + + assert portal.grad is not None + _, _, portal.grad = Copy.backward(ctx, portal.grad) + + return None, None, None, None diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py new file mode 100644 index 0000000000000000000000000000000000000000..0c01a198f804361185c527ac086694a67a0f673e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py @@ -0,0 +1,431 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""The user interface to define skip connections.""" +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + FrozenSet, + Generator, + Iterable, + List, + Optional, + Set, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from torch import Tensor, nn + +from ..microbatch import Batch +from .namespace import Namespace +from .tracker import current_skip_tracker + +__all__ = ["skippable", "stash", "pop", "verify_skippables"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +StashPop = Union["stash", "pop"] +StashPopGenerator = Generator[StashPop, Optional[Tensor], TensorOrTensors] +if TYPE_CHECKING: + # Typechecking: nn.Module is not a Generic + SkippableModule = nn.Module[Union[StashPopGenerator, TensorOrTensors]] # type: ignore[type-arg] +else: + SkippableModule = nn.Module + +T = TypeVar("T", bound="Skippable") + + +class Skippable(nn.Module): + """The base class for skippable modules. + + Do not use this class directly. Define a subclass by :func:`skippable` + instead. 
+ + """ + + module_cls: ClassVar[Type[SkippableModule]] + stashable_names: ClassVar[FrozenSet[str]] + poppable_names: ClassVar[FrozenSet[str]] + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__() + self.module = self.module_cls(*args, **kwargs) # type: ignore[call-arg] + self.namespaces: Dict[str, Namespace] = {} + + def __repr__(self) -> str: + return f"@skippable({self.module})" + + def namespaced(self, name: str) -> Tuple[Namespace, str]: + """Prepend namespace for the given skip name.""" + ns = self.namespaces.get(name) + ns = cast(Namespace, ns) + return (ns, name) + + def stashable(self) -> Iterable[Tuple[Namespace, str]]: + """Iterate over namespaced skip names to be stashed.""" + for name in self.stashable_names: + yield self.namespaced(name) + + def poppable(self) -> Iterable[Tuple[Namespace, str]]: + """Iterate over namespaced skip names to be popped.""" + for name in self.poppable_names: + yield self.namespaced(name) + + def isolate(self: T, ns: Namespace, *, only: Optional[Iterable[str]] = None) -> T: + r"""Isolate a specified subset or the whole set of skip tensors. + + In a single sequential module, skip tensors with the same + name are not allowed unless they are isolated by different namespaces. + + Here's an example using the same name for skip tensors twice. Each pair + of ``Layer1`` and ``Layer2`` is isolated with its own namespace ``ns1`` + and ``ns2``. There is no conflict anymore:: + + ns1 = Namespace() + ns2 = Namespace() + + model = nn.Sequential( + Layer1().isolate(ns1), + Layer1().isolate(ns2), + Layer2(), + Layer3().isolate(ns2), + Layer3().isolate(ns1), + ) + + When `only` parameter is omitted, all skip tensors are isolated. You + can isolate a subset of skip tensors by passing `only` parameter:: + + ns_alice = Namespace() + ns_bob = Namespace() + + model = nn.Sequential( + ... + StashStashPop().isolate(ns_alice, only=['alice']) \ + .isolate(ns_bob, only=['bob']), + ... + ) + + Args: + ns (Namespace): + namespace for isolation + + Keyword Args: + only (iterable of strs): + names of specific skip tensors to be isolated (omit this option + to isolate all skip tensors declared in this module) + + Returns: + this module itself + + """ + names: Iterable[str] + + if only is None: + names = self.stashable_names | self.poppable_names + else: + names = set(only) + + for name in names: + self.namespaces[name] = ns + + return self + + def dispatch( + self, + input, + handle_stash: Callable[[str, Optional[Tensor]], None], + handle_pop: Callable[[str], Optional[Tensor]], + ): + """Dispatch :class:`stash` or :class:`pop` commands. + + The commands are generated by the module's ``forward()``. + """ + generator = self.module(input) + + if not isinstance(generator, Generator): + # The underlying module returned output without any yield. + output = generator + return output + + try: + op = next(generator) + + while True: + if isinstance(op, stash): + handle_stash(op.name, op.tensor) + op = next(generator) + continue + + if isinstance(op, pop): + tensor = handle_pop(op.name) + op = generator.send(tensor) + continue + + raise TypeError(f"{op!r} is not a command from @skippable") + + except StopIteration as stop: + output = stop.args[0] + return output + + def forward(self, input: Union[List[Any], Tensor]) -> TensorOrTensors: + """Perform the forward propagation. + + :class:`stash` or :class:`pop` commands will be handled by portals + silently. The portals won't be exposed to users. + + Raises: + RuntimeError: + illegal 'stash' or 'pop' is found. 
+ + """ + skip_tracker = current_skip_tracker() + stashed_tensors: Dict[str, Optional[Tensor]] = {} + + # Load skip tensors that might be popped. + poppable_tensors = {} + batch = Batch(input) + for ns, name in self.poppable(): + try: + poppable_tensors[name] = skip_tracker.load(batch, ns, name) + except KeyError as e: + raise RuntimeError(f"'{name}' has not been stashed") from e + input = batch.values + + # Handle skip commands. + def handle_stash(name: str, tensor: Optional[Tensor]) -> None: + if name not in self.stashable_names: + raise RuntimeError(f"'{name}' has not been declared as stashable") + stashed_tensors[name] = tensor + + def handle_pop(name: str) -> Optional[Tensor]: + if name not in self.poppable_names: + raise RuntimeError(f"'{name}' has not been declared as poppable") + return poppable_tensors.pop(name) + + output = self.dispatch(input, handle_stash, handle_pop) + + # All declared skips must be stashed or popped. + not_stashed = self.stashable_names - stashed_tensors.keys() + if not_stashed: + comma_names = ", ".join(f"'{n}'" for n in not_stashed) + raise RuntimeError(f"{comma_names} must be stashed but have not") + + not_popped = poppable_tensors.keys() + if not_popped: + comma_names = ", ".join(f"'{n}'" for n in not_popped) + raise RuntimeError(f"{comma_names} must be popped but have not") + + # Save stashed skip tensors. + batch = Batch(output) + for ns, name in self.stashable(): + tensor = stashed_tensors[name] + skip_tracker.save(batch, ns, name, tensor) + output = batch.values + + return output + + +# TODO(sublee): Move to above of Skippable class for better read flow. +def skippable( + stash: Iterable[str] = (), pop: Iterable[str] = (), +) -> Callable[[Type[SkippableModule]], Type[Skippable]]: + """Define a decorator to create :class:`nn.Module ` with skip connections. + + These decorated modules are called "skippable". This functionality works perfectly + fine even when the module is not wrapped by :class:`~torch.distributed.pipeline.sync.Pipe`. + + Each skip tensor is managed by its name. Before manipulating skip tensors, + a skippable module must statically declare the names for skip tensors by + `stash` and/or `pop` parameters. Skip tensors with pre-declared name can be + stashed by ``yield stash(name, tensor)`` or popped by ``tensor = yield + pop(name)``. + + Here is an example with three layers. A skip tensor named "1to3" is stashed + and popped at the first and last layer, respectively:: + + @skippable(stash=['1to3']) + class Layer1(nn.Module): + def forward(self, input): + yield stash('1to3', input) + return f1(input) + + class Layer2(nn.Module): + def forward(self, input): + return f2(input) + + @skippable(pop=['1to3']) + class Layer3(nn.Module): + def forward(self, input): + skip_1to3 = yield pop('1to3') + return f3(input) + skip_1to3 + + model = nn.Sequential(Layer1(), Layer2(), Layer3()) + + One skippable module can stash or pop multiple skip tensors:: + + @skippable(stash=['alice', 'bob'], pop=['carol']) + class StashStashPop(nn.Module): + def forward(self, input): + yield stash('alice', f_alice(input)) + yield stash('bob', f_bob(input)) + carol = yield pop('carol') + return input + carol + + Every skip tensor must be associated with exactly one pair of `stash` and + `pop`. :class:`~torch.distributed.pipeline.sync.Pipe` checks this + restriction automatically when wrapping a module. You can also check the + restriction by :func:`verify_skippables` + without :class:`~torch.distributed.pipeline.sync.Pipe`. 
+ + """ + stashable_names = frozenset(stash) + poppable_names = frozenset(pop) + + def extend_skippable(module_cls: Type[SkippableModule]) -> Type[Skippable]: + name = module_cls.__name__ + bases = (Skippable,) + attrs = {"module_cls": module_cls, "stashable_names": stashable_names, "poppable_names": poppable_names} + return type(name, bases, attrs) + + return extend_skippable + + +class stash: + """The command to stash a skip tensor. + + :: + + def forward(self, input): + yield stash('name', input) + return f(input) + + Args: + name (str): name of skip tensor + input (torch.Tensor or None): tensor to pass to the skip connection + + """ + + __slots__ = ("name", "tensor") + + def __init__(self, name: str, tensor: Optional[Tensor]) -> None: + self.name = name + self.tensor = tensor + + +class pop: + """The command to pop a skip tensor. + + :: + + def forward(self, input): + skip = yield pop('name') + return f(input) + skip + + Args: + name (str): name of skip tensor + + Returns: + the skip tensor previously stashed by another layer under the same name + + """ + + __slots__ = ("name",) + + def __init__(self, name: str) -> None: + self.name = name + + +def verify_skippables(module: nn.Sequential) -> None: + """Verify if the underlying skippable modules satisfy integrity. + + Every skip tensor must have only one pair of `stash` and `pop`. If there + are one or more unmatched pairs, it will raise :exc:`TypeError` with the + detailed messages. + + Here are a few failure cases. :func:`verify_skippables` will report failure + for these cases:: + + # Layer1 stashes "1to3". + # Layer3 pops "1to3". + + nn.Sequential(Layer1(), Layer2()) + # └──── ? + + nn.Sequential(Layer2(), Layer3()) + # ? ────┘ + + nn.Sequential(Layer1(), Layer2(), Layer3(), Layer3()) + # └───────────────────┘ ^^^^^^ + + nn.Sequential(Layer1(), Layer1(), Layer2(), Layer3()) + # ^^^^^^ └───────────────────┘ + + To use the same name for multiple skip tensors, they must be isolated by + different namespaces. See :meth:`isolate() + `. + + Raises: + TypeError: + one or more pairs of `stash` and `pop` are not matched. 
+ + """ + stashed: Set[Tuple[Namespace, str]] = set() + popped: Set[Tuple[Namespace, str]] = set() + msgs: List[str] = [] + + for layer_name, layer in module.named_children(): + if not isinstance(layer, Skippable): + continue + + for name in layer.stashable_names & layer.poppable_names: + msg = f"'{layer_name}' declared '{name}' both as stashable and as poppable" + msgs.append(msg) + + for ns, name in layer.stashable(): + if name in layer.poppable_names: + continue + + if (ns, name) in stashed: + msg = f"'{layer_name}' redeclared '{name}' as stashable but not isolated by namespace" + msgs.append(msg) + continue + + stashed.add((ns, name)) + + for ns, name in layer.poppable(): + if name in layer.stashable_names: + continue + + if (ns, name) in popped: + msg = f"'{layer_name}' redeclared '{name}' as poppable but not isolated by namespace" + msgs.append(msg) + continue + + if (ns, name) not in stashed: + msg = f"'{layer_name}' declared '{name}' as poppable but it was not stashed" + msgs.append(msg) + continue + + popped.add((ns, name)) + + for (_, name) in stashed - popped: + msg = f"no module declared '{name}' as poppable but stashed" + msgs.append(msg) + + if msgs: + raise TypeError( + "one or more pairs of stash and pop do not match:\n\n%s" "" % "\n".join("* %s" % x for x in msgs) + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac82bc05dc9457626aee240cd110eb936b9f176 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py @@ -0,0 +1,180 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Tracks skip tensors on a thread.""" +from contextlib import contextmanager +import threading +from typing import Dict, Generator, List, Optional, Tuple + +from torch import Tensor + +from ..checkpoint import is_checkpointing +from ..dependency import fork, join +from ..microbatch import Batch +from ..stream import AbstractStream +from .layout import SkipLayout +from .namespace import Namespace +from .portal import Portal + +__all__: List[str] = [] + + +class SkipTracker: + """Tracks saved skip tensors. + + It will update the given micro-batch in place. This is because when it + manipulates the underlying skip tensors, the current micro-batch also has + to be connected with the skip tensors. + + One thread has one skip tracker. Call :func:`current_skip_tracker` to get + the skip tracker on the current thread. + + """ + + def __init__(self) -> None: + self.tensors: Dict[Tuple[Namespace, str], Optional[Tensor]] = {} + + def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None: + self.tensors[(ns, name)] = tensor + + def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]: + return self.tensors.pop((ns, name)) + + def copy( + self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str, + ) -> None: + raise TypeError("copy is not supported for non-portal skip tensors") + + +class SkipTrackerThroughPotals(SkipTracker): + """Tracks saved skip tensors through portals. The skip tensors will be + hidden in portals so that the autograd engine does not need to track them. 
+ + This tracker is only used when the training or evaluating module is wrapped + with :class:`torchpipe.Pipe`. + + """ + + def __init__(self, skip_layout: SkipLayout) -> None: + super().__init__() + self.skip_layout = skip_layout + self.portals: Dict[Tuple[Namespace, str], Portal] = {} + + def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None: + """Saves the stashed skip tensor in a portal. The portal is then + connected to the given micro-batch with :class:`Join`. + """ + if not self.skip_layout.requires_copy(ns, name): + super().save(batch, ns, name, tensor) + return + + # See [Tensor Life of Portal] at Portal.put_tensor() to understand the + # below tensor_life values. Here are the selected events which retrieve + # the tensor in portal: + # + # 1. [x] blue() + # ... + # 6. [x] PortalOrange.forward + # ... + # 8. [x] PortalOrange.forward (recomputed) + # ... + # 11. [x] blue() (recomputed) + # + if (ns, name) not in self.portals: + if is_checkpointing(): + # Under checkpointing, the tensor used by the first + # PortalOrange should be alive in the portal. This tensor will + # be used again by the second PortalOrange during the + # recomputation. + tensor_life = 3 # Delete at [8. PortalOrange.forward (recomputed)] + else: + tensor_life = 2 # Delete at [6. PortalOrange.forward] + + portal = Portal(tensor, tensor_life) + self.portals[(ns, name)] = portal + + else: + # Under recomputation, the portal already exists. + portal = self.portals[(ns, name)] + + # The existing tensor life already became 0. It should be reset as + # 1 to delete the tensor after the second PortalBlue immediately. + tensor_life = 1 # Delete at [11. blue() (recomputed)] + + portal.put_tensor(tensor, tensor_life) + + phony = portal.blue() + tensor_idx = batch.find_tensor_idx() + batch[tensor_idx] = join(batch[tensor_idx], phony) + + def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]: + """Loads a skip tensor from the corresponding portal to pop. The given + micro-batch is connected to the portal with :class:`Fork`. + """ + if not self.skip_layout.requires_copy(ns, name): + tensor = super().load(batch, ns, name) + return tensor + + portal = self.portals[(ns, name)] + tensor_idx = batch.find_tensor_idx() + batch[tensor_idx], phony = fork(batch[tensor_idx]) + tensor = portal.orange(phony) + return tensor + + def copy( + self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str, + ) -> None: + """Copies the skip tensor in the corresponding portal. The given + micro-batch and the portal will be tied with :class:`Fork` and + :class:`Join`. + """ + assert self.skip_layout.requires_copy(ns, name) + + tensor_idx = batch.find_tensor_idx() + batch[tensor_idx], phony = fork(batch[tensor_idx]) + + portal = self.portals[(ns, name)] + phony = portal.copy(prev_stream, next_stream, phony) + + batch[tensor_idx] = join(batch[tensor_idx], phony) + + +class ThreadLocal(threading.local): + def __init__(self) -> None: + self.skip_tracker: Optional[SkipTracker] = None + + +thread_local = ThreadLocal() + + +@contextmanager +def use_skip_tracker(skip_tracker: SkipTracker) -> Generator[None, None, None]: + """Registers the given skip tracker on the current thread within a + context:: + + with use_skip_tracker(my_skip_tracker): + ... 
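A slightly fuller, hypothetical sketch of the same idea, assuming ``model`` is a sequential module built from skippable layers as in the examples earlier in this diff: the explicitly installed tracker receives every stash and pop instead of the per-thread default that ``current_skip_tracker()`` would otherwise create::

    from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker

    tracker = SkipTracker()
    with use_skip_tracker(tracker):
        output = model(inputs)  # Skippable.forward() saves/loads skips via `tracker`
    # on exit, whatever tracker was registered before is restored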
+ + """ + orig = thread_local.skip_tracker + + thread_local.skip_tracker = skip_tracker + + try: + yield + finally: + thread_local.skip_tracker = orig + + +def current_skip_tracker() -> SkipTracker: + """Gets the skip tracker on the current thread.""" + skip_tracker = thread_local.skip_tracker + + if skip_tracker is None: + skip_tracker = SkipTracker() + thread_local.skip_tracker = skip_tracker + + return skip_tracker diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..59fedf865a42bec31072d531cfb24f285499ac7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py @@ -0,0 +1,120 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Utilities for eliminating boilerplate code to handle abstract streams with +CPU device. +""" +from contextlib import contextmanager +from typing import Generator, List, Union, cast + +import torch + +__all__: List[str] = ["CPUStreamType", "new_stream", "current_stream", "default_stream", + "use_device", "use_stream", "get_device", "wait_stream", "record_stream", + "is_cuda", "as_cuda"] + + +class CPUStreamType: + pass + + +# The placeholder on place of streams for the CPU device instead of CUDA. +CPUStream = CPUStreamType() + +# It represents both CUDA streams and the CPU stream. +AbstractStream = Union[torch.cuda.Stream, CPUStreamType] + + +def new_stream(device: torch.device) -> AbstractStream: + """Creates a new stream for either CPU or CUDA device.""" + if device.type != "cuda": + return CPUStream + return torch.cuda.Stream(device) + + +def current_stream(device: torch.device) -> AbstractStream: + """:func:`torch.cuda.current_stream` for either CPU or CUDA device.""" + if device.type != "cuda": + return CPUStream + return torch.cuda.current_stream(device) + + +def default_stream(device: torch.device) -> AbstractStream: + """:func:`torch.cuda.default_stream` for either CPU or CUDA device.""" + if device.type != "cuda": + return CPUStream + return torch.cuda.default_stream(device) + + +@contextmanager +def use_device(device: torch.device) -> Generator[None, None, None]: + """:func:`torch.cuda.device` for either CPU or CUDA device.""" + if device.type != "cuda": + yield + return + + with torch.cuda.device(device): + yield + + +@contextmanager +def use_stream(stream: AbstractStream) -> Generator[None, None, None]: + """:func:`torch.cuda.stream` for either CPU or CUDA stream.""" + if not is_cuda(stream): + yield + return + + with torch.cuda.stream(as_cuda(stream)): + yield + + +def get_device(stream: AbstractStream) -> torch.device: + """Gets the device from CPU or CUDA stream.""" + if is_cuda(stream): + return as_cuda(stream).device + return torch.device("cpu") + + +def wait_stream(source: AbstractStream, target: AbstractStream) -> None: + """:meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It + makes the source stream wait until the target stream completes work queued. + """ + if is_cuda(target): + if is_cuda(source): + # A CUDA stream waits another CUDA stream. + as_cuda(source).wait_stream(as_cuda(target)) + else: + # CPU waits a CUDA stream. + as_cuda(target).synchronize() + + # If the target is CPU, synchronization is not required. 
+
+
+def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
+    """:meth:`torch.Tensor.record_stream` for either CPU or CUDA stream."""
+    if is_cuda(stream):
+        # NOTE(sublee): record_stream() on a shifted view tensor throws
+        # RuntimeError in PyTorch 1.1.0, and does nothing in 1.2.0. To safely
+        # protect the tensor against unexpected reallocation, here we use a
+        # temporal tensor associated with the same storage without shifting as
+        # a workaround.
+        #
+        # Issue: https://github.com/pytorch/pytorch/issues/27366
+        #
+        tensor = tensor.new_empty([0]).set_(tensor._typed_storage())
+
+        # Typechecking: torch.cuda.Stream is incompatible with torch._C.Stream
+        tensor.record_stream(as_cuda(stream))  # type: ignore[arg-type]
+
+
+def is_cuda(stream: AbstractStream) -> bool:
+    """Returns ``True`` if the given stream is a valid CUDA stream."""
+    return stream is not CPUStream
+
+
+def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
+    """Casts the given stream as :class:`torch.cuda.Stream`."""
+    return cast(torch.cuda.Stream, stream)
diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..210c475317e2cf695071d25ec14a3127acb3bb4d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py
@@ -0,0 +1,38 @@
+from torch import nn
+from typing import List, Optional
+
+__all__ = ["partition_model"]
+
+def partition_model(
+    module: nn.Sequential,
+    balance: List[int],
+    devices: Optional[List[int]] = None):
+    """
+    Partitions the model across multiple GPU devices.
+
+    Given an :class:`nn.Sequential <torch.nn.Sequential>` module, partitions
+    the model across multiple GPU devices according to the provided ``balance``
+    and ``devices``.
+
+    Args:
+        module (:class:`nn.Sequential <torch.nn.Sequential>`):
+            Sequential model representing the pipe.
+        balance (List[int]):
+            List indicating the number of layers in each partition.
+        devices (List[int], optional):
+            List indicating the device to use for each partition. Defaults to
+            ``range(len(balance))``
+    """
+    device_idx = 0
+    pipe_idx = 0
+    balanced_pipe = []
+    for num_layers in balance:
+        layers = []
+        for i in range(num_layers):
+            layers.append(module[pipe_idx])
+            pipe_idx += 1
+        device = device_idx if devices is None else devices[device_idx]
+        balanced_pipe.append(nn.Sequential(*layers).to(device))
+        device_idx += 1
+
+    return nn.Sequential(*balanced_pipe)
diff --git a/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..87b20c4a5551917e89f8aac0559081f9db513e1b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py
@@ -0,0 +1,132 @@
+# Copyright 2019 Kakao Brain
+#
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+#
+# This source code is licensed under the BSD license found in the
+# LICENSE file in the root directory of this source tree.
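A usage sketch for ``partition_model`` above (hypothetical layer sizes; with ``devices`` omitted, partition ``i`` is moved to GPU ``i``, so this particular call expects three visible GPUs)::

    import torch
    from torch import nn
    from torch.distributed.pipeline.sync.utils import partition_model

    model = nn.Sequential(
        nn.Linear(16, 32), nn.ReLU(),  # partition 0 -> cuda:0
        nn.Linear(32, 32), nn.ReLU(),  # partition 1 -> cuda:1
        nn.Linear(32, 8),              # partition 2 -> cuda:2
    )
    partitioned = partition_model(model, balance=[2, 2, 1])
    # `partitioned` is an nn.Sequential of three nn.Sequential partitions,
    # each already moved to its device and ready to hand to Pipe.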
+"""Multithreading in pipeline parallelism.""" +from contextlib import contextmanager +from queue import Queue +import sys +from threading import Thread +from types import TracebackType +from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Tuple, Type, Union, cast + +import torch + +from .microbatch import Batch +from .stream import AbstractStream, use_device, use_stream + +__all__: List[str] = ["Task", "worker", "create_workers", "spawn_workers"] + + +ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType] + +# Queue is generic only in stubs. +# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime +if TYPE_CHECKING: + InQueue = Queue[Optional["Task"]] + OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]] +else: + InQueue = Queue + OutQueue = Queue + + +class Task: + """A task represents how to compute a micro-batch on a partition. + + It consists of two parts: :meth:`compute` and :meth:`finalize`. + :meth:`compute` should be executed in worker threads concurrently. + :meth:`finalize` should be executed after when worker threads complete to + execute :meth:`compute`. + + :meth:`compute` might be boosted by worker threads. Because it produces + several CUDA API calls by user code. In PyTorch, parallel CUDA API calls + are not serialized through GIL. So more than one CUDA API call can be + produced at the same time. + + """ + + def __init__( + self, stream: AbstractStream, *, compute: Callable[[], Batch], finalize: Optional[Callable[[Batch], None]], + ) -> None: + self.stream = stream + self._compute = compute + self._finalize = finalize + self._grad_enabled = torch.is_grad_enabled() + + def compute(self) -> Batch: + with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled): + return self._compute() + + def finalize(self, batch: Batch) -> None: + if self._finalize is None: + return + with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled): + self._finalize(batch) + + +def worker(in_queue: InQueue, out_queue: OutQueue, device: torch.device) -> None: + """Main loop of a worker thread.""" + with use_device(device): + while True: + task = in_queue.get() + + if task is None: + break + + try: + batch = task.compute() + except Exception: + exc_info = cast(ExcInfo, sys.exc_info()) + out_queue.put((False, exc_info)) + continue + + out_queue.put((True, (task, batch))) + + done = (False, None) + out_queue.put(done) + + +def create_workers(devices: List[torch.device],) -> Tuple[List[InQueue], List[OutQueue]]: + """Spawns worker threads. A worker thread is bound to a device.""" + in_queues: List[InQueue] = [] + out_queues: List[OutQueue] = [] + + # Spawn workers. 
+ workers: Dict[torch.device, Tuple[InQueue, OutQueue]] = {} + + def normalize_device(device: torch.device) -> torch.device: + if device.type == "cuda" and device.index is None: + return torch.device("cuda", index=torch.cuda.current_device()) + + if device.type == "cpu" and device.index is not None: + return torch.device("cpu") + + return device + + for device in devices: + device = normalize_device(device) + + try: + in_queue, out_queue = workers[device] + except KeyError: + in_queue = Queue() + out_queue = Queue() + workers[device] = (in_queue, out_queue) + + t = Thread(target=worker, args=(in_queue, out_queue, device), daemon=True,) + t.start() + + in_queues.append(in_queue) + out_queues.append(out_queue) + + return (in_queues, out_queues) + +@contextmanager +def spawn_workers(devices: List[torch.device],) -> Generator[Tuple[List[InQueue], List[OutQueue]], None, None]: + try: + (in_queues, out_queues) = create_workers(devices) + yield (in_queues, out_queues) + finally: + pass
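For orientation, a hedged sketch of driving one worker directly (these are internal helpers that :class:`~torch.distributed.pipeline.sync.Pipe` normally orchestrates), assuming ``Batch`` accepts a single tensor as it does elsewhere in this diff::

    import torch
    from torch.distributed.pipeline.sync.microbatch import Batch
    from torch.distributed.pipeline.sync.stream import current_stream
    from torch.distributed.pipeline.sync.worker import Task, spawn_workers

    device = torch.device("cpu")

    with spawn_workers([device]) as (in_queues, out_queues):
        task = Task(
            current_stream(device),
            compute=lambda: Batch(torch.ones(3) * 2),  # runs on the worker thread
            finalize=None,
        )
        in_queues[0].put(task)

        ok, payload = out_queues[0].get()
        assert ok                    # on failure, payload would be (exc_type, exc, traceback)
        done_task, batch = payload
        done_task.finalize(batch)    # no-op here because finalize=None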