diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34539d633f8fa0532e31a2be38719eaa527a2306 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py @@ -0,0 +1,6 @@ +from .api import ( + _shard_tensor, + load_with_process_group, + shard_module, + shard_parameter, +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31ebfed1d61f8e4f44d9e3be6e170362cf1ed10c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbd756ed761a28cacaab56f8a033c6f120deddd5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6989b6b2b9d73c87abdefeb69fa648d76c980a0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ac0dee6f3b4789052bb70ce77b2acb93672c942 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..26305b99cce306b2b6770f3731d6b1c276e8392c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py @@ -0,0 +1,28 @@ +import torch +from torch.distributed._shard.metadata import ShardMetadata +from typing import Sequence + +DEPRECATE_MSG = "Please use DTensor instead and we are deprecating ShardedTensor." + +def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor: + """ + Narrow the tensor according to ``offsets`` and ``sizes``. + """ + narrowed_tensor = tensor + for idx, (offset, size) in enumerate(zip(offsets, sizes)): + if size < tensor.size(idx): + # Reshape to get shard for this rank and we don't want autograd + # recording here for the narrow op and 'local_shard' should be a + # leaf variable in the autograd graph. 
+ narrowed_tensor = narrowed_tensor.narrow( + idx, + offset, + size + ) + return narrowed_tensor + +def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor: + """ + Narrow the tensor according to the metadata + """ + return narrow_tensor_by_index(tensor, metadata.shard_offsets, metadata.shard_sizes) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py new file mode 100644 index 0000000000000000000000000000000000000000..5f17237ab50b965e09a51251fa1cb8b901fd22ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py @@ -0,0 +1,290 @@ +from contextlib import contextmanager +from typing import Optional +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed import distributed_c10d +from torch.distributed._shard.sharded_tensor import ( + ShardedTensor, +) +from .sharding_spec import ( + ShardingSpec, + ChunkShardingSpec +) +from .sharding_plan import ( + ShardingPlan +) +from .sharder import Sharder + +def _shard_tensor( + tensor: torch.Tensor, sharding_spec: ShardingSpec, src_rank=0, process_group=None +) -> ShardedTensor: + """ + Given a :class:`torch.Tensor`, it shards that tensor according to the provided + ``sharding_spec``. ``src_rank`` denotes the source rank which would be + used as the ground truth of the data which would be scattered as shards + across the rest of the ranks. + + Args: + tensor (:class:`torch.Tensor`): Tensor needs to be sharded. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the parameter that would be sharded and scattered + across the rest of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + Returns: + A :class:`ShardedTensor` sharded from the given tensor. + + .. warning:: + Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is + currently supported as the ``sharding_spec``. + """ + if not tensor.is_contiguous(): + raise ValueError('input tensor is not a contiguous Tensor') + + pg = process_group if process_group is not None else distributed_c10d._get_default_group() + world_size = dist.get_world_size(pg) + current_rank = dist.get_rank(pg) + + # Validate src_rank and sharding_spec are same across all ranks. 
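# Illustrative sketch (not part of the patch): how _shard_tensor is typically
# called on every rank of an assumed 4-GPU job with the default process group
# already initialized. Only src_rank and the sharding spec have to agree across
# ranks (which is what the all_gather_object check below enforces); the data on
# src_rank is the ground truth that gets scattered.
import torch
from torch.distributed._shard import _shard_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(
    dim=0,
    placements=[f"rank:{r}/cuda:{r}" for r in range(4)],
)
tensor = torch.randn(16, 32, device="cuda")  # contiguous, as required above
st = _shard_tensor(tensor, spec)             # each rank now holds a (4, 32) local shard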
+ gathered_list = [None] * world_size + dist.all_gather_object(gathered_list, (src_rank, sharding_spec), group=pg) + + for idx, entry in enumerate(gathered_list): + if src_rank != entry[0]: # type: ignore[index] + raise ValueError( + f'src_rank={src_rank} on rank: {current_rank} does not ' # type: ignore[index] + f'match with src_rank={entry[0]} on rank: {idx}') + if sharding_spec != entry[1]: # type: ignore[index] + raise ValueError( + f'sharding_spec={sharding_spec} on rank: {current_rank} does not ' # type: ignore[index] + f'match with sharding_spec={entry[1]} on rank: {idx}') + + st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group) + + return st + +def shard_parameter( + module: torch.nn.Module, + param_name: str, + sharding_spec: ShardingSpec, + src_rank=0, + process_group=None): + """ + Given a :class:`torch.nn.Module`, a ``param_name`` for a parameter in that + module, it shards that parameter according to the provided + ``sharding_spec``. ``src_rank`` denotes the source rank which would be + used as the ground truth of the data which would be scattered as shards + across the rest of the ranks. + + This method replaces ``module.param_name`` with a + :class:`torch.distributed._sharded_tensor.ShardedTensor` + + Args: + module (:class:`torch.nn.Module`): Module whose parameter needs to be sharded. + param_name (str): Name of the parameter of ``module`` that needs to be sharded. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the parameter that would be sharded and scattered + across the rest of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + .. warning:: + Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is + currently supported as the ``sharding_spec``. + """ + # Perform some validation first. + if not hasattr(module, param_name): + raise AttributeError(f'{module._get_name()} has no attribute `{param_name}`') + + tensor = getattr(module, param_name) + if not isinstance(tensor, torch.Tensor): + raise ValueError(f'Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}') + + if not tensor.is_contiguous(): + raise ValueError(f'param: {param_name} is not a contiguous Tensor') + + st = _shard_tensor(tensor, sharding_spec, src_rank, process_group) + + # Replace param with ShardedTensor. + module.register_parameter(param_name, nn.Parameter(st)) + +# Tracks the current process group in the load context manager. +_CURRENT_PROCESS_GROUP: Optional[dist.ProcessGroup] = None + +@contextmanager +def load_with_process_group(process_group): + """ + Context manager to set the process group with which to load a ShardedTensor. + """ + global _CURRENT_PROCESS_GROUP + if _CURRENT_PROCESS_GROUP is not None: + raise RuntimeError( + 'ProcessGroup already set by previous "load_with_process_group" ' + 'context manager') + _CURRENT_PROCESS_GROUP = process_group + try: + yield process_group + finally: + _CURRENT_PROCESS_GROUP = None + +def _get_current_process_group(): + """ + Retrieves the current process group set by ``load_with_process_group``. + If not set, it just returns the default group. 
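# Illustrative sketch (not part of the patch): the context manager above is meant
# to wrap deserialization, so that ShardedTensors rebuilt inside the block attach
# to the given group instead of the default one. The gloo group and the checkpoint
# path are assumptions for the example.
import torch
import torch.distributed as dist
from torch.distributed._shard import load_with_process_group

pg = dist.new_group(backend="gloo")
with load_with_process_group(pg):
    state_dict = torch.load("checkpoint.pt")  # any ShardedTensor inside uses `pg`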
+ """ + global _CURRENT_PROCESS_GROUP + if _CURRENT_PROCESS_GROUP is None: + return distributed_c10d._get_default_group() + else: + return _CURRENT_PROCESS_GROUP + +def _reshard_output( + module: torch.nn.Module, + resharding_spec: ShardingSpec) -> torch.nn.Module: + """ + Hook a module with output resharding in the forward pass according + to the given ``resharding_spec``. + + Args: + module (:class:`torch.nn.Module`): Module whose output needs to be resharded. + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): + The specification describing how the output of the module will be resharded. + + Returns: + A :class:`torch.nn.Module` object with reshard API hooked. + """ + def hook_func(_module, _input, output): + if isinstance(output, ShardedTensor): + return output.reshard(resharding_spec) + return output + module.register_forward_hook(hook_func) + return module + +def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module: + """ + Hook a module with local shards collection in the forward pass. + + This API is typically used to convert a sharded representation back to data parallel + representation. In particular, it returns the local tensor for this Shard. If the + size along the sharding dimension for the local tensor is 1, this dimension is removed + from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically + a local Tensor of size [16] across each rank and not [1, 16] across each rank. + + Args: + module (:class:`torch.nn.Module`): Module whose output is ShardedTensor and the + local tensor value needs to be returned. + + Returns: + A :class:`torch.nn.Module` object with collection API hooked. + """ + + def hook_func(_module, _input, output): + if isinstance(output, ShardedTensor): + local_tensor = output.local_tensor() + # Squeeze the # of dimensions manually, only applicable to ChunkShardingSpec + sharding_spec = output._sharding_spec + if isinstance(sharding_spec, ChunkShardingSpec) \ + and local_tensor.size(sharding_spec.dim) == 1: # type: ignore[attr-defined, arg-type] + local_tensor = local_tensor.squeeze( + output._sharding_spec.dim # type: ignore[attr-defined] + ) + return local_tensor + module.register_forward_hook(hook_func) + return module + +def shard_module( + module: nn.Module, + plan: ShardingPlan, + src_rank=0, + process_group=None +): + """ + Shards a given module according to the provided sharding `plan`. This method + first shards all the parameters according to the given sharding `plan`. Then if + `output_plan` and `return_local_tensor` are specified in the sharding `plan`, it + will tag the output of modules according `output_plan`, convert the module's + output back to data parallel according to `return_local_tensor`. + + Needs to be called on all ranks in an SPMD fashion. + + Args: + module (:class:`torch.nn.Module`): The module to apply sharding to + plan (:class:`torch.distributed._shard.sharding_plan.ShardingPlan`): + The ShardingPlan which specified param name to ShardingSpec to apply to + each parameter. + + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the module that would be sharded and scattered across the rest + of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. 
+ """ + # record Sharder paths for sanity check on the plan to ensure items in the plan + # does not conflict with the submodule tree that the Sharder is working with + sharder_paths = [] + for name, spec in plan.plan.items(): + if isinstance(spec, Sharder): + sharder_paths.append(name) + + # shard the parameter according to the ShardingPlan + for name, spec in plan.plan.items(): + if isinstance(spec, ShardingSpec): + # if found a sharding spec, try to shard the parameter + module_path, _, param_name = name.rpartition(".") + + for sharder_path in sharder_paths: + if module_path.startswith(sharder_path): + raise RuntimeError(f"ShardingPlan is in-valid, trying to shard a parameter: {name}," + f" but there's already a Sharder entry for module {sharder_path}," + f" parameter sharding should not conflict with the submodule tree" + f" that a Sharder is working with!") + + mod = module.get_submodule(module_path) + shard_parameter( + mod, + param_name, + spec, + src_rank=src_rank, + process_group=process_group + ) + elif isinstance(spec, Sharder): + parent_mod_path, _, mod_name = name.rpartition(".") + if name == "": + raise KeyError("Module path must not be empty for custom sharder!") + mod = module.get_submodule(name) + parent_mod = module.get_submodule(parent_mod_path) + sharded_mod = spec.shard(mod) + # swap this submodule with the sharded module + parent_mod.mod_name = sharded_mod + else: + raise TypeError(f"Only `ShardingSpec` and `Sharder` are supported to shard '{name}'") + + # reshard output if there's an entry in `reshard_output` for this module + if plan.output_plan is not None: + for module_path, output_spec in plan.output_plan.items(): + if isinstance(output_spec, ShardingSpec): + mod = module.get_submodule(module_path) + _reshard_output(mod, output_spec) + else: + raise TypeError(f"Only `ShardingSpec` is supported as output_plan for '{module_path}'") + # convert the output back to data parallel for the modules appears in + # `return_local_tensor` of the plan, we will call `_collect_local_shard` + # to collect the local tensor for output of modules + if plan.return_local_tensor is not None: + for module_path in plan.return_local_tensor: + mod = module.get_submodule(module_path) + _collect_local_shard(mod) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c426503161c7cc8425eff62864c5cd5fa834631d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py @@ -0,0 +1,61 @@ +import torch +from torch.utils import _pytree as pytree +from typing import Optional + +def _basic_validation(op, args=(), kwargs=None): + """ + Common validation across all ops go in here. + """ + from torch.distributed._shard.sharded_tensor import ShardedTensor + + if len(args) == 0 and (kwargs is None or len(kwargs) == 0): + raise ValueError(f" No input for '{op.__name__}'!") + + # Validate types + has_distributed_tensor = False + + def is_distributed_tensor(e): + nonlocal has_distributed_tensor + if isinstance(e, ShardedTensor): + has_distributed_tensor = True + + pytree.tree_map_(is_distributed_tensor, args) + pytree.tree_map_(is_distributed_tensor, kwargs) + + if not has_distributed_tensor: + raise TypeError( + f"torch function '{op.__name__}', with args: {args} and " + f"kwargs: {kwargs} are called without any distributed tensor!" 
+ ) + + # Validate all distributed tensors use the same PG. + cur_pg: Optional[torch.distributed.ProcessGroup] = None + + def validate_pg(e): + nonlocal cur_pg + if isinstance(e, ShardedTensor): + if cur_pg is not None and e._process_group is not cur_pg: + raise RuntimeError( + 'All distributed tensors should use the ' + 'same ProcessGroup if used together in an op.' + ) + cur_pg = e._process_group + + pytree.tree_map_(validate_pg, args) + pytree.tree_map_(validate_pg, kwargs) + +def _register_default_op(op, decorator): + @decorator(op) + def tensor_default_op(types, args=(), kwargs=None, pg=None): + """ + Handles ``__torch_function__`` dispatch for the default tensor ops that + behave the same as ``torch.Tensor`` such as ``torch.Tensor.shape`` or + ``torch.Tensor.dtype``. We simply lower to the real op call with + DisableTorchFunctionSubclass context like ``torch.Tensor.__torch_function__`` + to avoid recursions. + """ + if kwargs is None: + kwargs = {} + + with torch._C.DisableTorchFunctionSubclass(): + return op(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..b7bae9e6664a6ef4bbbf9f0c52623a1f68c9649e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py @@ -0,0 +1,61 @@ +from dataclasses import dataclass +from typing import List, Union, Optional +from functools import reduce + +from torch.distributed.remote_device import _remote_device + +@dataclass +class ShardMetadata: + """ + Represents a shard of the overall Tensor including its + offsets, lengths and device placement. + + Args: + shard_offsets(List[int]): Offsets in the original tensor indicating + the start offsets for this shard. Should have the same rank as + the original tensor. + shard_sizes(List[int]): Integers indicating the size of each + dimension for this shard. Should have the same rank as the + original tensor. + placement(:class:`torch.distributed._remote_device`): + Specifies the placement of this shard. 
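# Illustrative sketch (not part of the patch): metadata for one (5, 5) shard of a
# (10, 5) tensor placed on rank 0. The offsets/sizes are example values; the
# placement string is parsed into a _remote_device by __init__ below.
from torch.distributed._shard.metadata import ShardMetadata

md = ShardMetadata(
    shard_offsets=[0, 0],   # this shard starts at row 0, column 0
    shard_sizes=[5, 5],     # and spans 5 rows by 5 columns
    placement="rank:0/cuda:0",
)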
+ """ + + __slots__ = ['shard_offsets', 'shard_sizes', 'placement'] + + shard_offsets: List[int] + shard_sizes: List[int] + placement: Optional[_remote_device] + + def __init__( + self, + shard_offsets: List[int], + shard_sizes: List[int], + placement: Optional[Union[str, _remote_device]] = None + ): + self.shard_offsets = shard_offsets + self.shard_sizes = shard_sizes + if isinstance(placement, str): + self.placement = _remote_device(placement) + else: + self.placement = placement + if len(self.shard_offsets) != len(self.shard_sizes): + raise ValueError( + f'shard_offsets and shard_sizes should have ' + f'the same number of elements, found {len(self.shard_offsets)} ' + f'and {self.shard_sizes} respectively') + + for i in range(len(self.shard_offsets)): + if self.shard_offsets[i] < 0: + raise ValueError('shard_offsets should be >=0') + if self.shard_sizes[i] < 0: + raise ValueError('shard_sizes should be >= 0') + + def __hash__(self): + def _hash_reduce(a, b): + return (a << 8) + hash(b) + + res = reduce(_hash_reduce, self.shard_offsets, 37) + res = reduce(_hash_reduce, self.shard_sizes, res) + res = _hash_reduce(res, self.placement) + return res diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4febe841186a5f19d49dcb2aecee385255b4955a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py @@ -0,0 +1,35 @@ +import functools +from inspect import signature +from .common_op_utils import _basic_validation + +""" +Common utilities to register ops on ShardedTensor +and PartialTensor. +""" + +def _register_op(op, func, op_table): + """ + Performs basic validation and registers the provided op in the given + op_table. + """ + if len(signature(func).parameters) != 4: + raise TypeError( + f'Custom sharded op function expects signature: ' + f'(types, args, kwargs, process_group), but received ' + f'signature: {signature(func)}') + + op_table[op] = func + +def _decorator_func(wrapped_func, op, op_table): + """ + Decorator function to register the given ``op`` in the provided + ``op_table`` + """ + + @functools.wraps(wrapped_func) + def wrapper(types, args, kwargs, process_group): + _basic_validation(op, args, kwargs) + return wrapped_func(types, args, kwargs, process_group) + + _register_op(op, wrapper, op_table) + return wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..172213fb0c171312f78d2088f17d69dacc96c1eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py @@ -0,0 +1,54 @@ +from typing import Iterator, Tuple, Union +from .api import ShardedOptimizer + +import torch.nn as nn + +from torch.distributed._shard.sharded_tensor import ( + ShardedTensor +) + +def named_params_with_sharded_tensor( + module: nn.Module, + prefix: str = '', + recurse: bool = True, +) -> Iterator[Tuple[str, Union[nn.Parameter, ShardedTensor]]]: + + r"""Returns an iterator over module parameters (together with the + ShardedTensor parameters), yielding both the name of the parameter + as well as the parameter itself. 
This is typically passed to a + :class:torch.distributed._shard.sharded_optim.ShardedOptimizer + + Args: + prefix (str): prefix to prepend to all parameter names. + recurse (bool): if True, then yields parameters of this module + and all submodules. Otherwise, yields only parameters that + are direct members of this module. + + Yields: + (str, Union[Tensor, ShardedTensor]): Tuple containing + the name and parameter (or ShardedTensor parameter) + + Example:: + + >>> # xdoctest: +SKIP + >>> model = torch.nn.Linear(*linear_size) + >>> shard_parameter(model, "weight", spec) + >>> for name, param in named_params_with_sharded_tensor(model): + >>> if name in ['weight']: + >>> print(param.size()) + + """ + modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)] + + memo = set() + for mod_prefix, mod in modules: + # find all sharded tensor params + for name, val in vars(mod).items(): + if isinstance(val, ShardedTensor) and val not in memo: + memo.add(val) + name = mod_prefix + ('.' if mod_prefix else '') + name + yield name, val + + # find all nn.Parameters + for name, val in module.named_parameters(): + yield name, val diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebad7d0eb3b6a7356d818c407f1bb18bb73fb059 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1895b2c0cb14a0ecce6da42df7e4fcdeaaec308 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py new file mode 100644 index 0000000000000000000000000000000000000000..54d8a94ad3fe00e3380202a93a4b2ea5c03bc407 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py @@ -0,0 +1,97 @@ +from typing import List, Union, Mapping, Dict, Any + +import torch.optim as optim +from torch import Tensor +from torch.distributed._shard.sharded_tensor import ShardedTensor + + +class ShardedOptimizer(optim.Optimizer): + def __init__( + self, + named_params: Mapping[str, Union[Tensor, ShardedTensor]], + optimizer_class, + *optimizer_args, + **optimizer_kwargs + ): + """ + ShardedOptimizer collects all tensors and local shard tensors of + ShardedTensor, then use these tensors as ``params`` for optimizers + + Args: + named_params (Dict[str, Union[Tensor, ShardedTensor]]) : a Dict + of parameters, where key is the parameter key, value is either + Tensor or ShardedTensor parameter. + optimizer_class (torch.optim.Optimizer): the Optimizer to use + locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc. + *optimizer_args: the arguments to initialize the optimizer. + **optimizer_kwargs: the key-word arguments to initialize the optimizer. 
+ + """ + tensors: List[Tensor] = [] + for value in named_params.values(): + if isinstance(value, ShardedTensor): + for local_shard in value.local_shards(): + tensors.append(local_shard.tensor) + else: + tensors.append(value) + + self.named_params = named_params + self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs) + self.param_groups = self._optim.param_groups + self.state = self._optim.state + + def zero_grad(self, set_to_none: bool = True): # type: ignore[override] + r"""Resets the gradients of all optimized :class:`torch.Tensor` s. + + Args: + set_to_none (bool): instead of setting to zero, set the grads to None. + This will in general have lower memory footprint, and can modestly improve performance. + However, it changes certain behaviors. For example: + 1. When the user tries to access a gradient and perform manual ops on it, + a None attribute or a Tensor full of 0s will behave differently. + 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s + are guaranteed to be None for params that did not receive a gradient. + 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None + (in one case it does the step with a gradient of 0 and in the other it skips + the step altogether). + """ + self._optim.zero_grad(set_to_none) + + def step(self, closure=None): + r"""Performs a single optimization step (parameter update). + + Args: + closure (Callable): A closure that reevaluates the model and + returns the loss. Optional for most optimizers. + + .. note:: + Unless otherwise specified, this function should not modify the + ``.grad`` field of the parameters. + """ + self._optim.step(closure) + + def state_dict(self) -> Dict[str, Any]: + """ + Returned state and param_groups will contain parameter keys + instead of parameter indices like torch.optim.Optimizer. + This allows for advanced functionality like optimizer re-sharding to be implemented. + """ + # TODO: implement state_dict + raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!") + + + def load_state_dict(self, state_dict: Mapping[str, Any]): + r"""Loads the ShardedOptimizer state. + + Args: + state_dict (dict): ShardedOptimizer state. Should be an object returned + from a call to :meth:`state_dict`. 
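# Illustrative sketch (not part of the patch): wiring a sharded module into
# ShardedOptimizer, following the constructor above. The 4-rank ChunkShardingSpec
# and the Linear sizes are assumptions for the example.
import torch
from torch.distributed._shard import shard_parameter
from torch.distributed._shard.sharded_optim import (
    ShardedOptimizer,
    named_params_with_sharded_tensor,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(dim=0, placements=[f"rank:{r}/cuda:{r}" for r in range(4)])
model = torch.nn.Linear(32, 16).cuda()
shard_parameter(model, "weight", spec)  # replaces model.weight with a ShardedTensor

optim = ShardedOptimizer(
    dict(named_params_with_sharded_tensor(model)),
    torch.optim.SGD,   # any local optimizer class
    lr=0.1,
)
optim.zero_grad()
# ... forward / backward ...
optim.step()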
+ """ + # TODO: implement load_state_dict + raise NotImplementedError("ShardedOptimizer load_state_dict not implemented yet!") + + def add_param_group(self, param_group: Any): + r"""Add a new param group + """ + # TODO: implement add_param_group + raise NotImplementedError("ShardedOptimizer add_param_group not implemented yet!") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..152c287ee7036f71593a9d3e0aa7f5d5176a750e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py @@ -0,0 +1,469 @@ +import functools +from typing import List, TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from torch.distributed._shard.sharding_spec import ShardingSpec +else: + ShardingSpec = "ShardingSpec" + +from .api import ( + _CUSTOM_SHARDED_OPS, + _SHARDED_OPS, + Shard, + ShardedTensorBase, + ShardedTensor, + ShardedTensorMetadata, + TensorProperties, +) +from .metadata import ShardMetadata # noqa: F401 +from torch.distributed._shard.op_registry_utils import _decorator_func + + +def empty(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Returns a :class:`ShardedTensor` filled with uninitialized data. + Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + return ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + +def ones(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Returns a :class:`ShardedTensor` with the scalar value 1. 
+ Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + return full( + sharding_spec, + size, + fill_value=1, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs + ) + +def zeros(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Returns a :class:`ShardedTensor` filled with the scalar value 0. + Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. 
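# Illustrative sketch (not part of the patch): ones()/zeros() above are thin
# wrappers around full(). The global size and the assumed 4-rank spec are example
# values; every rank must issue the same calls (SPMD).
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(dim=0, placements=[f"rank:{r}/cuda:{r}" for r in range(4)])

st_ones = sharded_tensor.ones(spec, 12, 8)          # delegates to full(..., fill_value=1)
st_zero = sharded_tensor.zeros(spec, 12, 8)         # delegates to full(..., fill_value=0)
st_full = sharded_tensor.full(spec, (12, 8), 3.14)  # size passed as a collection here
local = st_ones.local_shards()[0].tensor            # the (3, 8) piece owned by this rank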
+ + Returns: + A :class:`ShardedTensor` object on each rank + """ + return full( + sharding_spec, + size, + fill_value=0, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs + ) + +def full(sharding_spec: ShardingSpec, + size, + fill_value, + *, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype + is inferred from fill_value. If dtype is specified, it will override the + inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion. + Args: + sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the + output tensor. + fill_value (Scalar) – the value to fill the output tensor with. + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + Returns: + A :class:`ShardedTensor` object on each rank + """ + sharded_tensor = ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type] + return sharded_tensor + +def rand(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)`. The shape of the tensor is defined by the + variable argument `size`. Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the + output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + sharded_tensor = ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type] + return sharded_tensor + +def randn(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution + with mean `0` and variance `1` (also called standard normal distribution). The shape + of the tensor is defined by the variable argument `size`. Needs to be called on all ranks + in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the + output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + sharded_tensor = ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + torch.nn.init.normal_(sharded_tensor, 0, 1) # type: ignore[arg-type] + return sharded_tensor + +def init_from_local_shards( + local_shards: List[Shard], + *global_size, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates an :class:`ShardedTensor` from local shards and the global metadata. + Needs to be called on all ranks in an SPMD fashion. + + Args: + local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list + of shards that represent the local shards on this rank. + global_size (int...): a list, tuple, or `torch.Size` of integers defining the + shape of the overall sharded tensor. 
+ + Keyword args: + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object handle on this rank + + + Examples: + Suppose we want construct a sharded tensor on two ranks, global size = (10, 5), + each shard have a (5, 5) local tensor, we can do it like below: + + on rank 0: + >>> # xdoctest: +SKIP("not distributed") + >>> local_shard_metadata = ShardMetadata( + >>> shard_offsets=[0, 0], + >>> shard_lengths=[5, 5], + >>> placement="rank:0/cuda:0" + >>> ) + >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] + >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) + + on rank 1: + >>> # xdoctest: +SKIP("not distributed") + >>> local_shard_metadata = ShardMetadata( + >>> shard_offsets=[5, 0], + >>> shard_lengths=[5, 5], + >>> placement="rank:1/cuda:1" + >>> ) + >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] + >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) + """ + return ShardedTensor._init_from_local_shards( + local_shards, + *global_size, + process_group=process_group, + init_rrefs=init_rrefs + ) + +def state_dict_hook(module, destination, prefix, local_metadata): + """ + Hook to add ShardedTensor to Module's ``state_dict``. Needs to be + registered to the Module using + :meth:`torch.nn.Module._register_state_dict_hook`. + """ + for submodule_name, submodule in module.named_modules(): + for attr_name, attr in submodule.__dict__.items(): + if isinstance(attr, ShardedTensor): + mod_prefix = prefix + submodule_name + key = mod_prefix + ('.' if mod_prefix else '') + attr_name + destination[key] = attr + +def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + """ + Pre-load state dict hook to add ShardedTensor to the module. + """ + for submodule_name, submodule in module.named_modules(): + for attr_name in submodule.__dict__.keys(): + mod_prefix = prefix + submodule_name + key = mod_prefix + ('.' if mod_prefix else '') + attr_name + if key in state_dict: + if isinstance(state_dict[key], ShardedTensor): + setattr(submodule, attr_name, state_dict[key]) + +def custom_sharded_op_impl(func): + """ + Provides a way for users to write their own custom sharded operator. This + can be used to override existing ShardedTensor operators or write a new + one not supported by ShardedTensor. If the operator in question is covered + by ``__torch_function__`` dispatch and has a ShardedTensor as any of its + parameters, the function provided will be invoked for that operator. + + Example:: + >>> # xdoctest: +SKIP + >>> @custom_sharded_op_impl(torch.nn.functional.linear) + >>> def my_custom_sharded_linear(types, args, kwargs, process_group): + >>> ... + >>> # xdoctest: +SKIP("Undefined variables") + >>> input = torch.rand(10, 32) + >>> weight = sharded_tensor.rand(32, 16) + >>> bias = torch.rand(16) + >>> # This will call 'my_custom_sharded_linear' + >>> torch.nn.functional.linear(input, weight, bias) + + The types, args and kwargs parameters are the same parameters that are + passed to ``__torch_function__`` dispatch API + (https://pytorch.org/docs/stable/notes/extending.html#extending-torch). 
+ There is an additional ``process_group`` parameter which is the + process_group used for the ShardedTensor and can be used by + implementations for communications within a sharded implementation. + + Args: + func(Callable): Torch function for which we want to provide a sharded + implementation (ex: torch.nn.functional.linear) + """ + return functools.partial( + _decorator_func, + op=func, + op_table=_CUSTOM_SHARDED_OPS + ) + +def _sharded_op_impl(func): + """ + Decorator to register a default sharded op. + """ + return functools.partial( + _decorator_func, + op=func, + op_table=_SHARDED_OPS + ) + +# Import all builtin sharded ops +from ._ops import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23922e75c160e0118684fa9772884cfd749d5a68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe41923b946e586fedbb5aec51728a37766c2acf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9626948d0d6154ceedc26ad3271621662982c94b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d85d7a661e3e67d5dfdcb1d25c70a65edf467db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23d38ea9f70ba3acdb331b09ab22d4f8cefd9152 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d36a59604ea70d51f0d6346b92c2a0d69220cbae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93703839150d90f072f099b7318bcf338e9b58a1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aa4228ba4b9a7f00ea89db15bb590a29a68fdf8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fba69d36cf3e29280a1f633fdf5a003ade8202c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py new file mode 100644 index 0000000000000000000000000000000000000000..dfb661653e71b7f1e46ed111a02a87d2fe04e478 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py @@ -0,0 +1,143 @@ +import torch +import torch.distributed._shard.sharded_tensor as sharded_tensor +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, +) + +def validate_param(param, param_name): + if param is None: + raise ValueError(f"param: {param_name} shouldn't be None!") + +@_sharded_op_impl(torch.nn.init.uniform_) +def uniform_(types, args=(), kwargs=None, pg=None): + r""" + Fills the Tensor in tensor.local_shards with values drawn from the uniform + distribution :math:`\mathcal{U}(a, b)`. + Args: + tensor: tensor sharded across devices + a: the lower bound of the uniform distribution + b: the upper bound of the uniform distribution + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + a = kwargs['a'] + validate_param(a, "a") + b = kwargs['b'] + validate_param(b, "b") + + for shard in sharded_tensor.local_shards(): + torch.nn.init.uniform_(shard.tensor, a=a, b=b) + return sharded_tensor + +@_sharded_op_impl(torch.nn.init.normal_) +def normal_(types, args=(), kwargs=None, pg=None): + r""" + Fills the Tensors in tensor.local_shards with values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. 
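# Illustrative sketch (not part of the patch): because these handlers are registered
# with _sharded_op_impl, a plain torch.nn.init call on a ShardedTensor is rerouted
# here and applied shard by shard. The spec and sizes are example values.
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(dim=0, placements=[f"rank:{r}/cuda:{r}" for r in range(4)])
st = sharded_tensor.empty(spec, 12, 8)

torch.nn.init.uniform_(st, a=0.0, b=1.0)       # fills only this rank's local shards
torch.nn.init.normal_(st, mean=0.0, std=0.02)  # same dispatch path as uniform_ above
torch.nn.init.constant_(st, val=1.0)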
+ Args: + tensor: tensor sharded across devices + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + mean = kwargs['mean'] + validate_param(mean, "mean") + std = kwargs['std'] + validate_param(std, "std") + + for shard in sharded_tensor.local_shards(): + torch.nn.init.normal_(shard.tensor, mean=mean, std=std) + return sharded_tensor + +@_sharded_op_impl(torch.nn.init.kaiming_uniform_) +def kaiming_uniform_(types, args=(), kwargs=None, pg=None): + r""" + Fills the Tensors in tensor.local_shards with values according to the method + described in `Delving deep into rectifiers: Surpassing human-level + performance on ImageNet classification` - He, K. et al. (2015), using a + uniform distribution. The resulting tensor will have values sampled from + :math:`\mathcal{U}(-\text{bound}, \text{bound})` where + .. math:: + \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}} + Also known as He initialization. + Args: + tensor: tensor sharded across devices + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + a = kwargs['a'] + validate_param(a, "a") + mode = kwargs['mode'] + validate_param(mode, "mode") + nonlinearity = kwargs['nonlinearity'] + validate_param(nonlinearity, "nonlinearity") + + for shard in sharded_tensor.local_shards(): + torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity) + return sharded_tensor + +@_sharded_op_impl(torch.nn.init.constant_) +def constant_(types, args=(), kwargs=None, pg=None): + r""" + Fills the input ShardedTensor with the value \text{val}val. + Args: + tensor: tensor sharded across devices + val: the value to fill the tensor with + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + val = kwargs['val'] + validate_param(val, "val") + for shard in sharded_tensor.local_shards(): + torch.nn.init.constant_(shard.tensor, val=val) + return sharded_tensor + +tensor_like_creation_op_map = { + torch.full_like: sharded_tensor.full, + torch.empty_like: sharded_tensor.empty, + torch.zeros_like: sharded_tensor.zeros, + torch.ones_like: sharded_tensor.ones, + torch.rand_like: sharded_tensor.rand, + torch.randn_like: sharded_tensor.randn, +} + +# tensor ops that behave the same as the default tensor +def register_tensor_creation_op(op): + @_sharded_op_impl(op) + def tensor_creation_op(types, args=(), kwargs=None, pg=None): + """ + Handles ``__torch_function__`` dispatch for tensor creation ops that + takes a ShardedTensor as argument, such as ``torch.zeros_like`` or + ``torch.full_like``. 
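# Illustrative sketch (not part of the patch): with the *_like ops registered below,
# the usual factory functions accept a ShardedTensor and return a new ShardedTensor
# that keeps the input's sharding spec and global size. Spec/sizes are example values.
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

spec = ChunkShardingSpec(dim=0, placements=[f"rank:{r}/cuda:{r}" for r in range(4)])
st = sharded_tensor.rand(spec, 12, 8)

st_zeros = torch.zeros_like(st)       # same spec and global size, filled with 0
st_sevens = torch.full_like(st, 7.0)  # same spec and global size, filled with 7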
+ """ + creation_op = tensor_like_creation_op_map.get(op, None) + if creation_op is None: + raise RuntimeError(f"Tensor creation {op} not supported!") + if kwargs is None: + kwargs = {} + + st = args[0] + + new_st = creation_op(st.sharding_spec(), st.size(), *args[1:], **kwargs) # type: ignore[operator] + return new_st + + +register_tensor_creation_op(torch.full_like) +register_tensor_creation_op(torch.empty_like) +register_tensor_creation_op(torch.zeros_like) +register_tensor_creation_op(torch.ones_like) +register_tensor_creation_op(torch.rand_like) +register_tensor_creation_op(torch.randn_like) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py new file mode 100644 index 0000000000000000000000000000000000000000..06141fd20c923f6de3d555233f21bcd2fc750d20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py @@ -0,0 +1,1253 @@ +from __future__ import annotations # type: ignore[attr-defined] +from dataclasses import dataclass +from typing import ( + Callable, + Dict, + List, + Optional, + Sequence, + Tuple, + cast, +) +import copy +import warnings +from functools import reduce +import weakref + +import threading +import torch +import torch.distributed as dist +from torch.distributed import rpc +from torch.distributed import distributed_c10d +from torch.distributed._shard.metadata import ShardMetadata +import torch.distributed._shard.sharding_spec as shard_spec +from torch.distributed._shard.sharding_spec.api import ( + _dispatch_custom_op, + _has_custom_op, +) +from torch.distributed._shard.sharding_spec._internals import ( + check_tensor, + validate_non_overlapping_shards_metadata, +) +from torch.distributed._shard._utils import ( + DEPRECATE_MSG, +) + +from .metadata import TensorProperties, ShardedTensorMetadata +from .shard import Shard +from .reshard import reshuffle_local_shard, reshard_local_shard +from .utils import ( + _flatten_tensor_size, + _parse_and_validate_remote_device, + _validate_output_tensor_for_gather, + build_metadata_from_local_shards, + build_global_metadata +) +from torch.distributed.remote_device import _remote_device +from torch.utils import _pytree as pytree +import operator + +# Tracking for sharded tensor objects. +_sharded_tensor_lock = threading.Lock() +_sharded_tensor_current_id = 0 +_sharded_tensor_map: Dict[int, weakref.ReferenceType[ShardedTensor]] = {} + +# Default sharded ops +_SHARDED_OPS: Dict[Callable, Callable] = {} + +# Customized user ops +_CUSTOM_SHARDED_OPS: Dict[Callable, Callable] = {} + +def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int): + with _sharded_tensor_lock: + if sharded_tensor_id not in _sharded_tensor_map: + raise RuntimeError( + f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}') + + sharded_tensor = _sharded_tensor_map[sharded_tensor_id]() + if sharded_tensor is None: + raise RuntimeError('ShardedTensor weakref has been deallocated') + else: + sharded_tensor._register_remote_shards(rrefs, rpc_rank) + +class ShardedTensorBase(torch.Tensor): + _sharding_spec: shard_spec.ShardingSpec + _metadata: ShardedTensorMetadata + _local_shards: List[Shard] + + def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs): + # Use __new__ to construct a wrapper tensor, for recording tensor + # properties and logging purposes. 
+ torch._C._log_api_usage_once("torch.distributed._shard.sharded_tensor") + + # check sharding spec and build sharded tensor metadata + if not isinstance(sharding_spec, shard_spec.ShardingSpec): + raise ValueError(f"Expecting ShardingSpec but got: {type(sharding_spec)}") + + sizes = _flatten_tensor_size(size) + dtype = kwargs["dtype"] + layout = kwargs["layout"] + pin_memory = kwargs["pin_memory"] + requires_grad = kwargs["requires_grad"] + + if dtype is None: + dtype = torch.get_default_dtype() + + tensor_properties = TensorProperties( + dtype, layout, requires_grad, pin_memory=pin_memory + ) + sharded_tensor_metadata = sharding_spec.build_metadata( + sizes, tensor_properties=tensor_properties + ) + + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, + sizes, + dtype=dtype, + layout=layout, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + # set sharding spec + r._sharding_spec = sharding_spec + # set metadata + r._metadata = sharded_tensor_metadata + # set local shards + r._local_shards = [] + return r + + def metadata(self) -> ShardedTensorMetadata: + """ + Returns a :class:`ShardedTensorMetadata` object corresponding to the + metadata for the entire tensor. + """ + return self._metadata + + def local_shards(self) -> List[Shard]: + """ + Returns a list of :class:`Shard' corresponding to the + local shards for this rank. Returns an empty list if the current rank + does not host any shards for this Tensor. + """ + return self._local_shards + + @classmethod + def _init_from_local_shards_and_global_metadata( + cls, + local_shards: List[Shard], + sharded_tensor_metadata: ShardedTensorMetadata, + sharding_spec=None, + ) -> ShardedTensorBase: + """ + Initialize a ShardedTensorBase with local shards and a global + ShardedTensorMetadata built on each rank. + Warning: This API is experimental and subject to change. It does + not do cross rank validations, and fully rely on the user + for the correctness of sharded_tensor_metadata on each rank + """ + shards_metadata = sharded_tensor_metadata.shards_metadata + tensor_properties = sharded_tensor_metadata.tensor_properties + + if len(shards_metadata) == 0: + raise ValueError("shards_metadata must not be empty!") + + if tensor_properties.layout != torch.strided: + raise ValueError("Only torch.strided layout is currently supported") + + if sharding_spec is None: + spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata) + else: + spec = sharding_spec + + sharded_tensor_base = ShardedTensorBase.__new__( + ShardedTensor, + spec, + sharded_tensor_metadata.size, + dtype=tensor_properties.dtype, + layout=tensor_properties.layout, + pin_memory=tensor_properties.pin_memory, + requires_grad=tensor_properties.requires_grad, + ) + + # check if shards_metadata have overlap shards + validate_non_overlapping_shards_metadata(shards_metadata) + + # check if the shards_metadata is compatible with overall size of the sharded tensor. + check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) + + # done validation, add local_shards + sharded_tensor_base._local_shards = local_shards + return sharded_tensor_base + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + raise RuntimeError( + f"A {cls.__name__} object is being used from c++ while calling {func.__module__}.{func.__name__} " + "but the there is no custom __torch_dispatch__ implementation for it." 
+ ) + +class ShardedTensor(ShardedTensorBase): + """ + ShardedTensor is an torch.Tensor subclass to represent Tensors that are sharded + across multiple devices and multiple processes. + + ShardedTensor is initialized in an SPMD like fashion where each rank + initializes the ShardedTensor. The ShardedTensor object on each rank + then only stores the local shard for the Tensor and provides global + metadata for all the shards. + + ShardedTensor doesn't provide any Tensor like operations but is a wrapper + providing the Tensor representing the local shard and the global metadata. + Using these, users can build their custom distributed._sharded computations + on top of this primitive. The local shards are all initialized using the + create_op specified by tensor_init_params.create_op, e.g., torch.ones, or + torch.empty + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + .. note:: ShardedTensor uses collectives to do various operations, i.e. it + uses all_gather to do cross rank validations. For NCCL-based process + groups, internal tensor representations of objects must be moved to the + GPU device before communication takes place. In this case, the device + used is given by ``torch.cuda.current_device()`` and it is the user's + responsibility to ensure that this is set so that each rank has an + individual GPU, via ``torch.cuda.set_device()`` + + """ + def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs): + self = super().__new__(cls, sharding_spec, *size, **kwargs) + return self + + def __init__( + self, + sharding_spec: shard_spec.ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False, + ): + # prepare initialization, initialize fields like + # _process_group, _local_shards, etc. 
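# --- Editor's sketch (not part of this patch) ---------------------------------
# SPMD construction of a ShardedTensor as described in the class docstring
# above. Assumes torch.distributed is initialized with NCCL and that
# torch.cuda.set_device(rank) was called on every rank; sizes are arbitrary.
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

def _build_sharded_tensor() -> ShardedTensor:
    world_size = dist.get_world_size()
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(world_size)],
    )
    # Every rank issues the same call; each rank only allocates its local shards.
    st = ShardedTensor(spec, 4 * world_size, 8, dtype=torch.float32, init_rrefs=False)
    # st.local_shards() holds this rank's Shard objects and st.metadata() the
    # global ShardedTensorMetadata. Passing init_rrefs=True additionally
    # requires torch.distributed.rpc.init_rpc() so that remote_shards() works.
    return st
# -------------------------------------------------------------------------------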
+ self._prepare_init(process_group=process_group, init_rrefs=init_rrefs) + + if layout != torch.strided: + raise ValueError('Only torch.strided layout is currently supported') + + if memory_format != torch.contiguous_format: + raise ValueError('Only torch.contiguous_format memory_format is currently supported') + + self._metadata.tensor_properties.memory_format = memory_format + + current_rank = dist.get_rank(self._process_group) + + for shard_metadata in self._metadata.shards_metadata: + rank, device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement) + if rank == current_rank: + local_tensor = _create_tensor_from_params( + shard_metadata.shard_sizes, + local_device=device, + tensor_properties=self._metadata.tensor_properties + ) + self._local_shards.append(Shard(local_tensor, shard_metadata)) + + # do post initialization (i.e. register sharded_tensor_id, initialize_rpc) + self._post_init() + + def _prepare_init(self, process_group=None, init_rrefs=False): + self._init_rrefs = init_rrefs + self._sharded_tensor_id = None + + self._process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + + self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {} + + def _post_init(self): + # Initialize RPC if available. + if self._init_rrefs: + with _sharded_tensor_lock: + global _sharded_tensor_current_id, _sharded_tensor_map + self._sharded_tensor_id = _sharded_tensor_current_id + _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self) + _sharded_tensor_current_id += 1 + + if not rpc._is_current_rpc_agent_set(): + raise RuntimeError( + 'RPC Framework needs to be initialized using' + ' torch.distributed.rpc.init_rpc if init_rrefs is set to True') + self._init_rpc() + + def __del__(self): + # Clean up the global map. + with _sharded_tensor_lock: + global _sharded_tensor_current_id, _sharded_tensor_map + if ( + hasattr(self, "_sharded_tensor_id") + and self._sharded_tensor_id in _sharded_tensor_map + ): + _sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload] + + def _init_rpc(self): + # Validate PG and RPC ranks match. + pg_rank = dist.get_rank() + rpc_rank = rpc.get_worker_info().id + if pg_rank != rpc_rank: + raise ValueError( + f'Default ProcessGroup and RPC ranks must be ' + f'the same for ShardedTensor, found process group rank: ' + f'{pg_rank} and RPC rank: {rpc_rank}' + ) + + self._remote_shards = {} + + # Gather all the sharded tensor ids. + worker_infos = rpc._get_current_rpc_agent().get_worker_infos() + rank_to_name = {} + name_to_rank = {} + + for worker_info in worker_infos: + rank_to_name[worker_info.id] = worker_info.name + name_to_rank[worker_info.name] = worker_info.id + + all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id) + + # Share the local shards to the entire world. + futs = [] + rpc_rank = rpc.get_worker_info().id + for rank in range(dist.get_world_size()): + # Skip self. + if rank == dist.get_rank(): + continue + + if len(self.local_shards()) != 0: + rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()] + fut = rpc.rpc_async( + rank, + _register_remote_shards, + args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank)) + futs.append(fut) + + torch.futures.wait_all(futs) + + # Barrier for all RPCs to finish on all ranks. + rpc.api._all_gather(None) + + def _get_preferred_device(self) -> torch.device: + """ + Return the preferred device to be used when creating tensors for collectives. 
+ This method takes into account the associated process group + """ + if dist.get_backend(self._process_group) == dist.Backend.NCCL: + return torch.device(torch.cuda.current_device()) + return torch.device("cpu") + + def gather( # type: ignore[override] + self, + dst: int = 0, + out: Optional[torch.Tensor] = None, + enforce_dtype: bool = False, + dtype: Optional[torch.dtype] = None, + ) -> None: + """ + Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the + sharded tensor. + + The API needs to be called on all ranks in SPMD fashion. All ranks should have + the same ``dst``. ``out`` should be a tensor of the same size as the overall + size of the sharded tensor on ``dst`` and ``None`` on all other ranks. + + Args: + dst(int): The rank where full tensor is constructed. + Default: 0 + out (:class `torch.Tensor`, optional): The output full tensor. + Must to be provided ONLY on ``dst`` rank. + Default: ``None`` + enforce_dtype (bool): Deprecated, please use dtype instead. Force the + gathered tensors to be the same type as input and output. + dtype (torch.dtype): Force the gathered tensors to be this dtype. + Default: ``None`` + """ + def shard_size(shard_md): + return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined] + + if enforce_dtype: + warnings.warn("enforce_dtype is deprecated. Please use dtype instead.") + + rank = dist.get_rank(self._process_group) + full_size = self.metadata().size + _validate_output_tensor_for_gather(rank, dst, full_size, out) + + local_shards = self.local_shards() + world_size = dist.get_world_size(self._process_group) + rank_sizes = [0 for _ in range(world_size)] + max_rank_size = 0 + shard_placement: Dict[ShardMetadata, Tuple[int, int]] = {} + # collect sizes + for shard_md in self.metadata().shards_metadata: + shard_rank = cast(_remote_device, shard_md.placement).rank() + assert shard_rank is not None + + shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank]) + rank_sizes[shard_rank] += shard_size(shard_md) + max_rank_size = max(max_rank_size, rank_sizes[shard_rank]) + + gather_list: Optional[List[torch.Tensor]] + if rank == dst: + assert out is not None + if enforce_dtype: + # enforce_dtype is deprecated. Do it for backward compatibility. + dtype = out.dtype + # TODO make it as a view of out tensor + gather_list = [torch.empty((max_rank_size,), device=out.device, dtype=dtype) for _ in range(world_size)] + else: + gather_list = None + + with torch.no_grad(): + if enforce_dtype and len(local_shards) > 0: + # enforce_dtype is deprecated. Do it for backward compatibility. 
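# --- Editor's sketch (not part of this patch) ---------------------------------
# Typical use of the gather() API above: every rank participates, but only the
# destination rank pre-allocates the full output tensor. Assumes shards live on
# each rank's current CUDA device and the default process group is initialized.
from typing import Optional
import torch
import torch.distributed as dist

def _gather_to_rank0(st) -> Optional[torch.Tensor]:
    out: Optional[torch.Tensor] = None
    if dist.get_rank() == 0:
        # out must have the overall (global) size and be given only on dst.
        out = torch.empty(
            st.size(),
            dtype=st.dtype,
            device=torch.device("cuda", torch.cuda.current_device()),
        )
    st.gather(dst=0, out=out)
    return out  # full tensor on rank 0, None elsewhere
# -------------------------------------------------------------------------------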
+ dtype = local_shards[0].tensor.dtype + data = torch.empty(max_rank_size, device=self._get_preferred_device(), dtype=dtype) + + for shard in local_shards: + src = shard.tensor.flatten() + if src.nelement() == 0 : + warnings.warn("Gathering a tensor with zero elements on rank " + str(rank)) + return + shard_offset = shard_placement[shard.metadata][1] + data[shard_offset: shard_offset + src.numel()].copy_(src) + + dist.gather( + tensor=data, + gather_list=gather_list, + dst=dst, + group=self._process_group, + ) + if rank != dst: + return + # In _validate_output_tensor_for_gather, we raise if out == None and rank == dst + out = cast(torch.Tensor, out) + assert gather_list is not None + + full_size = self.metadata().size + dims = len(full_size) + for shard_md in self.metadata().shards_metadata: + rank, rank_offset = shard_placement[shard_md] + tensor = gather_list[rank] + tensor = tensor[rank_offset : rank_offset + shard_size(shard_md)] + tensor = tensor.view(shard_md.shard_sizes) + + out_narrow_view = out + for dim in range(dims): + out_narrow_view = out_narrow_view.narrow( + dim, + shard_md.shard_offsets[dim], + shard_md.shard_sizes[dim], + ) + + out_narrow_view.copy_(tensor) + + def cpu( + self, + memory_format=torch.preserve_format, + process_group=None + ) -> ShardedTensor: + """ + Returns a copy of this object in CPU memory. + + If this ShardedTensor is already on CPU memory, then no copy is + performed and original object is returned. + + .. note:: When moving a ShardedTensor from GPU to CPU, the ShardedTensor might + need to be managed by a different type of ProcessGroup(i.e. ProcessGroupGloo), + it is the user's responsiblity to explicitly pass in a new process_group that + is compatible with CPU. + """ + # TODO: make this a __torch_function__ op once ShardedTensor becomes a + # torch.Tensor subclass, see https://github.com/pytorch/pytorch/issues/75402 + if memory_format != torch.preserve_format and \ + memory_format != torch.contiguous_format: + raise RuntimeError("Only `torch.contiguous_format` or " + "`torch.preserve_format` is supported!") + all_on_cpu = True + for meta in self.metadata().shards_metadata: + all_on_cpu &= (meta.placement.device().type == "cpu") # type: ignore[union-attr] + + # if every shard is already on CPU, return the original object + if all_on_cpu: + return self + + # if not, returns a copy of this object on CPU + list_shards: List[Shard] = [] + # move all local shards to cpu, and change metadata + for shard in self._local_shards: + cpu_tensor = shard.tensor.cpu(memory_format=memory_format) # type: ignore[call-arg] + metadata = copy.deepcopy(shard.metadata) + metadata.placement._device = torch.device("cpu") # type: ignore[union-attr] + list_shards.append( + Shard(cpu_tensor, metadata) + ) + + st_meta = copy.deepcopy(self.metadata()) + for meta in st_meta.shards_metadata: + if meta.placement.device().type != "cpu": # type: ignore[union-attr] + meta.placement._device = torch.device("cpu") # type: ignore[union-attr] + + pg = self._process_group if process_group is None else process_group + st_cpu = ShardedTensor._init_from_local_shards_and_global_metadata( + list_shards, + sharded_tensor_metadata=st_meta, + process_group=pg, + init_rrefs=self._init_rrefs + ) + return st_cpu + + def cuda( + self, + device=None, + non_blocking=False, + memory_format=torch.preserve_format, + process_group=None + ) -> ShardedTensor: + """ + Returns a copy of this object in CUDA memory, if the original ShardedTensor + is on CPU, we will move the local shard to the current GPU device 
of each + process in a SPMD fashion. + If this ShardedTensor is already on CUDA memory and local shards on each rank are + already on current device, we still returns a new ShardedTensor object with new + metadata, but no underlying data movements are performed. + .. note:: When moving a ShardedTensor from CPU to GPU, the ShardedTensor might + need to be managed by a different type of ProcessGroup(i.e. ProcessGroupNCCL), + it is the user's responsiblity to explicitly pass in a new process_group that + is compatible with GPU. + """ + if memory_format != torch.preserve_format and \ + memory_format != torch.contiguous_format: + raise RuntimeError("Only `torch.contiguous_format` or " + "`torch.preserve_format` is supported!") + + if device is not None: + device = torch.device(device) if isinstance(device, str) else device + assert isinstance(device, torch.device) and device.index == torch.cuda.current_device(), \ + '''Only device without device id (e.g. "cpu" or "cuda") is expected for ShardedTensor!''' + + current_device = torch.device(torch.cuda.current_device()) + # returns a copy of ShardedTensor on CUDA current device + list_shards: List[Shard] = [] + # move all local shards to current device, and change metadata + # if local shards already on the current device, there's no + # real data movement, only the metadata are copied. + for shard in self._local_shards: + cuda_tensor = shard.tensor.cuda( + device=current_device, + non_blocking=non_blocking, + memory_format=memory_format + ) # type: ignore[call-arg] + metadata = copy.deepcopy(shard.metadata) + metadata.placement._device = current_device # type: ignore[union-attr] + + list_shards.append( + Shard(cuda_tensor, metadata) + ) + + st_meta = copy.deepcopy(self.metadata()) + for meta in st_meta.shards_metadata: + if meta.placement.device().type != "cuda": # type: ignore[union-attr] + meta.placement._device = current_device # type: ignore[union-attr] + + pg = self._process_group if process_group is None else process_group + # we need to use `init_from_local_shards` to communicate between ranks + # and update the sharding spec/shards metadata. + st_cuda = ShardedTensor._init_from_local_shards_and_global_metadata( + list_shards, + sharded_tensor_metadata=st_meta, + process_group=pg, + init_rrefs=self._init_rrefs + ) + return st_cuda + + def to(self, *args, **kwargs) -> ShardedTensor: + current_device: torch.device + if self._local_shards: + current_device = self._local_shards[0].tensor.device + elif self._process_group._get_backend_name() == "gloo": + current_device = torch.device("cpu") + else: + current_device = torch.device(torch.cuda.current_device()) + current_dtype = self.dtype + device_to = current_device + dtype_to = current_dtype + if len(args) == 1: + if isinstance(args[0], torch.dtype): + dtype_to = args[0] + elif isinstance(args[0], torch.device): + device_to = args[0] + elif isinstance(args[0], (str, int)): + device_to = torch.device(args[0]) + elif isinstance(args[0], torch.Tensor): + dtype_to = args[0].dtype + device_to = args[0].device + else: + raise RuntimeError(f"ShardedTensor.to() have wrong arguments: {args}") + elif len(args) == 2: + device_to, dtype_to = args + else: + dtype_to = kwargs.get("dtype", current_dtype) + device_to = kwargs.get("device", current_device) + + device_to = torch.device(device_to) if isinstance(device_to, (str, int)) else device_to + + if device_to.type == "cuda": + # if device_to set to cuda, set to current device even + # if user specify the device index. 
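# --- Editor's sketch (not part of this patch) ---------------------------------
# Moving a ShardedTensor between devices with the cpu()/cuda()/to() helpers
# above. Assumes a Gloo and a NCCL process group were created beforehand (e.g.
# via dist.new_group), since the copy may need a process group whose backend
# matches the target device; the names below are illustrative only.
import torch
import torch.distributed as dist

def _move_example(st_gpu, gloo_pg: dist.ProcessGroup, nccl_pg: dist.ProcessGroup):
    # Copy local shards to CPU; the CPU copy is handed a Gloo-compatible group.
    st_cpu = st_gpu.cpu(process_group=gloo_pg)
    # Copy back onto each rank's current CUDA device under a NCCL group.
    st_back = st_cpu.cuda(process_group=nccl_pg)
    # to() with a dtype only casts the local shards; the device stays unchanged.
    st_half = st_back.to(torch.float16)
    return st_cpu, st_back, st_half
# -------------------------------------------------------------------------------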
+ current_idx = torch.cuda.current_device() + if device_to.index != current_idx: + warnings.warn("ShardedTensor.to only move tensor to its current device" + "If you want to put to different device, use `reshard` instead.") + device_to = torch.device(current_idx) + + copy_tensor = kwargs.get("copy", False) + non_blocking = kwargs.get("non_blocking", False) + memory_format = kwargs.get("memory_format", torch.preserve_format) + process_group = kwargs.get("process_group", None) + + if not copy_tensor and dtype_to == current_dtype and device_to == current_device: + # already have correct dtype and device, return itself + return self + + # returns a copy of ShardedTensor on CUDA current device + list_shards: List[Shard] = [] + + for shard in self._local_shards: + new_tensor = shard.tensor.to( # type: ignore[call-overload] + device=device_to, + dtype=dtype_to, + non_blocking=non_blocking, + copy=copy_tensor, + memory_format=memory_format + ) + metadata = copy.deepcopy(shard.metadata) + if metadata.placement is not None: + metadata.placement._device = device_to + list_shards.append(Shard(new_tensor, metadata)) + + # update metadata + st_meta = copy.deepcopy(self.metadata()) + st_meta.tensor_properties.dtype = dtype_to + for meta in st_meta.shards_metadata: + meta.placement._device = device_to # type: ignore[union-attr] + + pg = self._process_group if process_group is None else process_group + # we need to use `init_from_local_shards` to communicate between ranks + # and update the sharding spec/shards metadata. + st_to = ShardedTensor._init_from_local_shards_and_global_metadata( + list_shards, + sharded_tensor_metadata=st_meta, + process_group=pg, + init_rrefs=self._init_rrefs + ) + return st_to + + + @classmethod + def _init_from_local_shards( + cls, + local_shards: List[Shard], + *global_size, + process_group=None, + init_rrefs=False, + ): + # STEP 1: Validate the Shardmetadatas locally + process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + current_rank = dist.get_rank(process_group) + world_size = dist.get_world_size(process_group) + + local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None + global_tensor_size = _flatten_tensor_size(global_size) + + if len(local_shards) > 0: + local_sharded_tensor_metadata = \ + build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group) + + # STEP 2. 
Validate metadata across ranks, and build a global sharded tensor + # metadata by gathering local ShardedTensorMetadata + gathered_metadatas: List[Optional[ShardedTensorMetadata]] = [] + if world_size > 1: + gathered_metadatas = [None for _ in range(world_size)] + + dist.all_gather_object( + gathered_metadatas, + local_sharded_tensor_metadata, + group=process_group + ) + else: + gathered_metadatas = [local_sharded_tensor_metadata] + + global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas) + tensor_properties = global_sharded_tensor_metadata.tensor_properties + + # STEP 3: Validation done, create the actual ShardedTensor and populate fields + # prepare initialization + spec = shard_spec._infer_sharding_spec_from_shards_metadata( + global_sharded_tensor_metadata.shards_metadata + ) + sharded_tensor = cls.__new__(cls, + spec, + global_sharded_tensor_metadata.size, + dtype=tensor_properties.dtype, + layout=tensor_properties.layout, + pin_memory=tensor_properties.pin_memory, + requires_grad=tensor_properties.requires_grad) + sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) + + # attach local_shards to the ShardedTensor created + sharded_tensor._local_shards = local_shards + + # run post initialization, i.e. map registration, rpc initialization + sharded_tensor._post_init() + return sharded_tensor + + @classmethod + def _init_from_local_tensor( + cls, + local_tensor: torch.Tensor, + sharding_spec: shard_spec.ShardingSpec, + *global_size: Sequence[int], + process_group: Optional[dist.ProcessGroup] = None, + init_rrefs=False, + ) -> ShardedTensor: + """ + Initialize a ShardedTensor given only one local tensor, global sharded tensor + size and sharding spec on each rank. + + Args: + local_tensor (Tensor): Single tensor of local shard stored in each rank. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): + The specification describing how to shard the Tensor. + global_size (Sequence[int]): Size of the sharded tensor. + process_group (ProcessGroup, optional): The process group to aggregate on. + Default: None + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` sharded based on the given sharding_spec with local + tensor stored in the current rank. + + Examples: + >>> # xdoctest: +SKIP + >>> # All tensors below are of torch.int64 type. + >>> # We have 2 process groups, 2 ranks. + >>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank + >>> local_tensor = torch.unsqueeze(torch.cat([tensor, tensor + 2])) + >>> local_tensor + tensor([[1, 2, 3, 4]]) # Rank 0 + tensor([[3, 4, 5, 6]]) # Rank 1 + >>> sharding_dim = 0 + >>> sharding_spec = ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + ], + ) + >>> st = ShardedTensor._init_from_local_tensor(local_tensor, sharding_spec, [2, 4]) + >>> st + ShardedTensor( + ShardedTensorMetadata( + shards_metadata=[ + ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1, 4], placement=rank:0/cuda:0), + ShardMetadata(shard_offsets=[1, 0], shard_sizes=[1, 4], placement=rank:1/cuda:1), + ], + size=torch.Size([2, 4]) + ) + >>> st.local_tensor() + tensor([1, 2, 3, 4]) # Rank 0 + tensor([3, 4, 5, 6]) # Rank 1 + + Warning: This API is experimental and subject to change. 
It lacks of a fully across + rank validations, and we only validate the local shard on the current rank. + We fully rely on the user to ensure local tensor is sharded based on the + sharding spec. + """ + warnings.warn(DEPRECATE_MSG) + + if not local_tensor.is_contiguous(): + raise ValueError('local_tensor is not a contiguous Tensor.') + + global_tensor_size = _flatten_tensor_size(global_size) + tensor_properties = TensorProperties( + dtype=local_tensor.dtype, + layout=local_tensor.layout, + requires_grad=local_tensor.requires_grad, + memory_format=torch.contiguous_format, + pin_memory=local_tensor.is_pinned()) + sharded_tensor_metadata = sharding_spec.build_metadata( + global_tensor_size, + tensor_properties + ) + + process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + current_rank = dist.get_rank(process_group) + + local_shards: List[Shard] = [] + for shard_metadata in sharded_tensor_metadata.shards_metadata: + rank, device = _parse_and_validate_remote_device(process_group, shard_metadata.placement) + if rank == current_rank: + local_shards.append(Shard(local_tensor, shard_metadata)) + + # TODO: figure out what the API should behave when some rank have no shard + # see https://github.com/pytorch/pytorch/issues/7313 + return ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards, + sharded_tensor_metadata, + process_group=process_group, + init_rrefs=init_rrefs, + sharding_spec=sharding_spec, + ) + + @classmethod + def _init_from_local_shards_and_global_metadata( # type: ignore[override] + cls, + local_shards: List[Shard], + sharded_tensor_metadata: ShardedTensorMetadata, + process_group=None, + init_rrefs=False, + sharding_spec=None, + ) -> ShardedTensor: + """ + Initialize a ShardedTensor with local shards and a global + ShardedTensorMetadata built on each rank. + + Warning: This API is experimental and subject to change. 
It does + not do cross rank validations, and fully rely on the user + for the correctness of sharded_tensor_metadata on each rank + """ + process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + current_rank = dist.get_rank(process_group) + + shards_metadata = sharded_tensor_metadata.shards_metadata + + local_shard_metadatas = [] + + # collect local shard metadatas from the global sharded_tensor_metadata + for shard_metadata in shards_metadata: # type: ignore[attr-defined] + rank, local_device = _parse_and_validate_remote_device(process_group, shard_metadata.placement) + + if current_rank == rank: + local_shard_metadatas.append(shard_metadata) + + if len(local_shards) != len(local_shard_metadatas): + raise RuntimeError( + f'Number of local shards ({len(local_shards)}) does not match number of local ' + f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) ' + f'on rank ({current_rank}) ' + ) + + shards_metadata = sharded_tensor_metadata.shards_metadata + tensor_properties = sharded_tensor_metadata.tensor_properties + + if len(shards_metadata) == 0: + raise ValueError("shards_metadata must not be empty!") + + if tensor_properties.layout != torch.strided: + raise ValueError("Only torch.strided layout is currently supported") + + if sharding_spec is None: + spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata) + else: + spec = sharding_spec + + sharded_tensor = ShardedTensor.__new__( + ShardedTensor, + spec, + sharded_tensor_metadata.size, + dtype=tensor_properties.dtype, + layout=tensor_properties.layout, + pin_memory=tensor_properties.pin_memory, + requires_grad=tensor_properties.requires_grad, + ) + + def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False): + tensor_property_or_metadata = ( + "tensor property" if is_property else "local ShardMetadata" + ) + if expected != actual: + raise ValueError( + f"Local shards' tensor {prop_name} property is incompatible with " + f"{tensor_property_or_metadata} on rank {rank}: " + f"{tensor_property_or_metadata} {prop_name}={expected}, " + f"local shard tensor {prop_name}={actual}." + ) + + for shard in local_shards: + shard_meta = shard.metadata + local_shard_tensor = shard.tensor + placement = shard_meta.placement + assert placement is not None, "Must specify placement for `Shard`!" + rank = placement.rank() + local_device = placement.device() + + _raise_if_mismatch( + tensor_properties.layout, + local_shard_tensor.layout, + "layout", + rank, + True, + ) + if not local_shard_tensor.is_contiguous(): + raise ValueError( + "Only torch.contiguous_format memory_format is currently supported" + ) + + _raise_if_mismatch( + shard_meta.shard_sizes, + list(local_shard_tensor.size()), + "size", + rank, + ) + _raise_if_mismatch( + tensor_properties.pin_memory, + local_shard_tensor.is_pinned(), + "pin_memory", + rank, + True, + ) + _raise_if_mismatch(local_device, local_shard_tensor.device, "device", rank) + _raise_if_mismatch( + tensor_properties.dtype, + local_shard_tensor.dtype, + "dtype", + rank, + True, + ) + _raise_if_mismatch( + tensor_properties.requires_grad, + local_shard_tensor.requires_grad, + "requires_grad", + rank, + True, + ) + + # check if shards_metadata have overlap shards + validate_non_overlapping_shards_metadata(shards_metadata) + + # check if the shards_metadata is compatible with overall size of the sharded tensor. 
+ check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) + + # done validation, add local_shards + sharded_tensor._local_shards = local_shards + sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) + + # run post initialization, i.e. map registration, rpc initialization + sharded_tensor._post_init() + return sharded_tensor + + def sharding_spec(self) -> shard_spec.ShardingSpec: + """ + Returns the ShardingSpec for the tensor. + """ + return self._sharding_spec + + def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor: + """ + Reshard a sharded tensor given the ``resharding_spec``. For now, we only support + single local shard. + + If ``resharding_spec`` is same as the original one, this becomes a no-op. + If only ``resharding_spec`` shares the same sharding dim with the original one, + we swap local shards directly. + For more generic cases, we merge different shards across different ranks and split + the local shards based on the ``resharding_spec`` via `all_to_all` collective API. + + Args: + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded. + + Returns: + A :class:`ShardedTensor` object whose local shards are resharded. + + Examples: + >>> # xdoctest: +SKIP + >>> # We have 2 process groups, 2 ranks. + >>> tensor = torch.arange(4, dtype=torch.int64) + 1 + 2 * rank + >>> tensor = torch.stack([tensor, tensor]) + >>> tensor + tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) # Rank 0 + tensor([[3, 4, 5, 6], [3, 4, 5, 6]]) # Rank 1 + tensor([[5, 6, 7, 8], [5, 6, 7, 8]]) # Rank 2 + tensor([[7, 8, 9, 10], [7, 8, 9, 10]]) # Rank 3 + >>> sharding_dim = 0 + >>> spec = ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ) + >>> current_offsets = [0] * 2 + >>> current_offsets[0] = rank * 2 + >>> shard_metadata = ShardMetadata( + shard_offsets=copy.deepcopy(current_offsets), + shard_sizes=tensor.size(), + placement=spec.placements[rank], + ) + >>> local_shards = [ + Shard( + tensor=tensor, + metadata=shard_metadata, + ) + ] + >>> st = ShardedTensor._init_from_local_shards(local_shards, tensor.size()) + >>> sharding_dim = 1 + >>> resharding_spec = ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ) + >>> st.reshard(resharding_spec) + >>> tensor = st.local_shards()[0].tensor + >>> tensor + tensor([[1], [1], [3], [3], [5], [5], [7], [7]]) # Rank 0 + tensor([[2], [2], [4], [4], [6], [6], [8], [8]]) # Rank 1 + tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2 + tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3 + """ + warnings.warn(DEPRECATE_MSG) + + if ( + not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or + not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec) + ): + raise NotImplementedError("Only ChunkShardingSpec supported for reshard.") + if (len(self.local_shards()) != 1): + raise NotImplementedError("Only single local shard supported for reshard.") + + if self._sharding_spec.dim == resharding_spec.dim: # type: ignore[attr-defined] + if self._sharding_spec.placements == resharding_spec.placements: # type: ignore[attr-defined] + return self + else: + local_shards, shards_metadata = reshuffle_local_shard( + self.local_tensor(), + self.size(), # type: ignore[arg-type] + self._sharding_spec, + resharding_spec, + self._process_group, + ) + else: + 
local_shards, shards_metadata = reshard_local_shard( + self.local_tensor(), + self.size(), # type: ignore[arg-type] + self._sharding_spec, + resharding_spec, + self._process_group, + ) + self._local_shards = local_shards + self._metadata.shards_metadata = shards_metadata + self._sharding_spec = resharding_spec + return self + + def local_tensor(self) -> torch.Tensor: + """ + Return local tensor for a sharded_tensor. For now we only support single local shard. + + Returns: + A :class:`torch.Tensor` of the local shard. + """ + if len(self.local_shards()) != 1: + raise NotImplementedError("Only single local shard is supported.") + return self.local_shards()[0].tensor + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + def dispatch(st: ShardedTensor, func: Callable): + # Dispatch to custom user provided op first if it exists. + if func in _CUSTOM_SHARDED_OPS: + return _CUSTOM_SHARDED_OPS[func](types, args, kwargs, st._process_group) + + # Dispatch to custom sharding spec op if it has one. + if _has_custom_op(st._sharding_spec, func): + return _dispatch_custom_op( + st._sharding_spec, + func, + types, + args, + kwargs, + st._process_group + ) + + if func in _SHARDED_OPS: + return _SHARDED_OPS[func](types, args, kwargs, st._process_group) + + raise RuntimeError( + f"torch function '{func.__name__}', with args: {args} and " + f"kwargs: {kwargs} not supported for ShardedTensor!") + + warnings.warn(DEPRECATE_MSG) + # Find ShardedTensor instance to get process_group and sharding_spec. + st_instance = None + + def find_sharded_tensor(e): + nonlocal st_instance + if st_instance is None and isinstance(e, ShardedTensor): + st_instance = e + + pytree.tree_map_(find_sharded_tensor, args) + pytree.tree_map_(find_sharded_tensor, kwargs) + + if st_instance is not None: + return dispatch(st_instance, func) + + raise RuntimeError( + f"torch function '{func.__name__}', with args: {args} and " + f"kwargs: {kwargs} not supported for ShardedTensor!") + + def is_pinned(self) -> bool: # type: ignore[override] + """ + Returns True if the sharded tensor (each local shard) resides in pinned memory. + """ + return self._metadata.tensor_properties.pin_memory + + def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int): + self._remote_shards[rpc_rank] = remote_shards + + def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]: + """ + Returns a Dict[int, RRef] with keys being the RPC rank and values + being RRefs to shards on that rank. Need to initialize the + RPC framework for this functionality. 
+ + Raises an exception if ShardedTensor was created with ``init_rrefs=False`` + """ + if not self._init_rrefs: + raise RuntimeError( + 'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available' + ) + return self._remote_shards + + def __hash__(self): + return id(self) + + def __repr__(self): + return f'ShardedTensor({self._metadata})' + + @dataclass + class ProcessGroupState: + """ + State for ser-de of process group + """ + local_rank: int + global_rank: int + local_world_size: int + global_world_size: int + + def __getstate__(self): + pg_state = ShardedTensor.ProcessGroupState( + distributed_c10d.get_rank(self._process_group), + distributed_c10d.get_rank(), + distributed_c10d.get_world_size(self._process_group), + distributed_c10d.get_world_size(), + ) + + return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs + + def __setstate__(self, state): + self._sharded_tensor_id = None + if not distributed_c10d.is_initialized(): + raise RuntimeError( + 'Need to initialize default process group using ' + '"init_process_group" before loading ShardedTensor') + + self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state + + # Setup process group + from torch.distributed._shard.api import _get_current_process_group + self._process_group = _get_current_process_group() + + # Validate process group. + local_rank = distributed_c10d.get_rank(self._process_group) + if pg_state.local_rank != local_rank: + raise RuntimeError( + f'Local rank at save time was {pg_state.local_rank}, but at ' + f'load time was {local_rank}') + + global_rank = distributed_c10d.get_rank() + if pg_state.global_rank != global_rank: + raise RuntimeError( + f'Global rank at save time was {pg_state.global_rank}, but at ' + f'load time was {global_rank}') + + local_world_size = distributed_c10d.get_world_size(self._process_group) + if pg_state.local_world_size != local_world_size: + raise RuntimeError( + f'Local world size at save time was {pg_state.local_world_size}, ' + f'but at load time was {local_world_size}') + + global_world_size = distributed_c10d.get_world_size() + if pg_state.global_world_size != global_world_size: + raise RuntimeError( + f'Global world size at save time was {pg_state.global_world_size}, ' + f'but at load time was {global_world_size}') + + self._post_init() + + +def _create_tensor_from_params(*size, local_device, tensor_properties: TensorProperties): + """ Helper to construct tensor from size, device and common params. """ + dtype = tensor_properties.dtype + layout = tensor_properties.layout + requires_grad = tensor_properties.requires_grad + memory_format = tensor_properties.memory_format + pin_memory = tensor_properties.pin_memory + + return torch.empty( + *size, dtype=dtype, layout=layout, + device=local_device, requires_grad=requires_grad, + memory_format=memory_format, pin_memory=pin_memory + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..87cb74fbd01d20dd41fe475184f25929b9ab8833 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import List, Tuple + +from torch.distributed._shard.sharded_tensor.logging_handlers import ( + _log_handlers, +) + +__all__: List[str] = [] + + +def _get_or_create_logger() -> logging.Logger: + logging_handler, log_handler_name = _get_logging_handler() + logger = logging.getLogger(f"sharding-spec-{log_handler_name}") + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + logging_handler.setFormatter(formatter) + logger.propagate = False + logger.addHandler(logging_handler) + return logger + + +def _get_logging_handler( + destination: str = "default", +) -> Tuple[logging.Handler, str]: + log_handler = _log_handlers[destination] + log_handler_name = type(log_handler).__name__ + return (log_handler, log_handler_name) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..3c607fe45da7713dc52ca01ce70abb53cdebb42f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import Dict, List + +__all__: List[str] = [] + +_log_handlers: Dict[str, logging.Handler] = { + "default": logging.NullHandler(), +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..cb112da5686b5b92809be393d035cc674eaddd86 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py @@ -0,0 +1,82 @@ +from dataclasses import dataclass, field +from enum import Enum +from typing import List + +import torch +from torch.distributed._shard.metadata import ShardMetadata + +class MEM_FORMAT_ENCODING(Enum): + TORCH_CONTIGUOUS_FORMAT = 0 + TORCH_CHANNELS_LAST = 1 + TORCH_PRESERVE_FORMAT = 2 + +@dataclass +class TensorProperties: + """ Properties used to create :class:`Tensor` """ + + # Regular tensor fields + dtype: torch.dtype = field(default=torch.get_default_dtype()) + layout: torch.layout = field(default=torch.strided) + requires_grad: bool = False + memory_format: torch.memory_format = field(default=torch.contiguous_format) + pin_memory: bool = False + + def __getstate__(self): + # Since torch.memory_format cannot be pickled! 
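# --- Editor's sketch (not part of this patch) ---------------------------------
# The __getstate__/__setstate__ pair in TensorProperties exists because
# torch.memory_format cannot be pickled directly, so it is mapped to
# MEM_FORMAT_ENCODING and back. A pickle round trip therefore preserves
# memory_format:
import pickle
import torch
from torch.distributed._shard.sharded_tensor.metadata import TensorProperties

def _pickle_roundtrip_demo() -> None:
    props = TensorProperties(
        dtype=torch.float16,
        layout=torch.strided,
        requires_grad=False,
        memory_format=torch.channels_last,
        pin_memory=False,
    )
    restored = pickle.loads(pickle.dumps(props))
    assert restored.memory_format == torch.channels_last
    assert restored.dtype == torch.float16
# -------------------------------------------------------------------------------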
+ memory_format = self.memory_format + if memory_format == torch.contiguous_format: + mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT + elif memory_format == torch.channels_last: + mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST + elif memory_format == torch.preserve_format: + mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT + else: + raise RuntimeError(f'Invalid torch.memory_format: {memory_format}') + + return ( + self.dtype, + self.layout, + self.requires_grad, + mem_format_encoding, + self.pin_memory, + ) + + def __setstate__( + self, + state, + ): + (self.dtype, self.layout, self.requires_grad, mem_format_encoding, self.pin_memory) = state + + if mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT: + memory_format = torch.contiguous_format + elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST: + memory_format = torch.channels_last + elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT: + memory_format = torch.preserve_format + else: + raise RuntimeError(f'Invalid torch.memory_format encoding: {mem_format_encoding}') + + self.memory_format = memory_format + + @staticmethod + def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties": + return TensorProperties( + dtype=tensor.dtype, + layout=tensor.layout, + requires_grad=tensor.requires_grad, + memory_format=torch.contiguous_format, + pin_memory=tensor.is_pinned() + ) +@dataclass +class ShardedTensorMetadata: + """ + Represents metadata for :class:`ShardedTensor` + """ + + # Metadata about each shard of the Tensor + shards_metadata: List[ShardMetadata] = field(default_factory=list) + + # Size of each dim of the overall Tensor. + size: torch.Size = field(default=torch.Size([])) + + tensor_properties: TensorProperties = field(default_factory=TensorProperties) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py new file mode 100644 index 0000000000000000000000000000000000000000..de7a44bb820090e34bbbf7e80788e93948c801ec --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py @@ -0,0 +1,248 @@ +import copy +from typing import List, Tuple + +import torch +import torch.distributed as dist +from torch._C._distributed_c10d import ( + ProcessGroup, +) +import torch.distributed._shard.sharding_spec as shard_spec +from torch.distributed._shard.sharding_spec._internals import ( + get_split_size, + get_chunked_dim_size, +) +from torch.distributed.nn.functional import ( + all_to_all, + all_to_all_single, +) +from torch.distributed._shard.metadata import ShardMetadata + +from .shard import Shard + + +def get_idx_from_placements(placements, current_rank) -> int: + """ + Return the position of the current rank in the given placements. + + Args: + placements(List[Union[_remote_device, str]]): + Specifies the placement of each shard of the Tensor. The size of + the list represents the number of shards to be created. This could + be a list of + :class:`torch.distributed._remote_device`'s. This list + could also contain a string which represents remote + device as accepted by + :class:`torch.distributed._remote_device` + current_rank (int): number of current device. + + Returns: + A int which contains the position of current device in the placement list. 
+ """ + for idx, placement in enumerate(placements): # type: ignore[attr-defined] + if current_rank == placement.rank(): # type: ignore[union-attr] + return idx + raise RuntimeError('current_rank not in the placement.') + + +def build_reshard_metadata( + st_size: torch.Size, + sharding_spec: shard_spec.ShardingSpec, + world_size: int, +) -> Tuple[List[ShardMetadata], List[int]]: + """ + Based the given sharding spec, we calculate the offset and local shard size. + We then build a ShardMetadata on top of the calculation result. + + Args: + st_size (torch.Size): The size of the sharded tensor. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded. + world_size (int): number of ranks. + + Returns: + A Tuple of the followings: + A List[`ShardMetadata`] which contains the metadata for the shard, including + offsets, lengths and device placement. + A List[int] which contains the ranks in the order of placement. + """ + shard_dim = int(sharding_spec.dim) # type: ignore[attr-defined] + shards_metadata = [None] * world_size + ranks = [] + offsets = [0] * len(st_size) + split_size = get_split_size(st_size[shard_dim], world_size) + for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined] + ranks.append(placement.rank()) + sharded_dim_size = get_chunked_dim_size(st_size[shard_dim], split_size, idx) + local_tensor_size = list(st_size) + local_tensor_size[shard_dim] = sharded_dim_size + shards_metadata[placement.rank()] = ShardMetadata( # type: ignore[call-overload] + shard_offsets=copy.deepcopy(offsets), + shard_sizes=local_tensor_size, + placement=placement, + ) + offsets[shard_dim] += sharded_dim_size + return shards_metadata, ranks # type: ignore[return-value] + + +def reshuffle_local_shard( + local_shard: torch.Tensor, + st_size: torch.Size, + sharding_spec: shard_spec.ShardingSpec, + resharding_spec: shard_spec.ShardingSpec, + pg: ProcessGroup, +) -> Tuple[List[Shard], List[ShardMetadata]]: + """ + Reshuffle the local shard directly when the reshard dim is same as the original + sharding dim. Logically we do this in two step: + 1. To collect all shards based on original sharding spec. + 2. Reshard the tensor based on the given resharding spec. + + In reality, we consolidate the two steps into one by sending the local tensor to + the new shard directly based on the resharding spec. + + Args: + local_shard (Tensor): Local tensor stored in the current rank. + st_size (torch.Size): The size of the sharded tensor. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded originally. + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor will be resharded. + pg (ProcessGroup): The process group to aggregate on. + + Returns: + A Tuple of the followings: + A List[`Shard`] which contains the local tensor and its metadata. + A List[`ShardMetadata`] which contains the metadata for the shard, including + offsets, lengths and device placement. + """ + current_rank = dist.get_rank(pg) + world_size = dist.get_world_size(pg) + # Build shards_metadata first. + shards_metadata, ranks = build_reshard_metadata( + st_size, resharding_spec, world_size + ) + # Get input split size for all2all. 
+ reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined] + split_size = get_split_size(st_size[reshard_dim], world_size) + input_split_sizes = [0] * world_size + idx = get_idx_from_placements(sharding_spec.placements, current_rank) # type: ignore[attr-defined] + new_rank = resharding_spec.placements[idx].rank() # type: ignore[union-attr, attr-defined] + input_split_sizes[new_rank] = local_shard.size(reshard_dim) + # Get output split size for all2all. + output_split_sizes = [0] * world_size + new_idx = ranks.index(current_rank) + sharded_dim_size = get_chunked_dim_size(st_size[reshard_dim], split_size, new_idx) + output_split_sizes[new_rank] = sharded_dim_size + # Get gathered_input for all2all. + local_shard = local_shard.transpose(0, reshard_dim).contiguous() + gathered_input_size = list(local_shard.size()) + gathered_input_size[0] = sharded_dim_size + gathered_input = torch.empty(gathered_input_size, device=local_shard.device, dtype=local_shard.dtype) + # all2all. + local_shard = all_to_all_single( + gathered_input, + local_shard, + input_split_sizes=input_split_sizes, + output_split_sizes=output_split_sizes, + group=pg, + ) + local_tensor = local_shard.transpose(0, reshard_dim).contiguous() + local_shards = [Shard(local_tensor, shards_metadata[current_rank])] + return local_shards, shards_metadata + + +def reshard_local_shard( + local_tensor: torch.Tensor, + st_size: torch.Size, + sharding_spec: shard_spec.ShardingSpec, + resharding_spec: shard_spec.ShardingSpec, + pg: ProcessGroup, +) -> Tuple[List[Shard], List[ShardMetadata]]: + """ + Reshard a sharded tensor given the ``resharding_spec``. When the reshard dim is + different from the original sharding dim, we need to do two steps logically: + 1. To collect all shards based on original sharding spec. + 2. Reshard the tensor based on the given resharding spec. + + In reality, we consolidate the two steps into one by sending each rank the new + shard based on the resharding spec. + + Args: + local_tensor (Tensor): Local tensor stored in the current rank. + st_size (torch.Size): The size of the sharded tensor. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded originally. + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor will be resharded. + pg (ProcessGroup): The process group to aggregate on. + + Returns: + A Tuple of the followings: + A List[`Shard`] which contains the local tensor and its metadata. + A List[`ShardMetadata`] which contains the metadata for the shard, including + offsets, lengths and device placement. + """ + current_rank = dist.get_rank(pg) + world_size = dist.get_world_size(pg) + current_sharding_dim = int(sharding_spec.dim) # type: ignore[attr-defined] + reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined] + + # Build shards_metadata first. + shards_metadata, ranks = build_reshard_metadata( + st_size, resharding_spec, world_size + ) + + # Compute expected size + input_split_sizes = [] + for metadata in shards_metadata: + input_split_sizes.append(metadata.shard_sizes[reshard_dim]) + rearrange_input = any(ranks[i] > ranks[i + 1] for i in range(len(ranks) - 1)) + + if rearrange_input: + # Need to re-arrange reshard_dim of local_tensor before all2all. 
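# --- Editor's sketch (not part of this patch) ---------------------------------
# The split-size convention behind the all_to_all_single call above:
# input_split_sizes[j] is how many rows (along dim 0) this rank sends to rank j,
# and output_split_sizes[j] is how many rows it receives from rank j. A minimal
# version using the plain collective (torch.distributed.all_to_all_single, not
# the autograd-enabled wrapper used above) looks like this; it assumes an
# initialized process group whose backend supports all-to-all (e.g. NCCL) and
# tensors placed on the matching device.
import torch
import torch.distributed as dist

def _toy_all_to_all(local_rows: torch.Tensor, send_counts, recv_counts):
    # local_rows: (sum(send_counts), feature_dim); rows destined for rank j are
    # stored contiguously in rank order, as in reshuffle_local_shard.
    out = torch.empty(
        (sum(recv_counts), local_rows.size(1)),
        device=local_rows.device,
        dtype=local_rows.dtype,
    )
    dist.all_to_all_single(
        out,
        local_rows.contiguous(),
        output_split_sizes=list(recv_counts),
        input_split_sizes=list(send_counts),
    )
    return out
# -------------------------------------------------------------------------------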
+ indices: List[int] = [] + for metadata in shards_metadata: + offset_start_idx = metadata.shard_offsets[reshard_dim] + split_size = metadata.shard_sizes[reshard_dim] + indices += range(offset_start_idx, offset_start_idx + split_size) + local_tensor = local_tensor.index_select( + reshard_dim, torch.tensor(indices, device=local_tensor.device) + ) + + # Because reshard_dim != original shard_dim. We need to compute the + # size of tensor from each rank. + output_tensor_list = [torch.tensor(1)] * world_size + split_size = get_split_size(st_size[current_sharding_dim], world_size) + rearrange_output_list = False + indices = [] + for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined] + sharded_dim_size = get_chunked_dim_size( + st_size[current_sharding_dim], split_size, idx + ) + output_tensor_size = list(st_size) + output_tensor_size[current_sharding_dim] = sharded_dim_size + output_tensor_size[reshard_dim] = input_split_sizes[current_rank] + output_tensor_list[ + placement.rank() + ] = torch.empty( # type: ignore[union-attr, index] + output_tensor_size, device=local_tensor.device, dtype=local_tensor.dtype + ) + indices.append(placement.rank()) # type: ignore[union-attr, index, arg-type] + if idx != placement.rank(): # type: ignore[union-attr] + rearrange_output_list = True + + # Perform autograd enabled all2all. + input_tensor_tuple = torch.split(local_tensor, input_split_sizes, dim=reshard_dim) + input_tensor_list = [tensor.contiguous() for tensor in input_tensor_tuple] + output_tensor_list = all_to_all( + output_tensor_list, + input_tensor_list, + group=pg, + ) + + if rearrange_output_list: + # Need to re-arrange original shard_dim of output_tensor_list. + output_tensor_list = [output_tensor_list[idx] for idx in indices] # type: ignore[call-overload] + local_tensor = torch.cat(output_tensor_list, dim=current_sharding_dim) + local_shards = [Shard(local_tensor, shards_metadata[current_rank])] + return local_shards, shards_metadata diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py new file mode 100644 index 0000000000000000000000000000000000000000..d448cc6321b1015498815bb2c243be60896bcbf4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py @@ -0,0 +1,58 @@ +from dataclasses import dataclass +from typing import List + +import torch +from torch.distributed._shard.metadata import ShardMetadata +from torch.distributed.remote_device import _remote_device + + +@dataclass +class Shard: + """ + Container which holds the data for a shard as a Tensor and also + the associated metadata for that shard. + + Args: + tensor(torch.Tensor): Local tensor for the shard. + metadata(:class `torch.distributed._shard.sharded_tensor.ShardMetadata`): + The metadata for the shard, including offsets, lengths and device placement. + """ + __slots__ = ['tensor', 'metadata'] + tensor: torch.Tensor + metadata: ShardMetadata + + def __post_init__(self): + # verification between local tensor and metadata + if list(self.tensor.size()) != self.metadata.shard_sizes: + raise ValueError( + "Shard tensor size does not match with metadata.shard_lengths! 
" + f"Found shard tensor size: {list(self.tensor.size())}, " + f"metadata.shard_lengths: {self.metadata.shard_sizes}, " + ) + placement_device = self.metadata.placement + if placement_device is not None and placement_device.device() != self.tensor.device: + raise ValueError( + f"Local shard tensor device does not match with local Shard's placement! " + f"Found local shard tensor device: {self.tensor.device}, " + f"local shard metadata placement device: {placement_device.device()}" + ) + + @classmethod + def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: List[int], rank: int): + """ + Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank. + + Args: + tensor(torch.Tensor): Local tensor for the shard. + shard_offsets(List[int]): List of integers specify the offset + of the shard on each dimension. + rank(int): Specify the rank for the shard. + """ + shard_sizes = list(tensor.size()) + placement = _remote_device(f"rank:{rank}/{str(tensor.device)}") + shard_meta = ShardMetadata( + shard_offsets=shard_offsets, + shard_sizes=shard_sizes, + placement=placement + ) + return Shard(tensor, shard_meta) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9ca0ee0eba743284186317febadfafec6c7a9a44 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py @@ -0,0 +1,211 @@ +import collections.abc +import copy +from typing import Optional, List, Sequence + +import torch +from torch.distributed import distributed_c10d +from torch.distributed import rpc +from torch.distributed._shard.sharding_spec._internals import ( + check_tensor, + validate_non_overlapping_shards_metadata, +) + +from torch.distributed._shard.metadata import ShardMetadata +from .metadata import TensorProperties, ShardedTensorMetadata +from .shard import Shard + +def _parse_and_validate_remote_device(pg, remote_device): + if remote_device is None: + raise ValueError("remote device is None") + + worker_name = remote_device.worker_name() + rank = remote_device.rank() + device = remote_device.device() + + # Validate rank, skip validation if rank is not part of process group. + if not distributed_c10d._rank_not_in_group(pg): + if rank is not None and (rank < 0 or rank >= distributed_c10d.get_world_size(pg)): + raise ValueError(f'Invalid rank: {rank}') + + if worker_name is not None: + if not rpc._is_current_rpc_agent_set(): + raise RuntimeError(f'RPC framework needs to be initialized for using worker names: {worker_name}') + + workers = rpc._get_current_rpc_agent().get_worker_infos() + for worker in workers: + if worker.name == worker_name: + return worker.id, device + + raise ValueError(f'Invalid worker name: {worker_name}') + + return rank, device + +def _validate_output_tensor_for_gather( + my_rank: int, + dst_rank: int, + size: torch.Size, + dst_tensor: Optional[torch.Tensor], +) -> None: + if dst_rank == my_rank: + if dst_tensor is None: + raise ValueError( + f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}" + ) + if tuple(size) != (dst_tensor.size()): + raise ValueError( + f"Argument ``dst_tensor`` have size {tuple(dst_tensor.size())}," + f"but should be {tuple(size)}" + ) + elif dst_tensor: + raise ValueError( + "Argument ``dst_tensor`` must NOT be specified " + "on non-destination ranks." 
+ ) + +def _flatten_tensor_size(size) -> torch.Size: + """ + Checks if tensor size is valid, then flatten/return a torch.Size object. + """ + if len(size) == 1 and isinstance(size[0], collections.abc.Sequence): + dims = list(*size) + else: + dims = list(size) + + for dim in dims: + if not isinstance(dim, int): + raise TypeError(f'size has to be a sequence of ints, found: {dims}') + + return torch.Size(dims) + +def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True): + if is_local: + assert isinstance(ranks, int) + if expected != actual: + raise ValueError(f"Local shards' tensor {prop_name} property need to be the same on rank:{ranks}! " + f"Found one local shard tensor {prop_name}={expected}, " + f"the other local shard tensor {prop_name}={actual}.") + else: + # compare failure check across ranks, ranks list should have two rank + assert len(ranks) == 2 + if expected != actual: + raise ValueError(f"ShardedTensor {prop_name} property does not match from different ranks! " + f"Found {prop_name}={expected} on rank:{ranks[0]}, " + f"and {prop_name}={actual} on rank:{ranks[1]}.") + + +def build_metadata_from_local_shards( + local_shards: List[Shard], + global_size: torch.Size, + current_rank: int, + pg: distributed_c10d.ProcessGroup +) -> ShardedTensorMetadata: + + assert len(local_shards) > 0, "must have local shards!" + local_shard_metadatas: List[ShardMetadata] = [] + + first_shard_dtype = local_shards[0].tensor.dtype + first_shard_layout = local_shards[0].tensor.layout + first_shard_requires_grad = local_shards[0].tensor.requires_grad + first_shard_is_pinned = local_shards[0].tensor.is_pinned() + + # 1). Validate local tensors and associated metadatas + for local_shard in local_shards: + local_shard_tensor = local_shard.tensor + local_shard_meta = local_shard.metadata + local_shard_metadatas.append(local_shard_meta) + rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement) + + if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout: + raise ValueError( + f'Only torch.strided layout is currently supported, but found ' + f'{local_shard_tensor.layout} on rank:{current_rank}!' + ) + + if not local_shard_tensor.is_contiguous(): + raise ValueError('Only torch.contiguous_format memory_format is currently supported!') + + if rank != current_rank: + raise ValueError( + f"Local shard metadata's rank does not match with the rank in its process group! " + f'Found current rank in the process group: {current_rank}, ' + f"local ShardMetadata placement's rank: {rank}" + ) + if local_shard_tensor.device != local_device: + raise ValueError( + f"Local shard tensor device does not match with local Shard's placement! " + f"Found local shard tensor device: {local_shard_tensor.device}, " + f"local shard metadata placement device: {local_device}" + ) + + _raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank) + _raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank) + _raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank) + _raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank) + + # 2). 
Build a "local" ShardedTensorMetadata with all local shards on this rank, then + # do all_gather to collect local_sharded_tensor_metadata from all ranks + local_tensor_properties = TensorProperties( + dtype=first_shard_dtype, + layout=first_shard_layout, + requires_grad=first_shard_requires_grad, + memory_format=torch.contiguous_format, + pin_memory=first_shard_is_pinned + ) + + local_sharded_tensor_metadata = ShardedTensorMetadata( + shards_metadata=local_shard_metadatas, + size=global_size, + tensor_properties=local_tensor_properties) + + return local_sharded_tensor_metadata + + +def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]): + global_sharded_tensor_metadata = None + global_metadata_rank = 0 + + for rank, rank_metadata in enumerate(gathered_metadatas): + if rank_metadata is None: + continue + + if global_sharded_tensor_metadata is None: + global_sharded_tensor_metadata = copy.deepcopy(rank_metadata) + global_metadata_rank = rank + else: + _raise_if_mismatch(global_sharded_tensor_metadata.size, + rank_metadata.size, + "global_size", + [global_metadata_rank, rank], + is_local=False) + + # don't need to check layout and memory format as we already checked in local shards validation stage + _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype, + rank_metadata.tensor_properties.dtype, + "dtype", + [global_metadata_rank, rank], + is_local=False) + + _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad, + rank_metadata.tensor_properties.requires_grad, + "requires_grad", + [global_metadata_rank, rank], + is_local=False) + + _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory, + rank_metadata.tensor_properties.pin_memory, + "pin_memory", + [global_metadata_rank, rank], + is_local=False) + # pass all validations, extend shards metadata + global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata) + + if global_sharded_tensor_metadata is not None: + # check if shards_metadata have overlap shards + validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata) + + # check if the shards_metadata is compatible with global size of the sharded tensor. + check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size) + else: + raise ValueError("ShardedTensor have no local shards on all ranks!") + + return global_sharded_tensor_metadata diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3b3596d1beea8b50c9c81db74d91ffcb42c16b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py @@ -0,0 +1,27 @@ +import abc +import torch.nn as nn + +class Sharder(abc.ABC): + """ + This is an interface which allows user to create more advanced + sharding strategies that are not easily be composed by the + `ShardingSpec`. + + :class:`torch.distributed._shard.sharding_plan.ShardingPlan` could + take an object of the `Sharder` and call `shard` to shard the module, + then replace the original module with sharded module returned. + """ + @abc.abstractmethod + def shard(self, module: nn.Module) -> nn.Module: + """ + Shard a module base on the implementation of this method, and + return the sharded version of the module. + + Args: + module (:class:`torch.nn.Module`): + The module to apply sharding to. 
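+ Example (illustrative sketch only; ``MyShardedLinear`` is a hypothetical
+ sharded implementation, not part of this API)::
+
+ >>> # xdoctest: +SKIP("illustrative only")
+ >>> class MyLinearSharder(Sharder):
+ >>> def shard(self, module: nn.Module) -> nn.Module:
+ >>> # replace the dense module with a hypothetical sharded variant
+ >>> return MyShardedLinear.from_module(module)
+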
+ Returns: + A :class:`torch.nn.Module` object that represents a module + that's already been sharded. + """ + pass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..269dfd8af76052f20852a2492adbc7adaf83c040 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py @@ -0,0 +1,4 @@ +from .api import ( + ShardingPlan, + ShardingPlanner +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b96de19c32750504591a734e318b96057edd34f8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..970f76f47f586febbf9726cc7b19a2207878313a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py new file mode 100644 index 0000000000000000000000000000000000000000..fa92bf70788876216f6b6c139f6c4389bd42747a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py @@ -0,0 +1,86 @@ +import abc +import torch.nn as nn + +from dataclasses import dataclass +from typing import Dict, List, Optional, Union + +from torch.distributed._shard.sharder import Sharder +from torch.distributed._shard.sharding_spec import ShardingSpec + +@dataclass +class ShardingPlan: + """ + Representation of a sharding plan, describes how to shard a module + across hosts. `plan` is used to shard module parameters according to the spec provided, + `output_plan` and `return_local_tensor` are optional, they are used to specify the output + layout of a module with a spec, and when to convert back to data parallel fashion. + + Args: + plan (Dict[str, Union[:class:`torch.distributed._shard.sharding_spec.ShardingSpec`, + :class:`torch.distributed._shard.sharder.Sharder`]): + a dict describes how to shard a module, there're currently two ways to shard a module: + 1. directly shard a module parameter by a `ShardingSpec`, keyed by the name of + a parameter to a `ShardingSpec`. + 2. shard a submodule by applying a `Sharder` on it, keyed by the name of a module + to a `Sharder` object. + output_plan (Dict[str, :class:`torch.distributed._shard.sharding_spec.ShardingSpec`), optional): + a dict specifies the layout of a module's output which produces a ShardedTensor, + keyed by the name of module to ShardingSpec("" in key means the root module). 
+ Default: `None` + return_local_tensor (List[str], optional): a list of string, each element enables + a module's sharded output to be returned as a Tensor from its local shards to + ensure further processing in a data parallel fashion. ("" in list means the + root module). + Default: None + Example: + Suppose we want to shard a module with two linear layers and then run it with DDP, we also + want to convert the output of the second linear layer back to DDP, we can do it as follows: + + >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d) + >>> class MyModule(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.fc1 = nn.Linear() + >>> self.gelu = nn.GELU() + >>> self.fc2 = nn.Linear() + >>> self.relu = nn.Linear() + >>> + >>> def forward(self, input): + >>> return self.relu(self.fc2(self.gelu(self.fc1(input)))) + + + >>> # xdoctest: +SKIP("Undefined spec1, spec2) + >>> sharding_plan = ShardingPlan( + >>> plan={ + >>> "fc1.weight": spec1, + >>> "fc2.weight": spec2 + >>> }, + >>> output_plan={ + >>> "fc2": output_spec + >>> }, + >>> return_local_tensor=["fc2"] + >>> ) + """ + plan: Dict[str, Union[ShardingSpec, Sharder]] + output_plan: Optional[Dict[str, ShardingSpec]] = None + return_local_tensor: Optional[List[str]] = None + + +class ShardingPlanner(abc.ABC): + """ + Default ShardingPlanner interface, can be extended and + implement advanced sharding strategies. + """ + @abc.abstractmethod + def build_plan(self, module: nn.Module) -> ShardingPlan: + """ + Given a nn.Module, define how to shard the module across + ranks, return a ShardingPlan + Args: + module (:class:`torch.nn.Module`): + The module to apply sharding to. + Returns: + A :class:`torch.distributed._shard.sharding_plan.ShardingPlan` object that + represents how to shard the module. + """ + pass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3e5e628b0522b58ea4fb12554ff3556c40b7a2c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__init__.py @@ -0,0 +1,342 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from typing import Optional, Sequence + +# Import all builtin dist tensor ops +import torch +import torch.distributed._tensor.ops +import torch.distributed._tensor.random as random +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.api import distribute_module, distribute_tensor, DTensor +from torch.distributed._tensor.ops.utils import normalize_to_torch_size +from torch.distributed._tensor.placement_types import Placement, Replicate, Shard +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh + +# All public APIs from dtensor package +__all__ = [ + "DTensor", + "DeviceMesh", + "distribute_tensor", + "distribute_module", + "init_device_mesh,", + "Shard", + "Replicate", +] + + +def _dtensor_init_helper( + init_op, + size: torch.Size, + device_mesh=None, + placements=None, + **kwargs, +) -> DTensor: + # if device_mesh is None, use the one from mesh resources + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + kwargs["device"] = device_mesh.device_type + + # set default placements to replicated if not specified + placements = placements or tuple(Replicate() for _ in range(device_mesh.ndim)) + + # check device_mesh againts placements + assert device_mesh.ndim == len( + placements + ), "mesh dimension does not match the length of placements" + + assert kwargs["layout"] == torch.strided, "layout value not supported!" + torch_stride = torch._prims_common.make_contiguous_strides_for(size) + + # get local tensor shape + local_shape = compute_local_shape(size, device_mesh, placements) + # initialize the local tensor + if init_op == torch.full: + fill_value = kwargs.pop("fill_value", 0) + local_tensor = init_op(local_shape, fill_value, **kwargs) + elif init_op == torch.rand or init_op == torch.randn: + # this tensor meta is not used except `shape` + dtype = kwargs.get("dtype", torch.get_default_dtype()) + + from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + + tensor_meta = TensorMeta(size, (0,), dtype) + spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta) + + if random.is_rng_supported_mesh(device_mesh) and not random._rng_tracker: + random._rng_tracker = random.OffsetBasedRNGTracker() + + assert random._rng_tracker is not None + with random._rng_tracker._distribute_region(spec): + local_tensor = init_op(local_shape, **kwargs) + else: + local_tensor = init_op(local_shape, **kwargs) + + return DTensor( + local_tensor=local_tensor, + device_mesh=device_mesh, + placements=tuple(placements), + shape=size, + dtype=local_tensor.dtype, + stride=torch_stride, + requires_grad=kwargs["requires_grad"], + ) + + +def ones( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with the scalar value 1, with the shape defined + by the variable argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). 
+ layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.ones, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def empty( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with uninitialized data. The shape of the :class:`DTensor` + is defined by the variable argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: empty(1,2,3..) or empty([1,2,3..]) or empty((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).\ + layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.empty, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def full( + size, + fill_value, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with ``fill_value``. The scalar value type should match + ``device_mesh.device_type``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + fill_value(Scalar): the value to fill the output tensor with. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks. 
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.full, + torch_size, + fill_value=fill_value, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def rand( + *size, + requires_grad: bool = False, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with random numbers from a uniform distribution + on the interval ``[0, 1)``. The shape of the tensor is defined by the variable + argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks. + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.rand, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def randn( + *size, + requires_grad: bool = False, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with random numbers from a normal distribution + with mean 0 and variance 1. The shape of the tensor is defined by the variable + argument ``size``. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: ones(1,2,3..) or ones([1,2,3..]) or ones((1,2,3..)) + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned DTensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks. 
+ placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.randn, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) + + +def zeros( + *size, + requires_grad: bool = False, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Returns a :class:`DTensor` filled with the scalar value 0. + + Args: + size (int...): a sequence of integers defining the shape of the output :class:`DTensor`. + Can be a variable number of arguments or a collection like a list or tuple. + E.g.: zeros(1,2,3..) or zeros([1,2,3..]) or zeros((1,2,3..)) + Keyword args: + requires_grad (bool, optional): If autograd should record operations on the + returned :class:`DTensor`. Default: ``False``. + dtype (:class:`torch.dtype`, optional): the desired data type of returned :class:`DTensor`. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned :class:`DTensor`. + Default: ``torch.strided``. + device_mesh: :class:`DeviceMesh` type, contains the mesh info of ranks + placements: a sequence of :class:`Placement` type: ``Shard``, ``Replicate`` + + Returns: + A :class:`DTensor` object on each rank + """ + torch_size = normalize_to_torch_size(size) + + return _dtensor_init_helper( + torch.zeros, + torch_size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + device_mesh=device_mesh, + placements=placements, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a5609feabcb068951c2518901078db238a09864 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da5044e1a31df752905499ba3b3c7223b31d0e4d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_collective_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf02a31ac5a56a1f6450528e3136a7da697cdd1a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cd0c2b626ffdf32b884b1c47cb0f63bb9d63bc7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/device_mesh.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7c01f8c643a3c60753be3d6f7f21396b3445395 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/dispatch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..428d7779694b4f0a03f3a050eea3dc5482287ffc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/op_schema.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6be51962c5503491ca47b06e0df378488c90a915 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/placement_types.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..325cf805d47e85c322c3cd33e12d0a7bdfceeca9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/random.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b8651ce6525d3c1294c60dbfa163669339954ab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/redistribute.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb481247c097982189a8d0ba79e2a5189f91410c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/__pycache__/sharding_prop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf8376bd232811dd8d135884c9369dfc6d1ac89 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/_collective_utils.py @@ -0,0 +1,313 @@ +import logging 
+import math +from dataclasses import dataclass +from functools import lru_cache + +from typing import List, Optional + +import torch +import torch.distributed._tensor.placement_types as placement_types +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh +from torch.distributed.distributed_c10d import ( + all_to_all, + broadcast, + get_global_rank, + get_rank, + get_world_size, + GroupMember, + ProcessGroup, + scatter, + Work, +) + +logger = logging.getLogger(__name__) + + +# TODO: we need to migrate these APIs to be functional collectives + + +def mesh_scatter( + output: torch.Tensor, + scatter_list: List[torch.Tensor], + mesh: DeviceMesh, + mesh_dim: int = 0, + async_op: bool = False, +) -> Optional[Work]: + """ + scatter a list of tensors to a device mesh dimension. We by default + use the first rank of the mesh dimension as the source of truth, i.e + for a 2d mesh [[0, 1], [2, 3]], if we scatter on mesh_dim = 1, we will + scatter the tensor list on rank 0 to rank 0/1, and tensor list on rank + 2 to rank 2/3. + + Args: + output (torch.Tensor): the tensor to receive the scattered list. + scatter_list (List[torch.Tensor]): the tensor list to be scattered. + mesh_dim (int, optional): indicate which mesh dimension we want + to scatter on, we by default choose the first rank on the + mesh dimension as source of truth. + + Returns: + A :class:`Work` object + """ + # TODO: Ideally we should use the meta tensor way + # (to register a meta kernel for the collective op) + # so that it would avoid the communication. Need to + # remove the check below once that is done. + if output.is_meta: + return None + dim_group = mesh.get_group(mesh_dim) + assert isinstance(dim_group, ProcessGroup) + # src need to be global rank + src_for_dim = 0 + + if dim_group is not GroupMember.WORLD: + src_for_dim = get_global_rank(dim_group, 0) + + if src_for_dim == get_rank(): + fut = scatter( + output, + scatter_list=scatter_list, + src=src_for_dim, + group=dim_group, + async_op=async_op, + ) + else: + fut = scatter( + output, + scatter_list=None, + src=src_for_dim, + group=dim_group, + async_op=async_op, + ) + + return fut + + +def mesh_broadcast( + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int = 0, + async_op: bool = False, +) -> Optional[Work]: + """ + broadcast the tensor to a device mesh dimension. We by default + use the first rank of the mesh dimension as the source of truth, i.e + for a 2d mesh [[0, 1], [2, 3]], if we broadcast on mesh_dim = 1, we will + broadcast the tensor on rank 0 to rank 0/1, and tensor on rank 2 + to rank 2/3. + + Args: + tensor (torch.Tensor): tensor to broadcast. + mesh_dim (int, optional): indicate which mesh dimension we want + to scatter on, we by default choose the first rank on the + mesh dimension as source of truth. + + Returns: + A :class:`Work` object + """ + # TODO: Ideally we should use the meta tensor way + # (to register a meta kernel for the collective op) + # so that it would avoid the communication. Need to + # remove the check below once that is done. 
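+ # Illustrative usage (hypothetical 2x2 mesh, mirroring the docstring above):
+ #   mesh = DeviceMesh("cuda", [[0, 1], [2, 3]])
+ #   mesh_broadcast(tensor, mesh, mesh_dim=1)
+ # broadcasts rank 0's tensor to rank 1 and rank 2's tensor to rank 3, since the
+ # first rank of each subgroup along ``mesh_dim`` acts as the source of truth
+ # (``src_for_dim`` below is that member's global rank).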
+ if tensor.is_meta: + return None + dim_group = mesh.get_group(mesh_dim) + assert isinstance(dim_group, ProcessGroup) + # src need to be global rank + src_for_dim = 0 + if dim_group is not GroupMember.WORLD: + src_for_dim = get_global_rank(dim_group, 0) + + return broadcast(tensor, src=src_for_dim, group=dim_group, async_op=async_op) + + +# TODO: test uneven split on GLOO and NCCL +def mesh_all_to_all( + output_tensor_list: List[torch.Tensor], + input_tensor_list: List[torch.Tensor], + mesh: DeviceMesh, + mesh_dim: int = 0, + async_op: bool = False, +) -> Optional[Work]: + dim_group = mesh.get_group(mesh_dim) + assert isinstance(dim_group, ProcessGroup) + + work = None + # no direct dist.all_to_all support on 'gloo' so we manually do scatters + if mesh.device_type == "cpu": + logger.warning( + "ProcessGroupGloo does not support all_to_all, falling back with scatters!" + ) + # TODO: pull the handle of uneven case in #492 + dim_group_size = get_world_size(dim_group) + for i in range(dim_group_size): + # src need to be global rank + src_for_dim = i + if dim_group is not GroupMember.WORLD: + src_for_dim = get_global_rank(dim_group, i) + + work = scatter( + output_tensor_list[i], + input_tensor_list if mesh.get_rank() == src_for_dim else [], + group=dim_group, + src=src_for_dim, + async_op=async_op, + ) + else: + work = all_to_all( + output_tensor_list, + input_tensor_list, + dim_group, + async_op=async_op, + ) + return work + + +def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int: + assert spec.tensor_meta is not None, "spec should have tensor meta defined!" + return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape) + + +@dataclass +class MeshTopoInfo: + """ + Mesh information for collective cost estimation + """ + + mesh: DeviceMesh + mesh_dim_devices: List[int] + mesh_dim_bandwidth: List[float] + mesh_dim_latency: List[float] + + @staticmethod + @lru_cache(None) + def build_from_mesh(mesh: DeviceMesh) -> "MeshTopoInfo": + # Generate mesh topology info for intra-host/inter-host communication pattern + # Note that we made bunch of assumptions for simplicity: + # 1. we assume the mesh is homogeneous, and it's gpu/nccl model + # 2. we assume gpu arch is Ampere or Hopper + # 3. we assume collectives are all ring base algo for now + num_devices_per_host = _mesh_resources.num_devices_per_host(mesh.device_type) + # the base bw number (intra-node), GB/s + base_bw = 87.7 + mesh_dim_bandwidth = [base_bw] * mesh.ndim + # the latency in terms of us (intra-node, nv-link) + mesh_dim_latency = [0.6] * mesh.ndim + mesh_dim_devices = [1] * mesh.ndim + + total_num_devices = 1 + for mesh_dim in reversed(range(mesh.ndim)): + num_devices = mesh.size(mesh_dim) + mesh_dim_devices[mesh_dim] = num_devices + total_num_devices *= num_devices + if total_num_devices > num_devices_per_host: + # magic number for inter-host communication bandwidth/latency factor + # This number assumes latest GPU arch, i.e. 
Ampere or Hopper + # TODO: see if we need to tweak this or offer a way for user + # to specify the bandwidths/latency + mesh_dim_bandwidth[mesh_dim] *= 0.22 + # set to ethernet latency for inter-host + mesh_dim_latency[mesh_dim] = 2.7 + + return MeshTopoInfo( + mesh, mesh_dim_devices, mesh_dim_bandwidth, mesh_dim_latency + ) + + +def allgather_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float: + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim] + mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim] + num_hops = num_devices_on_mesh_dim - 1 + # base latency + comm latency + latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] # us + bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth # s + return latency + bw * 1e6 # rescale to us + + +def allreduce_cost(bytes_gb: float, mesh_topo: MeshTopoInfo, mesh_dim: int) -> float: + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim] + mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim] + # allreduce have almost 2x comm bytes compare to allgather/reduce_scatter + num_hops = 2 * num_devices_on_mesh_dim - 1 + + latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] + bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth + return latency + bw * 1e6 + + +def reduce_scatter_cost( + bytes_gb: float, + mesh_topo: MeshTopoInfo, + mesh_dim: int, +) -> float: + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[mesh_dim] + mesh_dim_bandwidth = mesh_topo.mesh_dim_bandwidth[mesh_dim] + num_hops = num_devices_on_mesh_dim - 1 + # base latency + comm latency + latency = 6.6 + num_hops * mesh_topo.mesh_dim_latency[mesh_dim] + bw = (bytes_gb * num_hops / num_devices_on_mesh_dim) / mesh_dim_bandwidth + return latency + bw * 1e6 + + +def redistribute_cost( + current_spec: "placement_types.DTensorSpec", + target_spec: "placement_types.DTensorSpec", +) -> float: + """ + This function returns the cost of redistribute from current to target DTensorSpec. + + NOTE: + 1. Only consider communication cost here, since computation costs for redistribute + are quite trival (i.e. we only need to narrow or simple division) + 2. Only consider redistribute cost on same mesh, cross mesh communication cost is + not quite needed for operator strategy estimation/selection. + """ + if current_spec.mesh != target_spec.mesh: + # make infinite cost if meshes are not same + # TODO: see if we want to support this once there's cross mesh communication + return float("inf") + + if current_spec.is_replicated(): + # short-cut: + # comm cost is 0 if current spec is already full replication + return 0.0 + + mesh_topo = MeshTopoInfo.build_from_mesh(current_spec.mesh) + cost = 0.0 + comm_bytes_gb = ( + spec_to_bytes(current_spec) / current_spec.num_shards / 1024 / 1024 / 1024 + ) + # Transformation that considered for redistribute cost: + # 1. allgather 2. alltoall + # 3. allreduce 4. 
reduce_scatter + for i, (current, target) in enumerate( + zip(current_spec.placements, target_spec.placements) + ): + if current == target: + continue + + num_devices_on_mesh_dim = mesh_topo.mesh_dim_devices[i] + if current.is_shard() and target.is_replicate(): + # allgather gives larger comm bytes + comm_bytes_gb *= num_devices_on_mesh_dim + # add up allgather comm cost + cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + elif current.is_shard() and target.is_shard(): + # should be alltoall comm, since we haven't implement it yet, add penalty + # to favor allgather instead + cost += allgather_cost(comm_bytes_gb, mesh_topo, i) + 1.0 + elif current.is_partial() and target.is_replicate(): + # add up allreduce comm cost + cost += allreduce_cost(comm_bytes_gb, mesh_topo, i) + elif current.is_partial() and target.is_shard(): + # add up reduce_scatter comm cost + cost += reduce_scatter_cost(comm_bytes_gb, mesh_topo, i) + # after reduce_scatter the comm bytes for further collectives halved. + comm_bytes_gb /= num_devices_on_mesh_dim + elif current.is_shard() and target.is_partial(): + # ban shard -> partial as it does not make sense to perform + # this redistribute + return float("inf") + + return cost diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/api.py new file mode 100644 index 0000000000000000000000000000000000000000..acafdb1ccb2deaa544dc9448e5f52c12aa69c683 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/api.py @@ -0,0 +1,760 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import inspect +import warnings +from typing import Any, Callable, cast, Optional, Sequence, Tuple + +import torch + +import torch.distributed._tensor.dispatch as op_dispatch +import torch.distributed._tensor.random as random +import torch.nn as nn +from torch.distributed._tensor._collective_utils import mesh_broadcast +from torch.distributed._tensor._utils import compute_global_tensor_info +from torch.distributed._tensor.placement_types import ( + DTensorSpec, + Placement, + Replicate, + Shard, + TensorMeta, +) +from torch.distributed._tensor.random import ( + is_rng_supported_mesh, + OffsetBasedRNGTracker, +) +from torch.distributed._tensor.redistribute import ( + Redistribute, + redistribute_local_tensor, +) +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh + + +__all__ = ["DTensor", "distribute_tensor", "distribute_module"] + +aten = torch.ops.aten + + +# NOTE [Autograd interaction between torch.Tensor] +# +# The autograd functions defined below are being used by the public +# facing APIs (i.e. from_local, to_local) to ensure our DTensor +# works together with torch.Tensor within autograd engine. This +# allows DistributedTensor to exist on part of the module hierarchy +# and still able to calculate gradients across the torch.Tensor and +# DistributedTensor boundary. 
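+#
+# A minimal sketch of that boundary (illustrative only; assumes a 1-D mesh and
+# an already-sharded module ``sharded_mod``):
+#
+#   dt_in  = DTensor.from_local(x, device_mesh, [Shard(0)])  # torch.Tensor -> DTensor
+#   dt_out = sharded_mod(dt_in)                              # computation stays in DTensor
+#   y      = dt_out.to_local()                               # DTensor -> torch.Tensor
+#
+# Both conversions are autograd functions defined below, so gradients of ``y``
+# flow back through the DTensor region to ``x``.
+#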
+# As an example, we have the a module that consists of submodules +# A, B, and C, the execution flow would be like: +# input(torch.Tensor) -> Module A -> Module B -> Module C -> output (torch.Tensor) +# +# Suppose I only want to make Module B be a sharded module with +# DistributedTensor params, we would need to make the following +# flow to work: +# +# input(torch.Tensor) -> Module A +# -> DTensor input -> Sharded Module B -> DTensor output +# -> output (torch.Tensor) -> Module C -> output (torch.Tensor) +# +# We need the conversion from Module A to DTensor input, which is +# `from_local`, and conversion from DTensor output to output, which +# is `to_local`, thus these two functions must be Autograd functions. +# +class _ToTorchTensor(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, + input: "DTensor", + grad_placements: Optional[Sequence[Placement]], + ): + ctx.dtensor_spec = input._spec + ctx.grad_placements = grad_placements + local_tensor = input._local_tensor + + # We need to return a fresh Tensor object there as autograd metadata + # will be inplaced into it. So we don't want to pollute the Tensor + # object stored in the _local_tensor of this DTensor. + return local_tensor.view_as(local_tensor) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): # type: ignore[override] + dtensor_spec = ctx.dtensor_spec + mesh = dtensor_spec.mesh + grad_placements = ctx.grad_placements + dtensor_meta = dtensor_spec.tensor_meta + + _, tensor_stride = compute_global_tensor_info( + grad_output, mesh, dtensor_spec.placements + ) + tensor_stride = tuple(tensor_stride) + grad_placements = grad_placements or dtensor_spec.placements + + return ( + DTensor( + grad_output, + mesh, + grad_placements, + shape=dtensor_meta.shape, + dtype=dtensor_meta.dtype, + requires_grad=grad_output.requires_grad, + stride=tensor_stride, + ), + None, + ) + + +class _FromTorchTensor(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, # pyre-ignore[2]: Parameter must be annotated. + input: torch.Tensor, + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + run_check: bool, + shape: Optional[torch.Size] = None, + stride: Optional[Tuple[int, ...]] = None, + ) -> "DTensor": + ctx.previous_placement = placements + ctx.previous_device_mesh = device_mesh + + if shape and stride: + tensor_shape, tensor_stride = shape, stride + elif not shape and not stride: + # if it's not by default run_check, we assume user is certain that each + # rank has the same tensor shape, and we just use that to calculate the + # global shape + global_shape, global_stride = compute_global_tensor_info( + input, device_mesh, placements + ) + tensor_shape, tensor_stride = torch.Size(global_shape), tuple(global_stride) + else: + raise RuntimeError( + f"Found shape:{shape}, stride:{stride}.", + "Please pass both shape and stride at the same time.", + ) + + if device_mesh.get_coordinate() is None: + # if the global rank is not participating in the device mesh, we + # simply set the local tensor to an empty tensor + input = input.new_empty(0, requires_grad=input.requires_grad) + elif run_check: + # TODO: by default check tensor metas across rank + # TODO: See if we need to make this run_check logic + # have a corresponding backward. 
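+ # Note: the consistency enforcement below currently only covers ``Replicate``
+ # placements: for each replicated mesh dimension the first rank's tensor is
+ # broadcast so all replicas agree. For example (illustrative), with a 2-D mesh
+ # and placements ``[Shard(0), Replicate()]`` only mesh dim 1 is broadcast;
+ # sharded dimensions are taken as-is without cross-rank checks.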
+ for idx, placement in enumerate(placements): + if placement.is_replicate(): + # broadcast rank 0 tensor to all ranks + # only broadcast if run_check is True + input = input.contiguous() + mesh_broadcast(input, device_mesh, mesh_dim=idx) + + # We want a fresh Tensor object that shares memory with the input tensor + dist_tensor = DTensor( + input.view_as(input), + device_mesh, + placements, + shape=tensor_shape, + dtype=input.dtype, + # requires_grad of the dist tensor depends on if input + # requires_grad or not + requires_grad=input.requires_grad, + stride=tensor_stride, + ) + return dist_tensor + + @staticmethod + def backward(ctx, grad_output: "DTensor"): # type: ignore[override] + previous_placement = ctx.previous_placement + previous_device_mesh = ctx.previous_device_mesh + + # reshard to the placement when creating DistributedTensor + # so that the gradient layout matches, and we could return + # local gradients directly + if grad_output.placements != previous_placement: + current_spec = grad_output._spec + target_spec = DTensorSpec( + previous_device_mesh, + previous_placement, + tensor_meta=grad_output._spec.tensor_meta, + ) + local_tensor = grad_output._local_tensor + output = redistribute_local_tensor( + local_tensor, current_spec, target_spec, is_backward=True + ) + # TODO: return the redistributed local tensor directly without + # differentiable backward. see if this make sense for all cases. + return output, None, None, None, None, None + + # TODO: backward is also differentiable now, add a test + # to test higher level gradients. + return grad_output.to_local(), None, None, None, None, None + + +class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__ + _local_tensor: torch.Tensor + _spec: DTensorSpec + __slots__ = ["_local_tensor", "_spec"] + + # class attribute that handles operator placements propagation + # rules, keyed by aten op name, value is propagation func + _op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher() + + @staticmethod + def __new__( + cls, + local_tensor: torch.Tensor, + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + *, + shape: torch.Size, + dtype: torch.dtype, + requires_grad: bool, + stride: Tuple[int, ...], + ) -> "DTensor": + """ + Construct a DTensor from a local tensor, device mesh, and placement and + other tensor properties (i.e. shape, requires_grad, strides, etc). + Note: This is not a public API and it's only supposed to be used by the + operator implementations and internals. If you want to construct a + DTensor from a local tensor, consider using `DTensor.from_local`, if + you want to construct a DTensor from a "global" tensor (where you + already have tensor initialized and want to shard this tensor), + consider using `distribute_tensor`. + """ + if local_tensor.requires_grad and not requires_grad: + warnings.warn( + "To construct DTensor from torch.Tensor, it's recommended to " + "use local_tensor.detach() and make requires_grad consistent." 
+ ) + + # new method instruct wrapper tensor from local_tensor and add + # placement spec, it does not do actual distribution + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, + shape, + strides=stride, + dtype=dtype, + device=local_tensor.device, + layout=local_tensor.layout, + requires_grad=requires_grad, + ) + + tensor_meta = TensorMeta(shape, stride, dtype) + # deepcopy and set spec + r._spec = DTensorSpec(device_mesh, placements, tensor_meta=tensor_meta) + r._local_tensor = local_tensor + return r + + # pyre-fixme[14]: `__repr__` overrides method defined in `DTensor` inconsistently. + # pyre-fixme[3]: Return type must be annotated. + def __repr__(self): + # TODO: consider all_gather the local tensors for better debugging + return f"DTensor(local_tensor={self._local_tensor}, device_mesh={self._spec.mesh}, placements={self._spec.placements})" + + def __tensor_flatten__(self): + """ + protocol to inform how to flatten a DTensor to local tensor + for PT2 tracing + """ + return ["_local_tensor"], (self._spec, self.requires_grad) + + @staticmethod + def __tensor_unflatten__(inner_tensors, flatten_spec, outer_size, outer_stride): + assert ( + flatten_spec is not None + ), "Expecting spec to be not None from `__tensor_flatten__` return value!" + local_tensor = inner_tensors["_local_tensor"] + spec, requires_grad = flatten_spec + return DTensor( + local_tensor, + spec.mesh, + spec.placements, + shape=outer_size, + dtype=spec.tensor_meta.dtype, + requires_grad=requires_grad, + stride=outer_stride, + ) + + @classmethod + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + return DTensor._op_dispatcher.dispatch( + func, + args, + kwargs or {}, + ) + + @staticmethod + def from_local( + local_tensor: torch.Tensor, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, + *, + run_check: bool = True, + shape: Optional[torch.Size] = None, + stride: Optional[Tuple[int, ...]] = None, + ) -> "DTensor": + """ + Create a :class:`DTensor` from a local torch.Tensor on each rank + according to the `device_mesh` and `placements` specified. + + Args: + local_tensor (torch.Tensor): local torch.Tensor on each rank. + device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the + tensor, if not specified, must be called under a DeviceMesh + context manager, default: None + placements (List[:class:`Placement`], optional): the placements that + describes how to place the local torch.Tensor on DeviceMesh, must + have the same number of elements as `device_mesh.ndim`. If not + specified, we will by default replicate the tensor across the + `device_mesh` from the first rank of each dimension of the `device_mesh`. + + Keyword args: + run_check (bool, optional): indicate whether to run check across ranks + to check meta information and data. if have :class:`Replicate` in + `placements`, the data on first rank of the device mesh dimension + will be broadcasted to other ranks. + shape (torch.Size, optional): A List of int which specifies the size of + DTensor which build on top of `local_tensor`. Note this needs to be + provided if the shape of `local_tensor` are different across the ranks. + If not provided, `shape` will be computed assuming the given distributed + tensor is evenly sharded across ranks. + stride (tuple, optional): A List of int which specifies the stride of DTensor. 
+ If not provided, `stride` will be computed assuming the given distributed + tensor is evenly sharded across ranks. + + Returns: + A :class:`DTensor` object + + .. note:: `from_local` is differentiable, the `requires_grad` of the created + `DTensor` object will depend on if `local_tensor` requires_grad or not. + """ + # if same shape/dtype, no need to run_check, if not, must allgather + # the metadatas to check the size/dtype across ranks + # There should be no data communication unless there's replication + # strategy, where we broadcast the replication from the first rank + # in the mesh dimension + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + device_type = device_mesh.device_type + + # convert the local tensor to desired device base on device mesh's device_type + if device_type != local_tensor.device.type and not local_tensor.is_meta: + local_tensor = local_tensor.to(device_type) + + # set default placements to replicated if not specified + if placements is None: + placements = [Replicate() for _ in range(device_mesh.ndim)] + else: + placements = list(placements) + for idx, placement in enumerate(placements): + # normalize shard dim to be positive + if placement.is_shard(): + placement = cast(Shard, placement) + if placement.dim < 0: + placements[idx] = Shard(placement.dim + local_tensor.ndim) + + # `from_local` is differentiable, and the gradient of the dist tensor this function + # created should flow back the gradients to the local_tensor, so we call an autograd + # function to construct the dist tensor instead. + return _FromTorchTensor.apply( # pyre-ignore[16]: autograd func + local_tensor, + device_mesh, + tuple(placements), + run_check, + shape, + stride, + ) + + def to_local( + self, *, grad_placements: Optional[Sequence[Placement]] = None + ) -> torch.Tensor: + """ + Get the local tensor of this DTensor on its current rank. For sharding it returns + a local shard of the logical tensor view, for replication it returns the replica on + its current rank. + + Keyword args: + grad_placements (List[:class:`Placement`], optional): the placements describes + the future layout of any gradient layout of the Tensor returned from this + function. + `to_local` converts DTensor to local tensor and the returned local tensor + might not be used as the original DTensor layout later in the code. This + argument is the hint that user can give to autograd in case the gradient + layout of the returned tensor does not match the original DTensor layout. + If not specified, we will assume the gradient layout remains the same + as the original DTensor and use that for gradient computation. + + Returns: + A :class:`torch.Tensor` or `AsyncCollectiveTensor` object. it represents the + local tensor on its current rank. + + .. note:: `to_local` is differentiable, the `requires_grad` of the local tensor returned + will depend on if the `DTensor` requires_grad or not. + """ + if grad_placements is not None and not isinstance(grad_placements, tuple): + grad_placements = tuple(grad_placements) + return _ToTorchTensor.apply( + self, grad_placements + ) # pyre-ignore[16]: autograd func + + def redistribute( + self, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, + *, + async_op: bool = False, + ) -> "DTensor": + """ + `redistribute` performs necessary collective operations that redistribute the current + DTensor from its current placements to a new placements, or from is current DeviceMesh + to a new DeviceMesh. i.e. 
we can turn a Sharded DTensor to a Replicated DTensor by + specifying a Replicate placement for each dimension of the DeviceMesh. + + Args: + device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to place the + DTensor, if not specified, must be called under a DeviceMesh + context manager, default: None + placements (List[:class:`Placement`], optional): the new placements that + describes how to place the DTensor into the DeviceMesh, must + have the same number of elements as `device_mesh.ndim`. + + Keyword args: + async_op (bool, optional): whether to perform the DTensor redistribute operation + asynchronously or not. Default: False + + Returns: + A :class:`DTensor` object + + .. note:: `redistribute` is differentiable. + """ + # NOTE: This redistribute API currently only supports out + # of place redistribution, i.e. it always create a new + # DTensor object and leave the original one unchanged. + + # if device_mesh is not specified, use the current device_mesh + device_mesh = device_mesh or self.device_mesh + # raise error if new placements not specified + if placements is None: + raise RuntimeError("placements is needed for redistribute!") + + placements = list(placements) + for i, placement in enumerate(placements): + if placement.is_partial(): + raise RuntimeError( + "Can not redistribute to _Partial, _Partial is for internal use only!" + ) + elif isinstance(placement, Shard) and placement.dim < 0: + # normalize shard dim to be positive + placements[i] = Shard(placement.dim + self.ndim) + placements = tuple(placements) + + # Early return the original DTensor if the placements are the same. + if self._spec.placements == placements: + return self + + # pyre-fixme[16]: `Redistribute` has no attribute `apply`. + return Redistribute.apply(self, device_mesh, placements, async_op) + + def full_tensor( + self, *, grad_placements: Optional[Sequence[Placement]] = None + ) -> torch.Tensor: + """ + Return the full tensor of this DTensor. It will perform necessary collectives + to gather the local tensors from other ranks in its DeviceMesh and concatenate + them together. It's a syntatic sugar of the following code: + + `dtensor.redistribute(placements=[Replicate()] * mesh.ndim).to_local()` + + Keyword args: + grad_placements (List[:class:`Placement`], optional): the placements describes + the future layout of any gradient layout of the full Tensor returned from this + function. + `full_tensor` converts DTensor to a full torch.Tensor and the returned torch.tensor + might not be used as the original replicated DTensor layout later in the code. This + argument is the hint that user can give to autograd in case the gradient + layout of the returned tensor does not match the original replicated DTensor layout. + If not specified, we will assume the gradient layout of the full tensor be replicated. + + Returns: + A :class:`torch.Tensor` object that represents the full tensor of this DTensor. + + .. note:: `full_tensor` is differentiable. + """ + + redist_res = self.redistribute( + placements=[Replicate()] * self.device_mesh.ndim, async_op=False + ) + return _ToTorchTensor.apply(redist_res, grad_placements) + + @property + def device_mesh(self) -> DeviceMesh: + """ + The :class:`DeviceMesh` attribute that associates with this DTensor object. + + .. note:: device_mesh is a read-only property, it can not be set. 
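+
+        Example (an illustrative sketch only; the 4-rank ``cuda`` mesh and the
+        already-initialized default process group are assumptions, not part of this API)::
+
+            >>> # xdoctest: +SKIP("requires an initialized process group")
+            >>> import torch
+            >>> from torch.distributed._tensor import DeviceMesh, distribute_tensor
+            >>> from torch.distributed._tensor.placement_types import Replicate
+            >>> mesh = DeviceMesh("cuda", torch.arange(4))
+            >>> dt = distribute_tensor(torch.randn(8, 8), mesh, [Replicate()])
+            >>> dt.device_mesh  # the same mesh the tensor was distributed onto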
+ """ + return self._spec.mesh + + @property + def placements(self) -> Sequence[Placement]: + """ + The placements attribute of this DTensor that describes the layout of this + DTensor on the its DeviceMesh. + + .. note:: placements is a read-only property, it can not be set. + """ + return self._spec.placements + + +def distribute_tensor( + tensor: torch.Tensor, + device_mesh: Optional[DeviceMesh] = None, + placements: Optional[Sequence[Placement]] = None, +) -> DTensor: + """ + Distribute a torch.Tensor to the `device_mesh` according to the `placements` + specified. The rank of `device_mesh` and `placements` must be the same. + + Args: + tensor (torch.Tensor): torch.Tensor to be distributed. Note that if you + want to shard a tensor on a dimension that is not evenly divisible by + the number of devices in that mesh dimension, we use `torch.chunk` + semantic to shard the tensor and scatter the shards. + device_mesh (:class:`DeviceMesh`, optional): DeviceMesh to distribute the + tensor, if not specified, must be called under a DeviceMesh context + manager, default: None + placements (List[:class:`Placement`], optional): the placements that + describes how to place the tensor on DeviceMesh, must have the same + number of elements as `device_mesh.ndim`. If not specified, we will + by default replicate the tensor across the `device_mesh` from the + first rank of each dimension of the `device_mesh`. + + Returns: + A :class:`DTensor` or `XLAShardedTensor` object. + + Note: + When initialize the DeviceMesh with the `xla` device_type, `distribute_tensor` + return `XLAShardedTensor` instead. see [link](https://github.com/pytorch/pytorch/issues/92909) + for more details. The XLA integration is experimental and subject to change. + """ + + torch._C._log_api_usage_once("torch.dtensor.distribute_tensor") + + # get default device mesh if there's nothing specified + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + device_type = device_mesh.device_type + if device_type == "xla": + try: + # call PyTorch/XLA SPMD for `xla` backend type device mesh. + # This returns XLAShardedTensor + from torch_xla.distributed.spmd import ( # type:ignore[import] + xla_distribute_tensor, + ) + + return xla_distribute_tensor( + tensor, device_mesh, placements + ) # type:ignore[return-value] + except ImportError as e: + msg = "To use DTensor API with xla, you must install the torch_xla package!" + raise ImportError(msg) from e + + # instantiate a RNG tracker if haven't. By default DTensor uses an + # OffsetBasedRNGTracker to perform random operators. + # TODO: the value assignment to global variable is not the ideal solution + # we can replace it in future. + if is_rng_supported_mesh(device_mesh) and not random._rng_tracker: + random._rng_tracker = OffsetBasedRNGTracker(device_type) + + if not tensor.is_leaf: + raise RuntimeError( + "`distribute_tensor` should be used to distribute leaf tensors! but found non-leaf tensor!" + ) + + # convert tensor to the corresponding device type if it's not in that device type + if device_type != tensor.device.type and not tensor.is_meta: + tensor = tensor.to(device_type) + + # set default placements to replicated if not specified + if placements is None: + placements = [Replicate() for _ in range(device_mesh.ndim)] + + if len(placements) != device_mesh.ndim: + raise ValueError( + f"`placements` must have the same length as `device_mesh.ndim`! " + f"Found placements length: {len(placements)}, and device_mesh.ndim: {device_mesh.ndim}." 
+ ) + if isinstance(tensor, DTensor): + # if the tensor is already a DTensor, we just need to check if the + # device mesh and placements are the same + if tensor.device_mesh != device_mesh: + raise ValueError( + f"Cannot distribute a DTensor with device mesh {tensor.device_mesh} " + f"to a different device mesh {device_mesh}." + ) + if tensor.placements != tuple(placements): + raise ValueError( + f"Cannot distribute a DTensor with placements {tensor.placements} " + f"to a different placements {placements}. do you want to call " + f"`redistribute` instead?" + ) + return tensor + + local_tensor = tensor + + # distribute the tensor according to the placements. + placements = list(placements) + for idx, placement in enumerate(placements): + if placement.is_shard(): + placement = cast(Shard, placement) + if placement.dim < 0: + # normalize shard placement dim + placement = Shard(placement.dim + tensor.ndim) + placements[idx] = placement + local_tensor = placement._shard_tensor(local_tensor, device_mesh, idx) + elif placement.is_replicate(): + placement = cast(Replicate, placement) + local_tensor = placement._replicate_tensor(local_tensor, device_mesh, idx) + else: + raise RuntimeError( + f"Trying to distribute tensor with unsupported placements {placement} on device mesh dimension {idx}!" + ) + placements = tuple(placements) + + assert local_tensor is not None, "distributing a tensor should not be None" + # detach the local tensor passed to DTensor since after the construction + # of DTensor, autograd would work on top of DTensor instead of local tensor + return DTensor( + local_tensor.detach().requires_grad_(tensor.requires_grad), + device_mesh, + placements, + shape=tensor.size(), + dtype=tensor.dtype, + requires_grad=tensor.requires_grad, + stride=tensor.stride(), + ) + + +def distribute_module( + module: nn.Module, + device_mesh: Optional[DeviceMesh] = None, + partition_fn: Optional[Callable[[str, nn.Module, DeviceMesh], None]] = None, + input_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None, + output_fn: Optional[Callable[[nn.Module, Any, DeviceMesh], None]] = None, +) -> nn.Module: + """ + This function converts all module parameters to :class:`DTensor` parameters + according to the `partition_fn` specified. It could also control the input or + output of the module by specifying the `input_fn` and `output_fn`. (i.e. convert + the input to :class:`DTensor`, convert the output back to torch.Tensor) + Args: + module (:class:`nn.Module`): user module to be partitioned. + device_mesh (:class:`DeviceMesh`): the device mesh to place the module. + partition_fn (Callable): the function to partition parameters (i.e. shard certain + parameters across the `device_mesh`). If `partition_fn` is not specified, + by default we replicate all module parameters of `module` across the mesh. + input_fn (Callable): specify the input distribution, i.e. could control how the + input of the module is sharded. `input_fn` will be installed as a module + `forward_pre_hook` (pre forward hook). + output_fn (Callable): specify the output distribution, i.e. could control how the + output is sharded, or convert it back to torch.Tensor. output_fn will be + installed as a module `forward_hook` (post forward hook). + + Returns: + A module that contains parameters/buffers that are all `DTensor`s. + + Note: + When initialize the DeviceMesh with the `xla` device_type, `distribute_module` + return nn.Module with PyTorch/XLA SPMD annotated parameters. 
See [link](https://github.com/pytorch/pytorch/issues/92909) + for more details. The XLA integration is experimental and subject to change. + """ + + torch._C._log_api_usage_once("torch.dtensor.distribute_module") + + device_mesh = device_mesh or _mesh_resources.get_current_mesh() + device_type = device_mesh.device_type + if device_type == "xla": + try: + # This function annotates all module parameters for auto-partitioning with + # PyTorch/XLA SPMD or explicitly partition to :class:`XLAShardedTensor` parameters + # according to the `partition_fn` specified. + from torch_xla.distributed.spmd import ( # type:ignore[import] + xla_distribute_module, + ) + + return xla_distribute_module( + module, device_mesh, partition_fn, input_fn, output_fn + ) # type:ignore[return-value] + except ImportError as e: + msg = "To use DTensor API with xla, you must install the torch_xla package!" + raise ImportError(msg) from e + + def replicate_module_params_buffers(m: nn.Module, mesh: DeviceMesh) -> None: + # This function loop over the immediate module parameters and + # buffers, replicate all non DTensor params/buffers to DTensor + # parameters/buffers, if they have not been partitioned in the + # partition_fn, we can't easily use `module._apply` here + # because we don't know what happened inside partition_fn as + # user could do anything, i.e. install hooks, and we want to + # preserve those. + full_replicate = [Replicate()] * mesh.ndim + for key, param in m._parameters.items(): + if param is not None and not isinstance(param, DTensor): + m.register_parameter( + key, + nn.Parameter(distribute_tensor(param.data, mesh, full_replicate)), + ) + for key, buffer in m._buffers.items(): + if buffer is not None and not isinstance(buffer, DTensor): + m._buffers[key] = distribute_tensor(buffer, mesh, full_replicate) + + if partition_fn is None: + # if partition_fn not specified, we by default replicate + # all module params/buffers + for name, submod in module.named_modules(): + replicate_module_params_buffers(submod, device_mesh) + else: + # apply partition_fun to submodules + for name, submod in module.named_modules(): + partition_fn(name, submod, device_mesh) + replicate_module_params_buffers(submod, device_mesh) + + # register input_fn as module forward pre hook + if input_fn is not None: + # check the input_fn signature + num_args = len(inspect.signature(input_fn).parameters) + if num_args == 2: + # input_fn only takes in inputs and device mesh + warnings.warn( + "Deprecating input_fn that takes two arguments (inputs, device_mesh), " + "please use input_fn that takes in (module, inputs, device_mesh) instead!", + ) + module.register_forward_pre_hook(lambda _, inputs: input_fn(inputs, device_mesh)) # type: ignore[call-arg] + elif num_args == 3: + # input_fn takes in module, inputs, device mesh + module.register_forward_pre_hook( + lambda mod, inputs: input_fn(mod, inputs, device_mesh) + ) + else: + raise ValueError( + f"input_fn should take in 3 arguments, but got {num_args} arguments!" 
+ ) + # register output_fn as module forward hook + if output_fn is not None: + num_args = len(inspect.signature(output_fn).parameters) + if num_args == 2: + # output_fn only takes in outputs and device mesh + warnings.warn( + "Deprecating output_fn that takes two arguments (inputs, device_mesh), " + "please use output_fn that takes in (module, inputs, device_mesh) instead!", + ) + module.register_forward_hook( + lambda mod, inputs, outputs: output_fn(outputs, device_mesh) # type: ignore[call-arg] + ) + elif num_args == 3: + module.register_forward_hook( + lambda mod, inputs, outputs: output_fn(mod, outputs, device_mesh) + ) + else: + raise ValueError( + f"output_fn should take in 3 arguments, but got {num_args} arguments!" + ) + + return module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a9bbe7ecc8fe4c8359a56e834048b0d5251324 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__init__.py @@ -0,0 +1,12 @@ +from contextlib import contextmanager + +from torch.distributed._tensor.api import DTensor + + +@contextmanager +def implicit_replication(): + try: + DTensor._op_dispatcher._allow_implicit_replication = True + yield + finally: + DTensor._op_dispatcher._allow_implicit_replication = False diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5bf005106adef121679dc6fe1bc1de0c060a254 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7344f02e3b02c5e64d55b9a7675ebc69696a372 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/__pycache__/tp_transform.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..4b95061598b286a71ca4e30761aefd62c5059367 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/experimental/tp_transform.py @@ -0,0 +1,547 @@ +import copy +import operator +from typing import Any, cast, Dict, List, Optional, Sequence, Tuple + +import torch +from torch._subclasses.fake_tensor import FakeTensor +from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor +from torch.distributed._tensor.op_schema import ( + DTensorSpec, + OpSchema, + OutputSharding, + OutputSpecType, + PlacementStrategy, +) +from torch.distributed._tensor.placement_types import ( + Placement, + Replicate, + Shard, + TensorMeta, +) +from torch.distributed._tensor.redistribute import redistribute_local_tensor +from 
torch.distributed.tensor.parallel.style import ColwiseParallel, ParallelStyle +from torch.export import ExportedProgram +from torch.export.exported_program import ExportGraphSignature +from torch.fx import GraphModule +from torch.fx.experimental.proxy_tensor import make_fx +from torch.fx.node import Node +from torch.fx.passes.infra.pass_base import PassBase, PassResult +from torch.fx.passes.shape_prop import _extract_tensor_metadata +from torch.utils import _pytree as pytree + + +aten = torch.ops.aten + + +def tensor_parallel_transformation( + exported_program: ExportedProgram, + rank: int, + world_size: int, + device_type: str, + parallel_strategies: Dict[str, ParallelStyle], +) -> ExportedProgram: + """ + The entry point function to perform graph transformations on an exported program + to transform a single-device graph into a tensor parallel graph. + + .. warning:: + This API is experimental and subject to change. + """ + + gm = exported_program.graph_module + sig = copy.deepcopy(exported_program.graph_signature) + state_dict = copy.copy(exported_program.state_dict) + + with gm._set_replace_hook(sig.get_replace_hook()): + res = TensorParallelTransformPass( + rank, + world_size, + device_type, + state_dict, + exported_program.graph_signature, + parallel_strategies, + )(gm) + assert res is not None + gm = res.graph_module + + return exported_program._update(gm, sig, state_dict) + + +class TensorParallelTransformPass(PassBase): + """ + This pass is responsible for transforming a single-device graph into a tensor parallel + graph. It will mark the placement strategy of each node in the graph, + partition the graph into distributed graph, then shard the parameters/buffers accordingly. + """ + + def __init__( + self, + rank: int, + world_size: int, + device_type: str, + state_dict: Dict[str, torch.Tensor], + graph_signature: ExportGraphSignature, + parallel_strategies: Dict[str, ParallelStyle], + ) -> None: + super().__init__() + self.rank = rank + self.mesh = DeviceMesh(device_type, torch.arange(world_size)) + self.state_dict: Dict[str, torch.Tensor] = state_dict + self.graph_signature = graph_signature + self.parallel_strategies = parallel_strategies + + def call(self, graph_module) -> PassResult: + gm = copy.deepcopy(graph_module) + + parameter_placements = _generate_parameter_and_buffer_placements( + list(self.state_dict.keys()), self.parallel_strategies + ) + placement_strategies = _mark_sharding( + gm, self.graph_signature, self.mesh, parameter_placements + ) + _partitioner(gm) + _shard_state_dict( + self.state_dict, placement_strategies, self.graph_signature, self.mesh + ) + return PassResult(gm, True) + + +def _generate_parameter_and_buffer_placements( + params_and_buffers: List[str], + parallel_strategies: Dict[str, ParallelStyle], +) -> Dict[str, Placement]: + """ + Build parameter placements based on the give parallel style of linear layers. 
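+
+    Example (illustrative only; ``"fc"`` is a hypothetical linear-module FQN)::
+
+        >>> # xdoctest: +SKIP("illustrative only")
+        >>> _generate_parameter_and_buffer_placements(
+        ...     ["fc.weight", "fc.bias"], {"fc": ColwiseParallel}
+        ... )  # returns {"fc.weight": Shard(0), "fc.bias": Shard(0)}
+
+    Any non-``ColwiseParallel`` style maps the weight to ``Shard(1)`` and the bias to
+    ``Replicate()`` instead.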
+ """ + parameter_placements: Dict[str, Placement] = {} + for linear_fqn, parallel_style in parallel_strategies.items(): + weight_fqn = f"{linear_fqn}.weight" + bias_fqn = f"{linear_fqn}.bias" + assert weight_fqn in params_and_buffers + parameter_placements[weight_fqn] = ( + Shard(0) if parallel_style == ColwiseParallel else Shard(1) + ) + if bias_fqn in params_and_buffers: + parameter_placements[bias_fqn] = ( + Shard(0) if parallel_style == ColwiseParallel else Replicate() + ) + return parameter_placements + + +def _mark_tensor_parallel_shardings( + gm: GraphModule, + graph_signature: ExportGraphSignature, + mesh: DeviceMesh, + parameter_placements: Dict[str, Placement], +) -> Dict[Node, PlacementStrategy]: + """ + Mark the placement strategies of the parameter and buffer placeholder nodes. + """ + placement_strategies: Dict[Node, PlacementStrategy] = {} + num_params_and_buffers = len(graph_signature.inputs_to_parameters) + len( + graph_signature.inputs_to_buffers + ) + placeholder_idx: int = 0 + for node in gm.graph.nodes: + if node.op == "placeholder": + if placeholder_idx < num_params_and_buffers: + fqn: str = _get_input_node_fqn(node.name, graph_signature) + placement: Placement = ( + parameter_placements[fqn] + if fqn in parameter_placements + else Replicate() + ) + placement_strategies[node] = _create_placement_strategy( + node, + mesh, + placements=(placement,), + ) + placeholder_idx += 1 + else: + placement_strategies[node] = _create_placement_strategy( + node, + mesh, + placements=(Replicate(),), + ) + return placement_strategies + + +def _get_input_node_fqn(input_name: str, graph_signature: ExportGraphSignature) -> str: + """ + Return the FQN of an input node. + """ + if input_name in graph_signature.inputs_to_parameters: + return graph_signature.inputs_to_parameters[input_name] + elif input_name in graph_signature.inputs_to_buffers: + return graph_signature.inputs_to_buffers[input_name] + else: + raise ValueError( + f"{input_name} not found in inputs_to_parameters or inputs_to_buffers" + ) + + +def _mark_sharding( + gm: GraphModule, + graph_signature: ExportGraphSignature, + mesh: DeviceMesh, + parameter_placements: Dict[str, Placement], +) -> Dict[Node, PlacementStrategy]: + """ + Mark the sharding strategy for each node in the graph module. 
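+
+    The chosen strategy is also recorded on ``node.meta["sharding"]`` for every node
+    (``None`` for the ``output`` node), so later passes such as ``_partitioner`` can read
+    it directly.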
+ """ + placement_strategies: Dict[ + Node, PlacementStrategy + ] = _mark_tensor_parallel_shardings(gm, graph_signature, mesh, parameter_placements) + + for node in gm.graph.nodes: + if node.op == "placeholder": + if node not in placement_strategies: + placement_strategies[node] = _create_placement_strategy( + node, mesh, placements=(Replicate(),) + ) + node.meta["sharding"] = placement_strategies[node] + elif node.op == "call_function": + if node.target == operator.getitem: + input_nodes = node.all_input_nodes + assert ( + len(input_nodes) == 1 + ), f"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}" + arg_strategy = placement_strategies[input_nodes[0]] + placement_strategies[node] = _create_placement_strategy( + node, + mesh, + placements=arg_strategy.output_spec.placements, + input_specs=_get_input_node_specs(node, placement_strategies), + ) + node.meta["sharding"] = placement_strategies[node] + else: + op_schema = _get_op_schema(node, placement_strategies) + + # get DTensor specs for inputs and outputs + if ( + op_schema.op + not in DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs + and op_schema.op + not in DTensor._op_dispatcher.sharding_propagator.op_to_rules + ): + # Mark all as replicated + output_sharding = _generate_default_output_sharding( + node, + mesh, + op_schema, + ) + else: + output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding( + op_schema, + ) + placement_strategies[node] = PlacementStrategy( + output_specs=_get_output_spec_from_output_sharding(output_sharding), + input_specs=output_sharding.schema_suggestions[0].args_spec + if output_sharding.schema_suggestions is not None + else _get_input_node_specs(node, placement_strategies), + ) + node.meta["sharding"] = placement_strategies[node] + elif node.op == "output": + node.meta["sharding"] = None + else: + raise RuntimeError(f"op code {node.op} not supported") + return placement_strategies + + +def _get_output_spec_from_output_sharding( + output_sharding: OutputSharding, +) -> DTensorSpec: + """ + Util function to extract output spec from output sharding. + """ + if isinstance(output_sharding.output_spec, DTensorSpec): + return output_sharding.output_spec + else: + # For ops that return multiple outputs, the outputs should have the same output spec + assert isinstance(output_sharding.output_spec, Sequence) + assert output_sharding.output_spec[0] is not None + output_sharding.output_spec[0].tensor_meta = None + return output_sharding.output_spec[0] + + +def _create_placement_strategy( + node: Node, + mesh: DeviceMesh, + placements: Tuple[Placement, ...], + input_specs: Optional[Sequence[DTensorSpec]] = None, +) -> PlacementStrategy: + """ + Util function to construct a placement strategy for a given node. + """ + placement = PlacementStrategy( + input_specs=input_specs, + output_specs=DTensorSpec( + mesh=mesh, + placements=placements, + ), + ) + _populate_tensor_meta(node, placement.output_specs) + return placement + + +def _populate_tensor_meta(node: Node, output_spec: OutputSpecType) -> None: + """ + Util function to populate tensor meta of output_spec based on node metadata. 
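+
+    For example (hypothetical values), if ``node.meta["val"]`` is a contiguous fake tensor
+    of shape ``(4, 8)`` and dtype ``torch.float32``, then ``output_spec.tensor_meta`` is set
+    to ``TensorMeta(shape=torch.Size([4, 8]), stride=(8, 1), dtype=torch.float32)``.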
+ """ + if isinstance(node.meta["val"], Sequence): + assert isinstance(output_spec, Sequence) + for spec, fake_tensor in zip(output_spec, node.meta["val"]): + assert spec is not None + spec.tensor_meta = TensorMeta( + shape=fake_tensor.shape, + stride=fake_tensor.stride(), + dtype=fake_tensor.dtype, + ) + else: + assert isinstance(output_spec, DTensorSpec) + output_spec.tensor_meta = TensorMeta( + shape=node.meta["val"].shape, + stride=node.meta["val"].stride(), + dtype=node.meta["val"].dtype, + ) + + +def _generate_default_output_sharding( + node: Node, + mesh: DeviceMesh, + op_schema: OpSchema, +) -> OutputSharding: + """ + Util function to create a default output sharding that suggests Replicate placement for both args and outputs. + """ + + def update_arg_spec(arg_spec: DTensorSpec) -> DTensorSpec: + return DTensorSpec( + mesh=arg_spec.mesh, + placements=(Replicate(),), + tensor_meta=arg_spec.tensor_meta, + ) + + new_op_schema = OpSchema( + op=op_schema.op, + args_schema=pytree.tree_map_only( + DTensorSpec, update_arg_spec, op_schema.args_schema + ), + kwargs_schema=op_schema.kwargs_schema, + ) + + def create_output_spec(tensor: FakeTensor) -> DTensorSpec: + return DTensorSpec( + mesh=mesh, + placements=(Replicate(),), + tensor_meta=TensorMeta( + shape=tensor.shape, + stride=tensor.stride(), + dtype=tensor.dtype, + ), + ) + + return OutputSharding( + output_spec=pytree.tree_map_only( + FakeTensor, create_output_spec, node.meta["val"] + ), + schema_suggestions=[new_op_schema], + failed_reason=f"{node.op} does not have sharding strategy registered", + needs_redistribute=True, + ) + + +def _partitioner(gm: torch.fx.GraphModule) -> torch.fx.GraphModule: + """ + Graph partitioner that partitions the single device graph + to distributed graph + """ + for node in gm.graph.nodes: + node_sharding = node.meta["sharding"] + if node.op == "placeholder": + out_spec = node_sharding.output_spec + local_val = _partition_val(node.meta["val"], out_spec) + # update node value + node.meta["val"] = local_val + elif node.op == "call_function": + out_spec = node_sharding.output_spec + # check if there's misaligned sharding, insert reshard if there is + expected_input_specs = node_sharding.input_specs + for idx, input_arg in enumerate(node.all_input_nodes): + input_arg_sharding = input_arg.meta["sharding"] + input_arg_spec = input_arg_sharding.output_spec + desired_spec = ( + out_spec + if expected_input_specs is None + else expected_input_specs[idx] + ) + if input_arg_spec != desired_spec: + _insert_reshard_gm( + gm, node, input_arg, input_arg_spec, desired_spec + ) + # convert output val to its local component + output_val = node.meta["val"] + node.meta["val"] = _partition_val(output_val, out_spec) + elif node.op == "output": + for input_arg in node.all_input_nodes: + # input args of output should be Replicate, otherwise redistribution is needed. 
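+                # (e.g. a _Partial output of a contracted matmul gets redistributed to
+                # Replicate here, so every rank returns the full output tensor)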
+ input_args_to_check: Sequence[Node] = ( + input_arg if isinstance(input_arg, Sequence) else [input_arg] + ) + for arg in input_args_to_check: + arg_sharding = arg.meta["sharding"] + arg_spec = arg_sharding.output_spec + desired_spec = copy.copy(arg_spec) + desired_spec.placements = (Replicate(),) + if arg_spec != desired_spec: + _insert_reshard_gm(gm, node, arg, arg_spec, desired_spec) + else: + raise RuntimeError(f"op code {node} not supported") + + _clean_up_graph_metadata(gm) + gm.graph.lint() + gm.recompile() + return gm + + +def _partition_val(val: Any, spec: DTensorSpec) -> Any: + """ + util function to convert a full tensor val to its local component + """ + if isinstance(val, torch.Tensor): + local_shard = val + if val.ndim == 0: + # If it's already a scalar tensor, it is already local, we don't + # need to do anything + return local_shard + + for idx, placement in enumerate(spec.placements): + if placement.is_shard(): + placement = cast(Shard, placement) + num_chunks = spec.mesh.size(mesh_dim=idx) + my_coord = spec.mesh.get_coordinate() + assert my_coord is not None, "current rank not in mesh!" + my_coord_on_mesh_dim = my_coord[idx] + local_shard = placement._split_tensor( + local_shard, num_chunks, with_padding=False, contiguous=True + )[0][my_coord_on_mesh_dim] + return local_shard + elif isinstance(val, (list, tuple)): + return val.__class__(_partition_val(v, spec) for v in val) + else: + raise RuntimeError(f"val type {type(val)} not supported") + + +def _insert_reshard_gm( + gm: torch.fx.GraphModule, + node: Node, + input_arg: Node, + input_arg_spec: DTensorSpec, + desired_spec: DTensorSpec, +) -> None: + """ + Transform the graph for tensor redistribution. + """ + input_arg_spec.tensor_meta = input_arg.meta["tensor_meta"] + desired_spec.tensor_meta = input_arg.meta["tensor_meta"] + input_arg_tensor = input_arg.meta["val"] + + # insert reshard operation + def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor: + return redistribute_local_tensor( + local_tensor, + input_arg_spec, + desired_spec, + ) + + reshard_gm = make_fx(reshard_fn)(input_arg_tensor) + reshard_gm_nodes = list(reshard_gm.graph.nodes) + input_node = reshard_gm_nodes[0] + with gm.graph.inserting_before(node): + output_node = gm.graph.graph_copy( + reshard_gm.graph, + val_map={ + input_node: input_arg, + }, + ) + node.replace_input_with(input_arg, output_node) + + +def _clean_up_graph_metadata(gm: torch.fx.GraphModule) -> None: + """ + Clean up the graph by removing sharding and partitioning related metadata + """ + for node in gm.graph.nodes: + if "sharding" in node.meta: + del node.meta["sharding"] + if "val" in node.meta and isinstance(node.meta["val"], torch.Tensor): + local_tensor_meta = _extract_tensor_metadata(node.meta["val"]) + node.meta["tensor_meta"] = local_tensor_meta + + +def _get_input_node_specs( + node: Node, placement_strategies: Dict[Node, PlacementStrategy] +) -> Tuple[DTensorSpec, ...]: + """ + Get the input specs of a node. + """ + input_specs_list: List[DTensorSpec] = [] + for input_arg in node.all_input_nodes: + if input_arg in placement_strategies: + output_spec = placement_strategies[input_arg].output_specs + assert isinstance(output_spec, DTensorSpec) + input_specs_list.append(output_spec) + else: + raise ValueError(f"{input_arg} does not have output_spec populated.") + return tuple(input_specs_list) + + +def _get_op_schema( + node: Node, placement_strategies: Dict[Node, PlacementStrategy] +) -> OpSchema: + """ + Util function to construct the operator schema of a node. 
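+
+    For example (hypothetical node), for a ``call_function`` node running ``aten.mm.default``,
+    the returned ``OpSchema`` has ``op=aten.mm.default`` and an ``args_schema`` whose ``Node``
+    arguments are replaced by the ``output_specs`` recorded in ``placement_strategies``;
+    non-``Node`` args and kwargs are passed through unchanged.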
+ """ + args_schema_list = pytree.tree_map_only( + Node, lambda arg: placement_strategies[arg].output_specs, node.args + ) + op_schema = OpSchema( + op=cast(torch._ops.OpOverload, node.target), + args_schema=tuple(args_schema_list), + kwargs_schema=cast(Dict[str, object], node.kwargs), + ) + return op_schema + + +def _shard_state_dict( + state_dict: Dict[str, torch.Tensor], + placement_strategies: Dict[Node, PlacementStrategy], + graph_signature: ExportGraphSignature, + mesh: DeviceMesh, +) -> None: + """ + Inplace partition the weights based on the placement strategy + """ + for node, placement_strategy in placement_strategies.items(): + if node.op != "placeholder": + continue + if node.name in graph_signature.inputs_to_parameters: + fqn = graph_signature.inputs_to_parameters[node.name] + elif node.name in graph_signature.inputs_to_buffers: + fqn = graph_signature.inputs_to_buffers[node.name] + else: + continue + assert fqn in state_dict, f"{fqn} not found in state dict: {state_dict.keys()}" + + original_param = state_dict[fqn] + dtensor_param = distribute_tensor( + original_param, + mesh, + placement_strategy.output_spec.placements, + ) + local_param = dtensor_param.to_local() + state_dict[fqn] = ( + torch.nn.Parameter(local_param) + if isinstance(original_param, torch.nn.Parameter) + else local_param + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..ff8125f000432257e2b5e59e034bd529cd0bc2e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/op_schema.py @@ -0,0 +1,427 @@ +from dataclasses import dataclass +from functools import cached_property +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +from torch._ops import OpOverload +from torch.distributed._tensor.placement_types import DTensorSpec +from torch.distributed.device_mesh import DeviceMesh + +try: + from torch.utils._cxx_pytree import tree_map_only, TreeSpec +except ImportError: + from torch.utils._pytree import ( # type: ignore[no-redef, assignment] + tree_map_only, + TreeSpec, + ) + + +# Common type aliases +ArgsType = Tuple[object, ...] +KwargsType = Dict[str, object] +# ATen op schemas could have Tensor, Tuple[Tensor] and List[Tensor], so output type sould +# be the same set of possibilities. +OutputSpecType = Optional[Union[DTensorSpec, Sequence[Optional[DTensorSpec]]]] + + +def _rebuild_tensor_from_dtensor_meta(arg) -> object: + """ + This is used to propagate tensor metadata, must be under fake mode + """ + assert arg.tensor_meta is not None, "DTensorSpec does not contain tensor_meta." + return torch.empty_strided( + arg.tensor_meta.shape, + arg.tensor_meta.stride, + dtype=arg.tensor_meta.dtype, + ) + + +def _is_inplace_op(op: OpOverload): + # simple analysis of function schema to determine + # if this is an inplace variant, it might not + # be entirely correct, but it's good enough for now. + return op._schema.name[-1] == "_" + + +def _is_out_variant_op(op: OpOverload): + # simple analysis of function schema to determine + # if this is an out variant, it might not + # be entirely correct, but it's good enough for now. 
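+    # e.g. torch.ops.aten.add.out has overload_name == "out"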
+ return "out" in op._schema.overload_name + + +def _pretty_print_spec(spec: object) -> str: + if spec is None: + return "None" + elif isinstance(spec, DTensorSpec): + return "".join([str(p) for p in spec.placements]) + elif isinstance(spec, Sequence): + return "(" + ", ".join([_pretty_print_spec(s) for s in spec]) + ")" + else: + raise RuntimeError(f"Unknown spec type to print: spec={spec}") + + +@dataclass +class PlacementStrategy: + """ + A placement strategy describes acceptable sharding placements of the output + and the tensor arguments of an operation. + + note: when the op return value is a single DTensor object, output_specs is + DTensorSpec; when the return value is a tuple of Optional[DTensor], + output_specs is a tuple of Optional[DTensorSpec]. + """ + + output_specs: Union[DTensorSpec, Tuple[Optional[DTensorSpec], ...]] + input_specs: Optional[Sequence[DTensorSpec]] = None + + # redistribute costs for this op placement strategy + # we need a nested list to record the cost for each + # operand of this operator, and for each operand of + # this operator it might have multiple placement strategies + redistribute_cost: Optional[List[List[float]]] = None + + @cached_property + def output_spec(self) -> DTensorSpec: + """ + This function requires that the strategy have exactly one DTensorSpec as the + output spec. If the output_specs is a tuple, we throw an exception. + """ + if isinstance(self.output_specs, DTensorSpec): + return self.output_specs + else: + raise ValueError( + f"function output_spec expects a single DTensorSpec but got: {self.output_specs}" + ) + + def input_spec(self, index: int = 0) -> DTensorSpec: + assert self.input_specs is not None, "input_specs of PlacementStrategy is None!" + assert len(self.input_specs) > index, ( + f"Invalid index {index} for input_specs of length " + f"{len(self.input_specs)}: {self.input_specs}" + ) + return self.input_specs[index] + + def __str__(self) -> str: + input_specs_str = _pretty_print_spec(self.input_specs) + output_spec_str = _pretty_print_spec(self.output_specs) + return f"{input_specs_str} -> {output_spec_str}" + + +class StrategyType: + """ + Base class type for op strategy, We have two StrategyType: + OpStrategy and TupleStrategy + """ + + pass + + +class OpStrategy(StrategyType): + """ + OpStrategy that consists of a list of placement strategies associated with the op + """ + + def __init__(self, strategies: List[PlacementStrategy]) -> None: + super().__init__() + self.strategies: List[PlacementStrategy] = strategies + + def __str__(self) -> str: + strategy_list_str = ", ".join([str(strategy) for strategy in self.strategies]) + mesh_shape = self.output_mesh_shape + return f"OpStrategy:[{strategy_list_str}] @ mesh: {mesh_shape}" + + def max_num_shards(self) -> int: + """ + Returns the max number of shards across all placement strategies + """ + return max([strategy.output_spec.num_shards for strategy in self.strategies]) + + @property + def output_mesh_shape(self): + output_spec = self.strategies[0].output_specs + if isinstance(output_spec, DTensorSpec): + return output_spec.mesh.shape + else: + assert isinstance( + output_spec, tuple + ), "found no DTensorSpec in the OpStrategy!" 
+ assert output_spec[0] is not None + return output_spec[0].mesh.shape + + @property + def output_ndim(self): + return self.strategies[0].output_spec.ndim + + @property + def output_shape(self): + return self.strategies[0].output_spec.shape + + +class TupleStrategy(StrategyType): + """ + TupleStrategy represents the output strategy of this op is a tuple + of strategy, i.e. If the output of this op is a tuple of tensors or list of tensors + with possibly different placement strategies, we should return a TupleStrategy that + contains a tuple of OpStrategy, where each child represents the sharding strategy + of "each element" of the tuple/list of tensors the op returns. + + NOTE: if the output of the op is a List[Tensor] and they share the same placement + strategy, then we should return a single OpStrategy instead of a TupleStrategy + """ + + def __init__(self, childs: Sequence[StrategyType]) -> None: + super().__init__() + self.childs: Sequence[StrategyType] = childs + + def __str__(self) -> str: + child_strategies_str = ", ".join( + [f"{str(strat)}" for idx, strat in enumerate(self.childs)] + ) + return f"TupleStrategy({child_strategies_str})" + + +@dataclass +class RuntimeSchemaInfo: + """ + RuntimeSchemaInfo stores the operator schema related information for runtime (eager) + execution. This is mainly used for two ways: 1. to generate hash for args to determine + whether to re-run sharding prop or not 2. to determine if we need pytree + """ + + # This static_argnum records static arg "starting index" for ops that have non-tensor + # args/kwargs which would affect sharding propagation results. All args starting from + # this index would be hashed to our sharding cache. + # Note that only a few ops need this information, e.g. view, transpose, var.dim, etc. + static_argnum: int = 100 + # This static_kwargkey records static kwarg names which would affect sharding prop + static_kwargkey: Optional[List[str]] = None + # each op can decide if it wants to use pytree flatten/unflatten during operator + # eager execution, by default we don't need to do flatten/unflatten, only if the + # op indicate it needs to, this is to accelate eager performance. + needs_pytree: bool = False + + +@dataclass +class OpSchema: + """ + OpSchema is a data class that describes an operator input schemas, it + includes DTensor DTensorSpecs and non-tensor args/kwargs (positional order + preserved). It is mainly used by the dispatching logic below to run things like + sharding propagation. + + NOTE: this should be used as a read only data class + TODO: make this a frozen dataclass + + Args: + op: the operator overload we are intercepting + args_schema: contains args except that the DTensor args have been replaced + with its DTensorSpec + kwargs_schema: contains kwargs except that the DTensor kwargs have been replaced + with its DTensorSpec + """ + + op: OpOverload + args_schema: ArgsType + kwargs_schema: KwargsType + + schema_info: Optional[RuntimeSchemaInfo] = None + + @property + def args_spec(self) -> Tuple[DTensorSpec, ...]: + """ + args_spec: Tuple[DTensorSpec, ...]: contains a clean list of args spec list + with NO non-DTensor positional arguments (i.e. 
int/float/tuple, etc) + mainly used by sharding propagation to propagate the output spec + """ + # filter out non-relevant values from args schema to get a clean spec list + # this would mainly be used by sharding propagation rules + return tuple(item for item in self.args_schema if isinstance(item, DTensorSpec)) + + def __repr__(self) -> str: + return ( + f"OpSchema(op={self.op}," + f" args_schema={self.args_schema}," + f" kwargs_schema={self.kwargs_schema})" + ) + + def __str__(self) -> str: + args_sharding: List[str] = [] + mesh_shape = None + for arg in self.args_schema: + if isinstance(arg, DTensorSpec): + args_sharding.append(str(arg)) + mesh_shape = arg.mesh.shape + elif isinstance(arg, OpStrategy): + assert len(arg.strategies) == 1 + args_sharding.append(_pretty_print_spec(arg.strategies[0].output_specs)) + mesh_shape = arg.output_mesh_shape + elif isinstance(arg, TupleStrategy): + first_op_strtgy = arg.childs[0] + assert isinstance(first_op_strtgy, OpStrategy) + mesh_shape = first_op_strtgy.output_mesh_shape + args_sharding.append(str(arg)) + else: + args_sharding.append(str(arg)) + return f"Op(op={self.op}, args_sharding={', '.join(args_sharding)} @ mesh: {mesh_shape})" + + def __post_init__(self) -> None: + has_symints = False + for a in self.args_schema: + if isinstance(a, DTensorSpec) and a.tensor_meta is not None: + if any(isinstance(s, torch.SymInt) for s in a.tensor_meta.shape): + has_symints = True + break + self.has_symints = has_symints + + def arg_type_tensor_or_tensor_list_like(self, arg_idx: int) -> bool: + arg = self.args_schema[arg_idx] + is_tensor = isinstance(arg, DTensorSpec) + if is_tensor: + return True + + if not isinstance(arg, list): + return False + + return all(isinstance(e, DTensorSpec) or e is None for e in arg) + + def return_type_tuple_tensor_like(self) -> bool: + # all dispatch ops could only return Tuple[Tensor] or have None/ints/floats + # in the tuple, but the first element must be a Tensor, so this check is enough + return_types = self.op._schema.returns + return len(return_types) > 1 and isinstance( + return_types[0].type, torch.TensorType + ) + + def return_type_tensor(self) -> bool: + return_types = self.op._schema.returns + # all dispatch ops only return Tensor or Tuple[Tensor] for tensor like + # return types, so this check is enough for tensor like types + return isinstance(return_types[0].type, torch.TensorType) + + def __hash__(self) -> int: + # Only hash args and kwargs that op indicates to hash + if not self.schema_info: + static_argnum = len(self.args_schema) + static_kwargkey = None + else: + static_argnum = self.schema_info.static_argnum + static_kwargkey = self.schema_info.static_kwargkey + + args_to_hash = tuple( + tuple(e) if isinstance(e, list) else e + for i, e in enumerate(self.args_schema) + if self.arg_type_tensor_or_tensor_list_like(i) or i >= static_argnum + ) + if static_kwargkey is not None: + kwargs_to_hash = tuple( + self.kwargs_schema.get(k, None) for k in static_kwargkey + ) + return hash((self.op, args_to_hash, kwargs_to_hash)) + else: + return hash((self.op, args_to_hash)) + + def __eq__(self, other: object) -> bool: + # early return checks + if not isinstance(other, OpSchema): + return False + + if self.op != other.op: + return False + + if len(self.args_schema) != len(other.args_schema): + return False + + # compare each element and early return if any of them is different + if not self.schema_info: + static_argnum = len(self.args_schema) + static_kwargkey = None + else: + static_argnum = 
self.schema_info.static_argnum + static_kwargkey = self.schema_info.static_kwargkey + + for i, (self_arg, other_arg) in enumerate( + zip(self.args_schema, other.args_schema) + ): + if isinstance(self_arg, DTensorSpec) and self_arg != other_arg: + return False + elif i >= static_argnum and self_arg != other_arg: + return False + + # check kwarg equality when there's a static kwarg key + if static_kwargkey: + for key in static_kwargkey: + if self.kwargs_schema.get(key, None) != other.kwargs_schema.get( + key, None + ): + return False + + return True + + def gen_fake_args(self) -> ArgsType: + """ + gen_fake_args: generate fake args for the operator, this is mainly used + by sharding propagation rules to generate fake args for the operator + to run the local tensor operator and get the output spec. + """ + return tree_map_only( + DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.args_schema + ) + + def gen_fake_kwargs(self) -> KwargsType: + """ + gen_fake_kwargs: generate fake kwargs for the operator, this is mainly used + by sharding propagation rules to generate fake kwargs for the operator + to run the local tensor operator and get the output spec. + """ + return tree_map_only( + DTensorSpec, _rebuild_tensor_from_dtensor_meta, self.kwargs_schema + ) + + def _inplace_rewrap_schema_suggestion(self, origin_schema: "OpSchema") -> None: + suggestion_args_spec = self.args_spec + new_arg_schema: List[object] = [] + idx_of_args_spec = 0 + for arg in origin_schema.args_schema: + if isinstance(arg, DTensorSpec): + new_arg_schema.append(suggestion_args_spec[idx_of_args_spec]) + idx_of_args_spec += 1 + else: + new_arg_schema.append(arg) + self.args_schema = tuple(new_arg_schema) + self.kwargs_schema = origin_schema.kwargs_schema + + +@dataclass +class OutputSharding: + """ + OutputSharding is a data class that is used by the sharding propagation + rules, it could set the output_spec upon successful propagation, and if + it failed, output_spec would become None and sharding propagation rules + could give a list of suggestions for inputs to reshard. + + NOTE: the schema_suggestion generated by sharding propagation should be + exactly the same as the operator OpSchema, except the DTensor DTensorSpecs + """ + + output_spec: OutputSpecType + schema_suggestions: Optional[List[OpSchema]] = None + failed_reason: Optional[str] = None + needs_redistribute: bool = False + + +@dataclass +class OpInfo: + """ + All Runtime Op execution info are packed here + """ + + mesh: DeviceMesh + schema: OpSchema + flat_args_schema: List[object] + local_args: Sequence[object] + local_kwargs: Dict[str, object] + args_tree_spec: Optional[TreeSpec] = None + + # the output sharding info + output_sharding: Optional[OutputSharding] = None diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d19fdfa50cb70432e1a3dcb95981282a01a01e7b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from .embedding_ops import * # noqa: F403 +from .matrix_ops import * # noqa: F403 +from .math_ops import * # noqa: F403 +from .tensor_ops import * # noqa: F403 +from .pointwise_ops import * # noqa: F403 +from .random_ops import * # noqa: F403 +from .view_ops import * # noqa: F403 +from .conv_ops import * # noqa: F403 +from .experimental_ops import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0f536dd1056bd033373a0909a519e89dbc8bdb9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9f41f46a02821632fdf08f4f721caafdc6448a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/basic_strategy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3065c97dbbb7af82355687543d9c17feebabc88b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/common_rules.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bf662e065426aaaabc6ac009ec1fc3a2e9f937b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/conv_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a31d2a19e3f56c62d30c5b2b1240b120f157062 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/embedding_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bdde64d62dc7f65b2fe60ccc0cd6c4660f79318 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/experimental_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/math_ops.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/math_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ab70d0c2595f2d9f91aa530f20b3c7d7e61db7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/math_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/matrix_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/matrix_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67869e428044d107c5415e11e3a720c1dc92d1bc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/matrix_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/pointwise_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/pointwise_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2884778bd45061ee4463f1b40f8555e453ddfb68 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/pointwise_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/random_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/random_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b02a86ae2150930c7c535b9e5134ac96c8db3b7c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/random_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/tensor_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/tensor_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2df4178ce658263a34b98ef716470cd996af833e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/tensor_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44f3fb9650a2142f113a32ad1f53f2a008e53c99 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/view_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/view_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c871417b540dfdefaa4d4531bed462c5328bf34 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/__pycache__/view_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/basic_strategy.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/basic_strategy.py new file mode 100644 index 
0000000000000000000000000000000000000000..80055281236fa9a7c186d00d0363eb947a22755b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/basic_strategy.py @@ -0,0 +1,184 @@ +import itertools +from dataclasses import dataclass + +from typing import List, Tuple + +from torch.distributed._tensor.op_schema import OpStrategy, PlacementStrategy +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + +from torch.distributed.device_mesh import DeviceMesh + + +@dataclass +class EinsumDims: + contracting_dims: List[str] + batch_dims: List[str] + lhs_out_only_dims: List[str] + rhs_out_only_dims: List[str] + + @classmethod + def parse_equation(cls, equation: str) -> Tuple[List[str], str]: + # parse einop equation and extract arg specs + """ + Parse the einsum equation str to input dim chars and output dim char + """ + inputs, outputs = equation.split("->") + input_dims, output_dims = inputs.split(","), outputs.split(",") + + # NOTE: only support at most two inputs, and single output + # extend to support more inputs if needed in future + assert len(input_dims) <= 2, "Only support at most two inputs" + assert len(output_dims) == 1, "Only support single output" + output_dim = output_dims[0] + return input_dims, output_dim + + @classmethod + def parse_dims(cls, input_dims: List[str], output_dim: str) -> "EinsumDims": + """ + Parse the dims and extract the contracting, batch, and free dimensions + for the left and right hand sides. + """ + dim_char_set = set() + for input_dim in input_dims: + for input_char in list(input_dim): + dim_char_set.add(input_char) + + # get a determinisitc order of all dim chars + all_dim_chars = sorted(dim_char_set) + + # parse input and output dimensions + lhs_out_only_dims, rhs_out_only_dims = [], [] + batch_dims, contracting_dims = [], [] + + for dim_char in all_dim_chars: + if dim_char not in output_dim: + contracting_dims.append(dim_char) + else: + is_batch_dim = True + for input_dim in input_dims: + is_batch_dim = is_batch_dim and dim_char in input_dim + + if is_batch_dim: + batch_dims.append(dim_char) + else: + assert ( + len(input_dims) == 2 + ), "free dimension only supported for two inputs!" + lhs, rhs = input_dims + if dim_char in lhs: + lhs_out_only_dims.append(dim_char) + elif dim_char in rhs: + rhs_out_only_dims.append(dim_char) + else: + raise RuntimeError("Invalid dimension character") + + return cls( + contracting_dims=contracting_dims, + batch_dims=batch_dims, + lhs_out_only_dims=lhs_out_only_dims, + rhs_out_only_dims=rhs_out_only_dims, + ) + + +def gen_einsum_strategies( + equation: str, + mesh: DeviceMesh, + *, + linearity: bool = False, +) -> OpStrategy: + """ + Generate a strategy list for the ops that follow einsum style notation. + """ + # parse einop equation and extract dims + input_dims, output_dim = EinsumDims.parse_equation(equation) + edims = EinsumDims.parse_dims(input_dims, output_dim) + + all_mesh_dim_strategies = [] + + # generate strategies for each mesh dim + for mesh_dim in range(mesh.ndim): + mesh_dim_strategies = [] + + # placement list stores placements of [output, input1, input2, ...] 
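+        # e.g. for the matmul equation "mk,kn->mn", the candidates built below (in
+        # [output, lhs, rhs] order) are: all-Replicate; [Shard(0), Shard(0), Replicate()]
+        # (split m); [_Partial(), Shard(1), Shard(0)] (split the contracting k); and
+        # [Shard(1), Replicate(), Shard(1)] (split n)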
+ # first we always have replicate all for inputs and output + placement_list: List[Placement] = [Replicate()] * (len(input_dims) + 1) + mesh_dim_strategies.append(placement_list) + + if mesh.size(mesh_dim) <= 1: + # only replicate strategy for mesh dim with size 1 + # TODO: see if this is valid for the submesh case + continue + + # split batch dim + for batch_dim in edims.batch_dims: + output_batch_dim = output_dim.index(batch_dim) + placement_list = [Shard(output_batch_dim)] + for input_dim in input_dims: + input_batch_dim = input_dim.index(batch_dim) + placement_list.append(Shard(input_batch_dim)) + + mesh_dim_strategies.append(placement_list) + + # split contracting dim + for contracting_dim in edims.contracting_dims: + placement_list = [_Partial()] + for input_dim in input_dims: + input_contracting_dim = input_dim.index(contracting_dim) + placement_list.append(Shard(input_contracting_dim)) + + mesh_dim_strategies.append(placement_list) + + # split lhs free dim + for lhs_dim in edims.lhs_out_only_dims: + lhs_free_dim = output_dim.index(lhs_dim) + # this means split the lhs input and output + # i.e. S(0), R -> S(0) + lhs_placement_list: List[Placement] = [ + Shard(lhs_free_dim), + Shard(lhs_free_dim), + Replicate(), + ] + mesh_dim_strategies.append(lhs_placement_list) + + # split rhs free dim + for rhs_dim in edims.rhs_out_only_dims: + rhs_free_dim = output_dim.index(rhs_dim) + rhs_placement_list: List[Placement] = [ + Shard(rhs_free_dim), + Replicate(), + Shard(rhs_free_dim), + ] + mesh_dim_strategies.append(rhs_placement_list) + + # linearity strategy + if linearity: + linearity_placement_list: List[Placement] = [_Partial()] + for input_dim in input_dims: + linearity_placement_list.append(_Partial()) + mesh_dim_strategies.append(linearity_placement_list) + + all_mesh_dim_strategies.append(mesh_dim_strategies) + + # generate strategies for entire mesh + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + # TODO: filter out invalid strategies, at this point we generate + # all possible strategies without considering the whether the tensor + # dim could be sharded or not, we would need to filter out invalid + # strategies base on the actual tensor shape + # (i.e. for Shard, tensor dim size must > mesh size) + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + strat = PlacementStrategy(output_specs=spec_list[0], input_specs=spec_list[1:]) + all_strategies.append(strat) + + return OpStrategy(all_strategies) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/view_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/view_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..25fb92f6fcebfbed101150875f2cfad6e85c7079 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/view_ops.py @@ -0,0 +1,717 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +from dataclasses import dataclass +from typing import Callable, cast, Dict, Iterable, Optional, Sequence, Set, Tuple, Union + +import torch + +from torch import Tensor +from torch._subclasses.fake_tensor import unset_fake_temporarily +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.api import Shard +from torch.distributed._tensor.op_schema import ( + OpSchema, + OutputSharding, + RuntimeSchemaInfo, +) +from torch.distributed._tensor.ops.utils import ( + normalize_dim, + normalize_dims, + prod, + register_prop_rule, +) + +from torch.distributed._tensor.placement_types import DTensorSpec, Placement, Replicate +from torch.fx.experimental.proxy_tensor import disable_proxy_modes_tracing + +aten = torch.ops.aten + +Shape = Tuple[int, ...] + + +@dataclass +class DimSpec: + """Specifies how an output dimension maps to an input dimension.""" + + def inputs(self) -> Iterable["DimSpec"]: + return () + + +# Rules that map each dimension of the output to dimensions of the input tensor +DimMap = Tuple[DimSpec, ...] + + +@dataclass +class Singleton(DimSpec): + """Output dimension is a singleton.""" + + pass + + +@dataclass +class InputDim(DimSpec): + """Output dimension maps directly to an input dimension.""" + + input_dim: int + + +@dataclass +class Broadcast(DimSpec): + """Output is the broadcast of a singleton input dimension.""" + + dim: DimSpec + dim_size: int + + @classmethod + def new(cls, dim: DimSpec, dim_size: int) -> DimSpec: + return Broadcast(dim, dim_size) + + def inputs(self) -> Iterable[DimSpec]: + return (self.dim,) + + +@dataclass +class NewDim(DimSpec): + """This is a new dimension created by the op.""" + + size: int + + @classmethod + def new(cls, size: int) -> DimSpec: + return Singleton() if size == 1 else NewDim(size) + + +@dataclass +class Repeat(DimSpec): + """Output dimension is the input dimension repeated n-times.""" + + input_dim: DimSpec + times: int + + @classmethod + def new(cls, dim: DimSpec, times: int) -> DimSpec: + if times == 1: + return dim + elif isinstance(dim, Singleton): + # repeating a singleton is the same as broadcasting it + return Broadcast(dim, times) + else: + return Repeat(dim, times) + + def inputs(self) -> Iterable[DimSpec]: + return (self.input_dim,) + + +@dataclass +class Flatten(DimSpec): + """Flatten a set of input dimensions, ensuring right-most adjacent elements remain adjacent in the output.""" + + input_dims: Sequence[DimSpec] + + @classmethod + def new(cls, dims: Sequence[DimSpec]) -> DimSpec: + if len(dims) == 0: + # flattening a scalar leads to a singleton + return Singleton() + elif len(dims) == 1: + # flattening a single dimension is no-op + return dims[0] + else: + return Flatten(dims) + + def inputs(self) -> Iterable[DimSpec]: + return self.input_dims + + +@dataclass +class Split(DimSpec): + """ + This dimension is a member of a decomposition of the input dim. + + Note that input_dim itself could be a Flattened set of input dims. + """ + + input_dim: DimSpec + group_shape: Shape + split_id: int + + @classmethod + def new(cls, dim: DimSpec, group_shape: Tuple[int, ...], idx: int) -> DimSpec: + assert len(group_shape) > 0 + if len(group_shape) == 1: + # not really a group, just return the input dim back + assert idx == 0 + return dim + elif group_shape[idx] == 1: + return Singleton() + else: + # remove singletons from group + # group_mapping = [(new_index, (shape, old_index)) ...] 
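+            # Worked example (hypothetical values): group_shape=(1, 3, 4), idx=2
+            #   group_mapping   -> [(0, (3, 1)), (1, (4, 2))]
+            #   new_group_shape -> (3, 4)
+            #   new_idx         -> 1  (the size-4 entry, after dropping the singleton)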
+ group_mapping = list( + enumerate((s, i) for i, s in enumerate(group_shape) if s != 1) + ) + new_group_shape = tuple(m[1][0] for m in group_mapping) + new_idx = next(filter(lambda x: x[1][1] == idx, group_mapping))[0] + return Split(dim, new_group_shape, new_idx) + + def inputs(self) -> Iterable[DimSpec]: + return (self.input_dim,) + + +def dim_pad_left(ndim: int, min_dims: int) -> DimMap: + return (Singleton(),) * max(0, min_dims - ndim) + tuple( + InputDim(i) for i in range(ndim) + ) + + +def dim_atleast_3d(ndim: int) -> DimMap: + if ndim == 0: + return (Singleton(), Singleton(), Singleton()) + elif ndim == 1: + return (Singleton(), InputDim(0), Singleton()) + elif ndim == 2: + return (InputDim(0), InputDim(1), Singleton()) + else: + return tuple(InputDim(i) for i in range(ndim)) + + +def expand(input_shape: Shape, shape: Shape) -> DimMap: + """Implement broadcast on multiple dimensions.""" + assert len(shape) >= len(input_shape) + + # 1. create padded input dimensions + padded_input = dim_pad_left(len(input_shape), len(shape)) + # 2. check that input shapes are compatible + mapping = [] + for p, desired_s in zip(padded_input, shape): + if isinstance(p, Singleton): + actual_s = 1 + assert desired_s >= 0 + else: + assert isinstance(p, InputDim), f"DimSpec not supported in expand: {p}" + actual_s = input_shape[p.input_dim] + assert actual_s == 1 or desired_s == -1 or desired_s == actual_s + mapping.append( + p + if desired_s in (1, -1) or desired_s == actual_s + else Broadcast.new(p, desired_s) + ) + return tuple(mapping) + + +def normalize_sizes(sizes: Union[Shape, Tuple[Shape]]) -> Shape: + if isinstance(sizes[0], int): + return cast(Shape, sizes) + elif len(sizes) == 1: + return cast(Shape, sizes[0]) # type: ignore[redundant-cast] + else: + raise RuntimeError("Size must be int... or tuple") + + +def dim_flatten(ndim: int) -> DimMap: + if ndim == 0: + return (Singleton(),) + elif ndim == 1: + return (InputDim(0),) + else: + return (Flatten.new(tuple(InputDim(i) for i in range(ndim))),) + + +def dim_movedim( + ndim: int, + input: Union[int, Sequence[int]], + destination: Union[int, Sequence[int]], +) -> DimMap: + input = normalize_dims(input, ndim) + destination = normalize_dims(destination, ndim) + + assert len(input) == len(destination) + input_set = set(input) + assert len(input_set) == len(input), "Found repeated input dims" + assert len(set(destination)) == len(destination), "Found repeated output dims" + assert max(input) < ndim + assert max(destination) < ndim + + dest = [-1] * ndim + for i, d in zip(input, destination): + dest[d] = i + + unused_inputs_iter = iter(i for i in range(ndim) if i not in input_set) + for i in range(ndim): + if dest[i] == -1: + dest[i] = next(unused_inputs_iter) + + return tuple(InputDim(i) for i in dest) + + +def dim_repeat(ndim: int, sizes: Shape) -> DimMap: + sizes = normalize_sizes(sizes) + assert ( + len(sizes) >= ndim + ), f"Number of dimensions of repeat dims {sizes} can not be smaller than number of dimensions of tensor {ndim}." + pad = len(sizes) - ndim + return tuple(Repeat.new(Singleton(), s) for s in sizes[:pad]) + tuple( + Repeat.new(InputDim(i), s) for i, s in enumerate(sizes[pad:]) + ) + + +def infer_size(total_size: int, sizes: Shape) -> Shape: + """ + One dimension input to view may be "-1". + + Infer the size of this dimension given the total_size. 
+ """ + infers = [i for i, s in enumerate(sizes) if s == -1] + size = prod(sizes) + assert len(infers) <= 1, "can only infer one size" + if infers: + size = -size + missing_size = total_size // size + assert ( + total_size % size == 0 + ), f"size inferred for -1 is not integral {sizes} should have {total_size} elements." + return tuple(s if s != -1 else missing_size for s in sizes) + assert size == total_size, f"sizes do not match {total_size} vs {size}" + return sizes + + +def view_groups(from_size: Shape, to_size: Shape) -> DimMap: + """ + Decompose a reshape operation into forwarding, flattening, or splitting dimensions for each output dimension. + + A view or reshape operation can be decomposed into a set of 3 types of smaller operations: + 1) Forward a dimension from input to output + 2) Flatten a set of dimensions into a single dimension + 3) Split one dimension into multiple dimensions + + view_groups identifies these operations and returns, for each output dimension, what + is operation was performed in the input dimension. For example: + + view_groups([2, 3, 4], [2, 12]) -> ( + InputDim(0), + Flatten((InputDim(1), InputDim(2))) + ) + + - ouptut dimension 0 maps to input dimension 0 + - output dimension 1 maps to a flattened input dimensions 1 and 2 + + + view_groups([2, 3], [3, 2]) -> ( + Split(Flatten((InputDim(0), InputDim(1))), (3, 2), 0), + Split(Flatten((InputDim(0), InputDim(1))), (3, 2), 1), + ) + + - in the above, input is flattened into a single dimension and then split + into two separate dimensions with different sizes from the input. + """ + from_nelem = prod(from_size) + to_size = infer_size(from_nelem, normalize_sizes(to_size)) + + assert from_nelem == prod(to_size), "Total view shape does not add up" + + from_idx = 0 + to_idx = 0 + from_len = len(from_size) + to_len = len(to_size) + + result_pp = [] + + while from_idx < from_len or to_idx < to_len: + from_group_dim, to_group_shape = [], [] + + if from_idx >= from_len: + f = 1 + else: + f = from_size[from_idx] + from_group_dim.append(from_idx) + from_idx += 1 + + if to_idx >= to_len: + t = 1 + else: + t = to_size[to_idx] + to_group_shape.append(t) + to_idx += 1 + + # if any of the groups is singleton, great, we need to backtrack though + if f == 1 and t != 1: + # produces ([1], []) + to_idx -= 1 + to_group_shape = [] + elif f != 1 and t == 1: + # produces ([], [1]) + from_idx -= 1 + from_group_dim = [] + else: + # produces ([1], [1]), ([2], [2]), ([2,3], [6]) + while f != t: + if f < t: + nf = from_size[from_idx] + from_group_dim.append(from_idx) + from_idx += 1 + f *= nf + else: + nt = to_size[to_idx] + to_group_shape.append(nt) + to_idx += 1 + t *= nt + + if len(to_group_shape) > 0: + flattened = Flatten.new( + tuple(InputDim(fi) for fi in from_group_dim if from_size[fi] > 1) + ) + result_pp += [ + Split.new(flattened, tuple(to_group_shape), i) + for i in range(len(to_group_shape)) + ] + + return tuple(result_pp) + + +def dim_tile(ndim: int, dims: Tuple[int, ...]) -> DimMap: + if len(dims) < ndim: + dims = (1,) * (ndim - len(dims)) + dims + return dim_repeat(ndim, dims) + + +def dim_transpose(ndim: int, dim1: int, dim2: int) -> DimMap: + dim1 = normalize_dim(dim1, ndim) + dim2 = normalize_dim(dim2, ndim) + assert dim1 < ndim + assert dim2 < ndim + dimmap = [InputDim(i) for i in range(ndim)] + swapdim = dimmap[dim1] + dimmap[dim1] = dimmap[dim2] + dimmap[dim2] = swapdim + return tuple(dimmap) + + +def dim_squeeze(shape: Shape, dim: Optional[int] = None) -> DimMap: + # FIXME: this is wrong when dim=None and one of 
the dimensions + # equals size of the mesh. For example squeeze(DTensor(tensor(4), Shard[0])) could + # end up as squeeze(tensor(1)) if we have 4 devices; this would lead to + # removal of a dimension that is not actually a singleton. + return tuple( + InputDim(i) + for i, s in enumerate(shape) + if s > 1 or (dim is not None and i != normalize_dim(dim, len(shape))) + ) + + +def dim_unsqueeze(ndim: int, dim: int) -> DimMap: + dims = tuple(InputDim(i) for i in range(ndim)) + if dim < 0: + dim += ndim + 1 + return dims[:dim] + (Singleton(),) + dims[dim:] + + +def dim_reduction( + ndim: int, dim_or_dims: Optional[Union[int, Sequence[int]]], keepdim: bool +) -> DimMap: + """ + General fallback for reduction ops where _Partial() does not apply. + + This will cause incoming tensor to be replicated on the reducing dimensions. + """ + if dim_or_dims is None: + dim_or_dims = tuple(range(ndim)) + if isinstance(dim_or_dims, int): + dim_or_dims = (dim_or_dims,) + dim_or_dims = tuple(d if d >= 0 else d + ndim for d in dim_or_dims) + return tuple( + InputDim(i) if i not in dim_or_dims else Singleton() + for i in range(ndim) + if i not in dim_or_dims or keepdim + ) + + +@dataclass +class Op: + dim_map: Callable[..., DimMap] + shape_argnum: Optional[int] = None + + +ops: Dict[Callable[..., torch.Tensor], Op] = { + torch.atleast_1d: Op(dim_map=lambda x: dim_pad_left(x.ndim, 1)), + torch.atleast_2d: Op(dim_map=lambda x: dim_pad_left(x.ndim, 2)), + torch.atleast_3d: Op(dim_map=lambda x: dim_atleast_3d(x.ndim)), + torch.broadcast_to: Op( + dim_map=lambda input, shape: expand(input.shape, shape), shape_argnum=1 + ), + Tensor.expand: Op( + dim_map=lambda self, *sizes: expand(self.shape, normalize_sizes(sizes)), + shape_argnum=1, + ), + torch.flatten: Op(dim_map=lambda tensor: dim_flatten(tensor.ndim)), + torch.movedim: Op( + dim_map=lambda input, source, destination: dim_movedim( + input.ndim, source, destination + ) + ), + torch.permute: Op( + dim_map=lambda input, dims: tuple( + InputDim(i) for i in normalize_dims(dims, input.ndim) + ) + ), + torch.ravel: Op(dim_map=lambda tensor: dim_flatten(tensor.ndim)), + Tensor.repeat: Op(dim_map=lambda self, *sizes: dim_repeat(self.ndim, sizes)), + torch.reshape: Op( + dim_map=lambda input, shape: view_groups(input.shape, shape), + shape_argnum=1, + ), + torch.squeeze: Op(dim_map=lambda input, dim=None: dim_squeeze(input.shape, dim)), + torch.tile: Op(dim_map=lambda input, dims: dim_tile(input.ndim, dims)), + torch.transpose: Op( + dim_map=lambda input, dim0, dim1: dim_transpose(input.ndim, dim0, dim1) + ), + torch.unsqueeze: Op(dim_map=lambda input, dim: dim_unsqueeze(input.ndim, dim)), + Tensor.view: Op( + dim_map=lambda input, *shape: view_groups(input.shape, shape), + shape_argnum=1, + ), +} + + +def propagate_shape_and_sharding( + in_shard: Sequence[Placement], + local_in_shape: Shape, + rule: DimMap, + mesh_sizes: Shape, +) -> Tuple[Shape, Optional[Sequence[Placement]], torch.Tensor]: + """ + Determine output sharding and tensor shape based on given global tensor shape and input sharding. + + Takes as input the global shape of the tensor, and the input sharding, + and produce corresponding output sharding and shape of the output tensor. + + Sharding propagation follows mapped dimensions: + - An output dimension that maps directly to an input dimension is sharded equally + - An output dimension that is a flattened set of input dimensions can only be + sharded if only the leftmost flattened dimension is sharded. 
+ - An output dimension that is a split of the input dimension can only be sharded + if the leftmost split size is divisible by the mesh dimension + """ + assert len(in_shard) == len(mesh_sizes) + sharded_in_dims: Set[int] = {s.dim for s in in_shard if isinstance(s, Shard)} + # for each input dim, for each mesh dim, provides a list of possible shardable dimensions + shardable_dims: torch.Tensor = torch.ones( + (len(local_in_shape), len(mesh_sizes)), dtype=torch.bool + ) + + # in case an input dimension disappears (e.g. collapsing, reduction) + # we cannot shard in that dimension (we need a replication fall-back rule) + + seen_input_dims: Set[int] = set() + + def collect_used_inputs(cmd: DimSpec) -> None: + if isinstance(cmd, InputDim): + seen_input_dims.add(cmd.input_dim) + for inp in cmd.inputs(): + collect_used_inputs(inp) + + for cmd in rule: + collect_used_inputs(cmd) + for dim in range(len(local_in_shape)): + shardable_dims[dim, :] = dim in seen_input_dims + + def get_dim_size(cmd: DimSpec) -> Tuple[int, Optional[InputDim]]: + if isinstance(cmd, InputDim): + seen_input_dims.add(cmd.input_dim) + return ( + local_in_shape[cmd.input_dim], + cmd if cmd.input_dim in sharded_in_dims else None, + ) + elif isinstance(cmd, Flatten): + for dim in cmd.input_dims[1:]: + if isinstance(dim, InputDim): + shardable_dims[dim.input_dim, :] = False + dim0 = cmd.input_dims[0] + return ( + prod(get_dim_size(a)[0] for a in cmd.input_dims), + dim0 + if isinstance(dim0, InputDim) and dim0.input_dim in sharded_in_dims + else None, + ) + elif isinstance(cmd, Split): + _, in_dim = get_dim_size(cmd.input_dim) + out_size = cmd.group_shape[cmd.split_id] + if cmd.split_id == 0 and in_dim is not None: + # we need to check that the input dimension is divisible + # by the size of the submesh we're sharding it on + # NOTE: it would be possible to shard the same input dimension + # on more than one mesh dimension. In that case, the dimension + # needs to be divisible by the product of mesh sizes. + # In order to keep the problem more tractable, we will not consider + # double resharding as a suggestion (e.g. [Shard(0), Shard(0) ]) + # but we will allow it if that's the input and it's compatible + + # 1. is this dimension shardable on each individual mesh dim? + for mesh_dim, mesh_dim_size in enumerate(mesh_sizes): + shardable_dims[in_dim.input_dim, mesh_dim] = ( + out_size % mesh_dim_size == 0 + ) + + # 2. here we special case things like [Shard(0), Shard(0)] + submesh_size = 1 + for size, shard in zip(mesh_sizes, in_shard): + if isinstance(shard, Shard) and shard.dim == in_dim: + submesh_size *= size + assert ( + out_size % submesh_size == 0 + ), f"Resulting dimension size {out_size} is not divisible by its mesh dimension {submesh_size}." 
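+                    # Illustrative example (assumed shapes): a tensor of shape (16,)
+                    # that is Shard(0) on a mesh dim of size 4:
+                    #   view to (8, 2) -> leftmost split size 8 % 4 == 0, so the shard
+                    #                     on that input dim can be preserved
+                    #   view to (2, 8) -> 2 % 4 != 0, so the dim is marked unshardable
+                    #                     and a reshard suggestion is produced downstream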
+ + # we will only shard our first component of the split + return out_size, in_dim if cmd.split_id == 0 else None + elif isinstance(cmd, Singleton): + return 1, None + elif isinstance(cmd, Broadcast): + return cmd.dim_size, None + elif isinstance(cmd, NewDim): + return cmd.size, None + elif isinstance(cmd, Repeat): + size, in_dim = get_dim_size(cmd.input_dim) + if in_dim is not None: + shardable_dims[in_dim.input_dim, :] = False + return size * cmd.times, None + else: + raise RuntimeError(f"cmd not found: {cmd}, in rule: {rule}") + + dim_map = {} + out_shape = [] + for dim, cmd in enumerate(rule): + out_size, in_dim = get_dim_size(cmd) + out_shape.append(out_size) + if in_dim is not None: + dim_map[in_dim.input_dim] = dim + + needs_reshard = any( + isinstance(placement, Shard) and not shardable_dims[placement.dim][mesh_dim] + for mesh_dim, placement in enumerate(in_shard) + ) + + output_placements = ( + None + if needs_reshard + else [Shard(dim_map[s.dim]) if isinstance(s, Shard) else s for s in in_shard] + ) + + return (tuple(out_shape), output_placements, shardable_dims) + + +def register_prop_rule_map( + aten_op_overload: torch._ops.OpOverload, + local_op_name: Callable[..., torch.Tensor], + schema_info: Optional[RuntimeSchemaInfo] = None, +) -> None: + spec: Op = ops[local_op_name] + + @register_prop_rule(aten_op_overload, schema_info=schema_info) + def reshape_prop(op_schema: OpSchema) -> OutputSharding: + rules = spec.dim_map(*op_schema.args_schema, **op_schema.kwargs_schema) + input_dtensor_spec = cast(DTensorSpec, op_schema.args_schema[0]) + mesh = input_dtensor_spec.mesh + + assert isinstance( + input_dtensor_spec, DTensorSpec + ), "Expected first input to be a DTensorSpec" + global_in_shape = input_dtensor_spec.shape + assert global_in_shape is not None, "Shape required." + + with disable_proxy_modes_tracing(), unset_fake_temporarily(): + ( + global_out_shape, + shard_out, + shardable_dims, + ) = propagate_shape_and_sharding( + input_dtensor_spec.placements, + tuple(global_in_shape), + rules, + mesh.shape, + ) + + if shard_out is not None: + # no reshard needed + output_dtensor_spec = DTensorSpec(mesh=mesh, placements=tuple(shard_out)) + + # We only need the local shape to lower the call into the local op + args = op_schema.args_schema + shape_argnum = spec.shape_argnum + if shape_argnum is not None: + # compute the local shape from the global shape, then return + # a resharding even if we don't really reshard, the only reason + # for this type of resharding is to lower the global shape to + # local shape + local_out_shape = compute_local_shape( + list(global_out_shape), mesh, shard_out + ) + + suggested_schema = OpSchema( + op=op_schema.op, + args_schema=args[:shape_argnum] + + (tuple(local_out_shape),) + + args[shape_argnum + 1 :], + kwargs_schema=op_schema.kwargs_schema, + ) + return OutputSharding( + output_spec=output_dtensor_spec, + schema_suggestions=[suggested_schema], + needs_redistribute=True, + ) + + return OutputSharding(output_spec=output_dtensor_spec) + + else: + # TODO: optimize this. we shouldn't simply blindly replicate + # unshardable dims ... 
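+            # e.g. (illustrative) an input placed as [Shard(0)] whose dim 0 cannot
+            # survive this view is suggested back as [Replicate()], so the caller
+            # redistributes the input before re-running the op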
+ # FIXME: this can be wrong for situations where we have + # [Shard(0), Shard(0)] + suggested_placements = [ + p + if not isinstance(p, Shard) or shardable_dims[p.dim][mesh_dim] + else Replicate() + for mesh_dim, p in enumerate(input_dtensor_spec.placements) + ] + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + DTensorSpec( + placements=tuple(suggested_placements), + mesh=input_dtensor_spec.mesh, + tensor_meta=input_dtensor_spec.tensor_meta, + ), + ) + + op_schema.args_schema[1:], + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + + +register_prop_rule_map(aten.squeeze.default, torch.squeeze) +register_prop_rule_map( + aten.squeeze.dim, torch.squeeze, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map(aten.view.default, Tensor.view, schema_info=RuntimeSchemaInfo(1)) +register_prop_rule_map( + aten.reshape.default, torch.reshape, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten._unsafe_view.default, Tensor.view, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.unsqueeze.default, torch.unsqueeze, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.expand.default, Tensor.expand, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.permute.default, torch.permute, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.repeat.default, Tensor.repeat, schema_info=RuntimeSchemaInfo(1) +) +register_prop_rule_map( + aten.transpose.int, torch.transpose, schema_info=RuntimeSchemaInfo(1) +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py new file mode 100644 index 0000000000000000000000000000000000000000..8d88d064e8fb69454c5f0d572bad09a93960a6aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/placement_types.py @@ -0,0 +1,620 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +from dataclasses import dataclass +from typing import Any, cast, List, NamedTuple, Optional, Tuple + +import torch +import torch.distributed._functional_collectives as funcol +import torch.distributed.distributed_c10d as c10d + +from torch.distributed._tensor._collective_utils import mesh_broadcast, mesh_scatter +from torch.distributed.device_mesh import DeviceMesh + + +class Placement: + # base class Placement type + + # convenient utils to check for placement types + def is_shard(self, dim: Optional[int] = None) -> bool: + is_shard_instance = isinstance(self, Shard) + if dim is not None and is_shard_instance: + return cast(Shard, self).dim == dim + else: + return is_shard_instance + + def is_replicate(self) -> bool: + return isinstance(self, Replicate) + + def is_partial(self) -> bool: + return isinstance(self, _Partial) + + +@dataclass(frozen=True) +class Shard(Placement): + # shard placement, shard on a dim + dim: int + + def _split_tensor( + self, + tensor: torch.Tensor, + num_chunks: int, + *, + with_padding: bool = True, + contiguous: bool = True, + ) -> Tuple[List[torch.Tensor], List[int]]: + """ + This function uses torch.chunk to split a tensor into num_chunks shards along + the Shard placement dimension, and return a list of shards with their pad sizes. + + Keyword args: + with_padding (bool, optional): when True, we pad the tensor on the last + few ranks before calling the collectives (i.e. scatter/all_gather, etc.). 
+ This is because collectives usually require equal size tensor inputs + """ + assert ( + self.dim <= tensor.ndim + ), f"Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}" + + # chunk tensor over dimension `dim` into n slices with padding if necessary + tensor_list = list(torch.chunk(tensor, num_chunks, dim=self.dim)) + # compute the chunk size inline with ``torch.chunk`` + full_chunk_size = (tensor.size(self.dim) + num_chunks - 1) // num_chunks + + # Compute chunk size for each chunk for ``self.dim`` + chunk_sizes = [ + tensor_list[idx].size(self.dim) if idx < len(tensor_list) else 0 + for idx in range(num_chunks) + ] + # Compute pad size on each chunk + pad_sizes = [full_chunk_size - chunk_size for chunk_size in chunk_sizes] + + # Reuse tensor to fill empty chunk with empty tensor + num_empty_tensors = num_chunks - len(tensor_list) + tensor_size = list(tensor_list[0].size()) + tensor_size = [ + size if idx != self.dim else 0 for idx, size in enumerate(tensor_size) + ] + tensor = tensor.new_zeros(tensor_size) + for _ in range(num_empty_tensors): + tensor_list.append(tensor) + + if with_padding or contiguous: + shard_list = [] + for shard, pad_size in zip(tensor_list, pad_sizes): + # Fill the empty tensor with zeroes with padding. + if with_padding and pad_size > 0: + shard = self._pad_tensor(shard, pad_size) + shard = shard.contiguous() if contiguous else shard + shard_list.append(shard) + return shard_list, pad_sizes + else: + return tensor_list, pad_sizes + + def _pad_tensor( + self, + tensor: torch.Tensor, + pad_size: int, + ) -> torch.Tensor: + if pad_size == 0: + return tensor + pad = [0, 0] * (tensor.ndim - self.dim) + pad[-1] = pad_size + return torch.nn.functional.pad(tensor, pad) + + def _unpad_tensor( + self, + tensor: torch.Tensor, + pad_size: int, + ) -> torch.Tensor: + if pad_size == 0: + return tensor + return tensor.narrow( + self.dim, + start=0, + length=tensor.size(self.dim) - pad_size, + ) + + @staticmethod + def _local_shard_size_on_dim( + size_on_dim: int, + num_chunks: int, + rank: int, + return_offset: bool = False, + ) -> Tuple[int, int]: + """ + returns the local shard size and offset on a given tensor dim + """ + # Compute the chunk size inline with ``torch.chunk`` + if size_on_dim % num_chunks == 0: + full_chunk_size = size_on_dim // num_chunks + return full_chunk_size, full_chunk_size * rank if return_offset else -1 + + # uneven sharding case + full_chunk_size = (size_on_dim + num_chunks - 1) // num_chunks + shard_starting_idx = full_chunk_size * rank + + if size_on_dim < shard_starting_idx: + return 0, size_on_dim if return_offset else -1 + else: + local_shard_size = ( + min(size_on_dim, shard_starting_idx + full_chunk_size) + - shard_starting_idx + ) + return local_shard_size, shard_starting_idx if return_offset else -1 + + def _shard_tensor( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + """ + shard and scatter a tensor on a mesh dimension (use coordinate + 0 on the mesh dimension as source of truth) + """ + my_coordinate = mesh.get_coordinate() + num_chunks = mesh.size(mesh_dim=mesh_dim) + + if my_coordinate is None: + # if rank is not part of mesh, we simply return an empty tensor + return tensor.new_empty(0, requires_grad=tensor.requires_grad) + + scatter_list, pad_sizes = self._split_tensor( + tensor, num_chunks, with_padding=True, contiguous=True + ) + + output = torch.empty_like(scatter_list[my_coordinate[mesh_dim]]) + mesh_scatter(output, scatter_list, mesh, mesh_dim=mesh_dim) + + # Only unpad if the 
local_tensor was padded on the dimension. + pad_size = pad_sizes[my_coordinate[mesh_dim]] + if pad_size > 0: + output = self._unpad_tensor(output, pad_size) + return output + + def _reduce_shard_tensor( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + reduce_op: c10d.ReduceOp.RedOpType, + mesh_dim: int, + ) -> torch.Tensor: + """ + reduce and scatter a tensor on a mesh dimension + """ + my_coordinate = mesh.get_coordinate() + num_chunks = mesh.size(mesh_dim=mesh_dim) + + if my_coordinate is None: + # if rank is not part of mesh, we simply return local_tensor, + # which should be an empty tensor + return tensor + + is_padded = tensor.size(self.dim) % num_chunks != 0 + if is_padded: + scattered_list, pad_sizes = self._split_tensor( + tensor, num_chunks, with_padding=True, contiguous=True + ) + tensor = torch.cat(scattered_list, dim=self.dim) + elif not tensor.is_contiguous(): + tensor = tensor.contiguous() + + output = funcol.reduce_scatter_tensor( + tensor, reduce_op.name, scatter_dim=self.dim, group=(mesh, mesh_dim) + ) + + if is_padded: + output = self._unpad_tensor(output, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined] + return output + + def _to_replicate_tensor( + self, + local_tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + current_logical_shape: List[int], + ) -> torch.Tensor: + """ + This function all_gather all shards and return a tensor that + is replicated on the previously sharded mesh dimension + """ + num_chunks = mesh.size(mesh_dim=mesh_dim) + # check if it's uneven, so we need to pad input tensor before all_gather + local_shape = list(local_tensor.size()) + + logical_dim_size = current_logical_shape[self.dim] + is_padded = logical_dim_size % num_chunks != 0 + + if is_padded: + full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks + pad_size = full_chunk_size - local_shape[self.dim] + local_tensor = self._pad_tensor(local_tensor, pad_size) + + if not local_tensor.is_contiguous(): + local_tensor = local_tensor.contiguous() + + result = funcol.all_gather_tensor( + local_tensor, + gather_dim=self.dim, + group=(mesh, mesh_dim), + ) + if is_padded: + unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined] + result = self._unpad_tensor(result, unpad_size) + return result + + def _replicate_to_shard( + self, + local_tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_index: int, + ) -> torch.Tensor: + """ + transform from replicated tensor to a sharded tensor on + the current rank, which would perform a local chunk + """ + num_chunks = mesh.size(mesh_dim=mesh_dim) + shards, _ = self._split_tensor( + local_tensor, + num_chunks, + with_padding=False, + contiguous=False, + ) + return shards[shard_index].clone() + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Shard): + return False + return self.dim == other.dim + + def __hash__(self) -> int: + return hash(self.dim) + + def __repr__(self) -> str: + """ + machine readable representation of the Shard placement + """ + return f"Shard(dim={self.dim})" + + def __str__(self) -> str: + """human readable representation of the Shard placement""" + return f"S({self.dim})" + + +@dataclass(frozen=True) +class Replicate(Placement): + # replicate placement + def __eq__(self, other: object) -> bool: + if not isinstance(other, Replicate): + return False + return True + + def __hash__(self) -> int: + # every replicate placement is the same + return -1 + + def __repr__(self) -> str: + """ + machine readable representation 
of the Replicate placement + """ + return "Replicate()" + + def __str__(self) -> str: + """ + human readable representation of the Replicate placement + """ + return "R" + + def _replicate_tensor( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + """ + Replicate (broadcast) a torch.Tensor on a mesh dimension (use + the first coordinate on the mesh dimension as source of truth) + """ + my_coordinate = mesh.get_coordinate() + if my_coordinate is None: + # if rank is not part of mesh, we simply return an empty tensor + return tensor.new_empty(0, requires_grad=tensor.requires_grad) + + tensor = tensor.contiguous() + mesh_broadcast(tensor, mesh, mesh_dim=mesh_dim) + return tensor + + +@dataclass(frozen=True) +class _Partial(Placement): + # This is a default _Partial placement with element-wise reduce op + # _Partial define three contracts: + # 1. _reduce_value: reduce the value of the tensor on the mesh dimension + # 2. _reduce_shard_value: reduce_scatter the value of the tensor on the mesh dimension + # 3. _partition_value: partition the value of a replicated tensor on the mesh dimension + # We can implement custom reductions as needed by subclassing this + # class and override those contracts. + reduce_op: c10d.ReduceOp.RedOpType = c10d.ReduceOp.SUM + + def _reduce_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + return funcol.all_reduce( + tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim) + ) + + def _reduce_shard_value( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_spec: Placement, + ) -> torch.Tensor: + # by default call reduce_shard_tensor of the shard_spec. + shard_spec = cast(Shard, shard_spec) + return shard_spec._reduce_shard_tensor(tensor, mesh, self.reduce_op, mesh_dim) + + def _partition_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + # _partition_value is the conjugate operation of _reduce_value + # - i.e. _partition_value on a sum reduce op is just a divison operation + # - the _reduce_value on a sum reduce op would just be a sum(allreduce) operation + # TODO: if the reduce_op is min/max, etc. the _partition_value should be a + # different operation + assert ( + self.reduce_op == c10d.ReduceOp.SUM + ), "only support replicate to PartialSUM for now!" + num_chunks = mesh.size(mesh_dim=mesh_dim) + return tensor / num_chunks + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _Partial): + return False + return self.reduce_op == other.reduce_op + + def __hash__(self) -> int: + return 1 + hash(self.reduce_op) + + def __repr__(self) -> str: + """ + machine readable representation of the Partial placement + """ + return f"_Partial(reduce_op={self.reduce_op})" + + def __str__(self) -> str: + """ + human readable representation of the Partial placement + """ + return "P" + + +class TensorMeta(NamedTuple): + # simple named tuple to represent tensor metadata + # intentionally to stay simple only for sharding + # propagation purposes. + shape: torch.Size + stride: Tuple[int, ...] + dtype: torch.dtype + + +# used internally to propagate the placements +@dataclass +class DTensorSpec: + mesh: DeviceMesh + placements: Tuple[Placement, ...] 
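+    # Example (hypothetical 2-D mesh): DTensorSpec(mesh, (Shard(0), Replicate()))
+    # describes a tensor sharded on tensor dim 0 across mesh dim 0 and replicated
+    # across mesh dim 1; placements[i] is always the placement for mesh dim i.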
+ + # tensor meta will only be set during sharding propagation + tensor_meta: Optional[TensorMeta] = None + + def __post_init__(self): + if not isinstance(self.placements, tuple): + self.placements = tuple(self.placements) + self._hash: Optional[int] = None + + def __setattr__(self, attr: str, value: Any): + super().__setattr__(attr, value) + # Make sure to recompute the hash in case any of the hashed attributes + # change (though we do not expect `mesh` or `placements` to change) + if hasattr(self, "_hash") and attr in ("mesh", "placements", "tensor_meta"): + self._hash = None + + def _hash_impl(self) -> int: + # hashing and equality check for DTensorSpec are used to cache the sharding + # propagation results. We only need to consider the mesh, placements, shape + # dtype and stride. + # Caveat: we need to keep this in mind and sync hash and eq if we add more + # fields to them. + if self.tensor_meta is not None: + return hash( + ( + self.mesh, + self.placements, + self.tensor_meta.shape, + self.tensor_meta.stride, + self.tensor_meta.dtype, + ) + ) + return hash((self.mesh, self.placements)) + + def __hash__(self) -> int: + # We lazily cache the spec to avoid recomputing the hash upon each + # use, where we make sure to update the hash when the `tensor_meta` + # changes by overriding `__setattr__`. This must be lazy so that Dynamo + # does not try to hash non-singleton `SymInt`s for the stride. + if self._hash is None: + self._hash = self._hash_impl() + return self._hash + + def __eq__(self, __o: object) -> bool: + if not ( + isinstance(__o, DTensorSpec) + and self.mesh == __o.mesh + and self.placements == __o.placements + ): + return False + if self.tensor_meta is None or __o.tensor_meta is None: + return self.tensor_meta == __o.tensor_meta + + return ( + self.tensor_meta.shape == __o.tensor_meta.shape # type: ignore[union-attr] + and self.tensor_meta.stride == __o.tensor_meta.stride # type: ignore[union-attr] + and self.tensor_meta.dtype == __o.tensor_meta.dtype # type: ignore[union-attr] + ) + + def __str__(self) -> str: + """ + human readable representation of the DTensorSpec + """ + if len(self.placements) == 1: + placement_str = str(self.placements[0]) + else: + placement_str = str(self.placements) + + if self.tensor_meta is not None: + tensor_shape = str(tuple(self.tensor_meta.shape)) + else: + tensor_shape = "unknown shape" + + return f"Spec({placement_str} on {tensor_shape})" + + @property + def shape(self) -> torch.Size: + if self.tensor_meta is None: + raise ValueError("tensor_meta is not set") + return self.tensor_meta.shape + + @property + def stride(self) -> Tuple[int, ...]: + if self.tensor_meta is None: + raise ValueError("tensor_meta is not set") + return self.tensor_meta.stride + + @property + def ndim(self) -> int: + if self.tensor_meta is None: + raise ValueError("tensor_meta is not set") + return len(self.tensor_meta.shape) + + @property + def num_shards(self) -> int: + num_shards = 1 + for i, placement in enumerate(self.placements): + if placement.is_shard(): + num_shards *= self.mesh.size(i) + return num_shards + + @property + def device_mesh(self) -> DeviceMesh: + # simple aliasing for the mesh field, make some + # checks that mixes DTensor/DTensorSpec easier + return self.mesh + + @property + def dim_map(self) -> List[int]: + """ + dim_map is a property we derive from `placements` of + the distributed tensor. 
It simply returns a list of ints
+        where dim_map[i] denotes the sharding mapping to the mesh
+        dimension, and len(dim_map) == dist_tensor.ndim
+        dim_map[i] = -1: means tensor dim i is replicated on the mesh
+        dim_map[i] = j: means tensor dim i is sharded on mesh dim j
+
+        For example, given a dist tensor that has the shape
+        [18, 20, 30], a device_mesh([0, 1, 2, 3]) and placements
+        [Shard(1)], the dim_map of this placement would be
+        [-1, 0, -1]. This representation is pretty helpful during
+        sharding propagation, where we can know exactly whether each
+        tensor dimension is sharded or not.
+
+        Note that if placements contains `_Partial`, we have to
+        explicitly deal with it, so that when we create a DTensorSpec
+        with dim_map, we could properly record the pending sums.
+        """
+        # dims mapping of dist tensor sharding
+        # return size of tensor ndim, -1 represent replicate
+        # and int >=0 represent shard on that device mesh dim
+        r = [-1] * self.ndim
+        for i, placement in enumerate(self.placements):
+            if placement.is_shard():
+                shard_dim = cast(Shard, placement).dim
+                if r[shard_dim] > -1:
+                    raise ValueError(
+                        f"Tensor dim {shard_dim} is already sharded on mesh dim {r[shard_dim]},"
+                        " DTensor operator implementation does not support things like hybrid"
+                        " sharding strategies yet (i.e. [Shard(0), Shard(0)])"
+                    )
+                r[shard_dim] = i
+        return r
+
+    @property
+    def sums(self) -> List[int]:
+        """
+        sums is a property we derive from `placements` of the
+        distributed tensor. It simply returns a list of ints where
+        sums[i] denotes the pending sum (partial) on mesh dim i
+        """
+        return [
+            idx
+            for idx, placement in enumerate(self.placements)
+            if placement.is_partial()
+        ]
+
+    @classmethod
+    def from_dim_map(
+        cls,
+        mesh: DeviceMesh,
+        dim_map: List[int],
+        sums: List[int],
+        tensor_meta: Optional[TensorMeta] = None,
+    ) -> "DTensorSpec":
+        """
+        Construct a DTensorSpec from dim_map list and pending sum.
+
+        Args:
+            mesh (class:`DeviceMesh`): device mesh to be used in the DTensorSpec
+            dim_map (List[int]): a list of integers that represents the sharding on each
+                tensor dimension, see `dim_map` property doc for details
+            sums (List[int]): a list of integers denoting which device mesh dimensions
+                have a pending sum.
+            tensor_meta (TensorMeta): DTensor metadata
+
+        Return:
+            a class:`DTensorSpec` object
+        """
+        # by default replicate on device mesh dims
+        placements: List[Placement] = [Replicate() for _ in range(mesh.ndim)]
+
+        # find all mesh dims that need pending reductions
+        for s in sums:
+            placements[s] = _Partial()
+
+        for i, m in enumerate(dim_map):
+            if m >= 0:
+                placement = placements[m]
+                if placement.is_shard():
+                    placement = cast(Shard, placement)
+                    raise RuntimeError(
+                        f"DeviceMesh dimension cannot be mapped to two dimensions of the same tensor: {i} and {placement.dim}"
+                    )
+                elif placement.is_partial():
+                    raise RuntimeError(
+                        f"DeviceMesh dimension {m} cannot be both shard and partial!"
+                    )
+                placements[m] = Shard(i)
+
+        return cls(mesh, tuple(placements), tensor_meta=tensor_meta)
+
+    def is_replicated(self):
+        """
+        return True if the current DTensorSpec replicates on all mesh dims (devices)
+        """
+        return all(placement.is_replicate() for placement in self.placements)
+
+    def shallow_copy_with_tensor_meta(
+        self, tensor_meta: Optional[TensorMeta]
+    ) -> "DTensorSpec":
+        """
+        Shallow copy the DTensorSpec with a new tensor_meta.
+        """
+        assert tensor_meta is not None, "shallow copy with no tensor_meta!"
+        return DTensorSpec(
+            self.mesh,
+            self.placements,
+            tensor_meta=tensor_meta,
+        )
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py
new file mode 100644
index 0000000000000000000000000000000000000000..58f3f42c94b70f129899ba33dafbf8b4e349a900
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/redistribute.py
@@ -0,0 +1,337 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+from functools import lru_cache
+from typing import cast, Dict, List, NamedTuple, Tuple
+
+import torch
+import torch.distributed._functional_collectives as funcol
+import torch.distributed._tensor.api as dtensor
+from torch.distributed._tensor.device_mesh import DeviceMesh
+from torch.distributed._tensor.placement_types import (
+    _Partial,
+    DTensorSpec,
+    Placement,
+    Replicate,
+    Shard,
+)
+
+
+class _TransformInfo(NamedTuple):
+    mesh_dim: int
+    src_dst_placements: Tuple[Placement, Placement]
+    # logical_shape on this mesh dimension
+    logical_shape: List[int]
+
+
+def _replicate_then_shard(val: _TransformInfo) -> int:
+    """
+    This is a helper function to allow reordering the _TransformInfo list. The high level
+    idea is that we want to reorder the sharding redistributions so that the DTensor
+    redistribution is consistent with its full tensor. This is built on top of two simple
+    assumptions:
+    1. Replication happens from inner to outer dimension, i.e. Shard -> Replicate
+    2. Sharding happens from outer to inner dimension, i.e. Replicate -> Shard
+
+    So we always put the replication first and put sharding later.
+    """
+    mesh_dim = val.mesh_dim
+    src, dst = val.src_dst_placements
+    if (dst.is_replicate() or dst.is_partial()) and src.is_shard():
+        return -mesh_dim
+    elif (src.is_replicate() or src.is_partial()) and dst.is_shard():
+        return mesh_dim
+    else:
+        return 0
+
+
+@lru_cache(maxsize=None)
+def _gen_transform_infos(
+    src_spec: DTensorSpec,
+    dst_spec: DTensorSpec,
+) -> List[_TransformInfo]:
+    """
+    Generate the transform infos from the source placements to the target placements.
+    Transforming from source to target placement might take multiple steps, i.e. it might
+    decompose Si -> Sj into Si -> R -> Sj.
+    This also detects if there are mis-aligned shardings between src/dst placements,
+    i.e. (Shard(0), Shard(0)) -> (Replicate(), Shard(0)); in this case Shard(0) -> Shard(0)
+    for mesh dimension 1 actually needs a reshard, because in the first case it's a sub-sharding
+    of an already sharded tensor dimension 0, and in the second case, it's the first sharding on tensor
+    dimension 0.
+
+    Note that we also currently handle sharding on different tensor dimensions, e.g.
+ Shard(0) -> Shard(1) in this pass + """ + src_dim_counts: Dict[int, int] = {} + dst_dim_counts: Dict[int, int] = {} + transform_infos: List[_TransformInfo] = [] + + src_placements = src_spec.placements + dst_placements = dst_spec.placements + device_mesh = src_spec.device_mesh + my_coordinate = device_mesh.get_coordinate() + assert my_coordinate is not None + + # logical shape records the logic tensor shape on the mesh dimension + # this is useful to ensure uneven sharding gets correct output shape + initial_logical_shape = list(src_spec.shape) + mesh_dims_to_logical_shape = [initial_logical_shape] + mesh_ndim = len(src_placements) + + for i, (src, dst) in enumerate(zip(src_placements, dst_placements)): + # detect mis-aligned sharding and build logical shapes + current_logical_shape = mesh_dims_to_logical_shape[i] + if isinstance(src, Shard): + src_dim_counts[src.dim] = src_dim_counts.get(src.dim, 0) + 1 + + if i < mesh_ndim - 1: + # calculate and save the logical shape for this sharding + mesh_dim_size = device_mesh.size(mesh_dim=i) + local_shard_size, _ = src._local_shard_size_on_dim( + current_logical_shape[src.dim], + mesh_dim_size, + my_coordinate[i], + ) + new_logical_shape = list(current_logical_shape) + new_logical_shape[src.dim] = local_shard_size + mesh_dims_to_logical_shape.append(new_logical_shape) + else: + mesh_dims_to_logical_shape.append(current_logical_shape) + + if isinstance(dst, Shard): + dst_dim_counts[dst.dim] = dst_dim_counts.get(dst.dim, 0) + 1 + + if ( + isinstance(src, Shard) + and isinstance(dst, Shard) + and ( + src.dim != dst.dim or src_dim_counts[src.dim] != dst_dim_counts[dst.dim] + ) + ): + # decompose Shard(i) -> Shard(j) into Shard(i) -> Replicate() -> Shard(j) + transform_infos.append( + _TransformInfo( + mesh_dim=i, + src_dst_placements=(src, Replicate()), + logical_shape=mesh_dims_to_logical_shape[i], + ) + ) + transform_infos.append( + _TransformInfo( + mesh_dim=i, + src_dst_placements=(Replicate(), dst), + logical_shape=mesh_dims_to_logical_shape[i], + ) + ) + else: + transform_infos.append( + _TransformInfo( + mesh_dim=i, + src_dst_placements=(src, dst), + logical_shape=mesh_dims_to_logical_shape[i], + ) + ) + + # sort the pairs by first perform replication then sharding + transform_infos.sort(key=_replicate_then_shard) + return transform_infos + + +def redistribute_local_tensor( + local_tensor: torch.Tensor, + current_spec: DTensorSpec, + target_spec: DTensorSpec, + *, + async_op: bool = False, + is_backward: bool = False, +) -> torch.Tensor: + """ + This redistribute the local tensor (torch.Tensor) from the current DTensorSpec to + the target DTensorSpec, which involves the necessary collective calls to transform + the local shard of the DTensor from its current spec to the target spec. 
+ """ + + if current_spec.mesh != target_spec.mesh: + # TODO: alltoall/permute reshuffling to change device_mesh if they are not the same + raise NotImplementedError("Cross device mesh comm not supported yet!") + + new_local_tensor = None + device_mesh = current_spec.mesh + + my_coordinate = device_mesh.get_coordinate() + + if my_coordinate is None: + # if rank is not part of mesh, we skip redistribute and simply return local_tensor, + # which should be an empty tensor + return local_tensor + + transform_infos = _gen_transform_infos(current_spec, target_spec) + + for transform_info in transform_infos: + i = transform_info.mesh_dim + current, target = transform_info.src_dst_placements + num_chunks = device_mesh.size(mesh_dim=i) + + if current == target: + # short cut, just use the original local tensor + new_local_tensor = local_tensor + continue + + if target.is_replicate(): + # Case 1: target is Replicate + if current.is_partial(): + partial_spec = cast(_Partial, current) + new_local_tensor = partial_spec._reduce_value( + local_tensor, device_mesh, i + ) + elif current.is_shard(): + current_placement = cast(Shard, current) + new_local_tensor = current_placement._to_replicate_tensor( + local_tensor, device_mesh, i, transform_info.logical_shape + ) + else: + raise RuntimeError( + f"redistribute from {current} to {target} not supported yet" + ) + elif target.is_shard(): + # Case 2: target is Shard + target_placement = cast(Shard, target) + target_dim = target_placement.dim + if current.is_partial(): + partial_spec = cast(_Partial, current) + new_local_tensor = partial_spec._reduce_shard_value( + local_tensor, device_mesh, i, target_placement + ) + elif current.is_replicate(): + # split the tensor and return the corresponding cloned local shard + new_local_tensor = target_placement._replicate_to_shard( + local_tensor, device_mesh, i, my_coordinate[i] + ) + else: + # NOTE: we don't support this case efficiently yet, the fallback path we are going here is + # to decompose Shard(0) -> Shard(1) into Shard(0) -> Replicate -> Shard(1) + # TODO: enable this with all_to_all + assert ( + current.is_shard() + ), f"Current placement should be shard but found {current}" + shard_spec = cast(Shard, current) + if shard_spec.dim != target_placement.dim: + new_local_tensor = shard_spec._to_replicate_tensor( + local_tensor, device_mesh, i, transform_info.logical_shape + ) + shards, _ = target_placement._split_tensor( + new_local_tensor, + num_chunks, + with_padding=False, + contiguous=False, + ) + new_local_tensor = shards[my_coordinate[i]] + elif target.is_partial(): + if current.is_replicate(): + partial_spec = cast(_Partial, target) + # skip the replicate to partial transformation when we are in backward pass + # In this case we keep the grad as replicate, this is because we don't + # want to convert the replicated gradients back to partial, although + # that's logically conform with the same layout, converting the gradients + # back to partial is actually useless as you would have to do reduce later + # which would be more expensive than keeping it replicate! For this reason, + # we keep the replicate grad here. 
+ new_local_tensor = ( + partial_spec._partition_value(local_tensor, device_mesh, i) + if not is_backward + else local_tensor + ) + elif current.is_shard(): + if not is_backward: + raise RuntimeError( + f"redistribute from {current} to {target} not supported yet" + ) + # for backward shard -> partial, we just need to convert the shard to replicate + current_placement = cast(Shard, current) + new_local_tensor = current_placement._to_replicate_tensor( + local_tensor, device_mesh, i, transform_info.logical_shape + ) + else: + # partial -> partial no op, should never hit + new_local_tensor = local_tensor + + assert new_local_tensor is not None + local_tensor = new_local_tensor + + assert new_local_tensor is not None, "redistribute failed!" + + if not async_op and isinstance(new_local_tensor, funcol.AsyncCollectiveTensor): + new_local_tensor = new_local_tensor.wait() + + return new_local_tensor + + +class Redistribute(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + # pyre-fixme[2]: Parameter must be annotated. + ctx, + input: "dtensor.DTensor", + device_mesh: DeviceMesh, + placements: Tuple[Placement, ...], + async_op: bool = False, + ): + current_spec = input._spec + ctx.current_spec = current_spec + ctx.async_op = async_op + target_spec = DTensorSpec( + device_mesh, placements, tensor_meta=input._spec.tensor_meta + ) + + local_tensor = input._local_tensor + output = redistribute_local_tensor( + local_tensor, current_spec, target_spec, async_op=async_op + ) + + return dtensor.DTensor( + output, + device_mesh, + target_spec.placements, + shape=input.shape, + dtype=input.dtype, + requires_grad=input.requires_grad, + stride=input.stride(), + ) + + @staticmethod + def backward(ctx, grad_output: "dtensor.DTensor"): # type: ignore[override] + previous_spec = ctx.current_spec + current_spec = grad_output._spec + async_op = ctx.async_op + + local_tensor = grad_output._local_tensor + output = redistribute_local_tensor( + local_tensor, + current_spec, + previous_spec, + async_op=async_op, + is_backward=True, + ) + # normalize the target placement to replicate if it is partial + normalized_placements: List[Placement] = [] + for previous_placement in previous_spec.placements: + if previous_placement.is_partial(): + # keep target placement to replicate instead of partial in this case + normalized_placements.append(Replicate()) + else: + normalized_placements.append(previous_placement) + output_dtensor = dtensor.DTensor( + output, + previous_spec.mesh, + tuple(normalized_placements), + shape=grad_output.shape, + dtype=grad_output.dtype, + requires_grad=grad_output.requires_grad, + stride=grad_output.stride(), + ) + + return ( + output_dtensor, + None, + None, + None, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py new file mode 100644 index 0000000000000000000000000000000000000000..c2cf784e8c02dad72325a4883267d3ac70a9616e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/sharding_prop.py @@ -0,0 +1,410 @@ +from functools import lru_cache +from itertools import chain +from typing import Callable, cast, Dict, List, Optional, Sequence, Union + +import torch +from torch._ops import OpOverload +from torch._subclasses import FakeTensorMode +from torch.distributed._tensor._utils import try_find_mesh_from_args +from torch.distributed._tensor.op_schema import ( + DTensorSpec, + OpInfo, + OpSchema, + 
OpStrategy, + OutputSharding, + OutputSpecType, + PlacementStrategy, + RuntimeSchemaInfo, + StrategyType, + TupleStrategy, +) +from torch.distributed._tensor.placement_types import TensorMeta +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +def _length(obj) -> int: + if obj is None: + return 0 + if not isinstance(obj, Sequence): + return 1 + return len(obj) + + +class ShardingPropagator: + def __init__(self) -> None: + self.op_to_rules: Dict[OpOverload, Callable[[OpSchema], OutputSharding]] = {} + self.op_strategy_funcs: Dict[ + OpOverload, + Callable[[DeviceMesh, OpSchema], StrategyType], + ] = {} + # op map to save static argnum to decide to reuse sharding prop cache or re-run sharding prop + self.op_to_schema_info: Dict[OpOverload, RuntimeSchemaInfo] = {} + self.propagate_op_sharding = lru_cache(None)(self.propagate_op_sharding_non_cached) # type: ignore[method-assign] + + def register_sharding_prop_rule( + self, + op_overload: OpOverload, + rule_func: Callable[[OpSchema], OutputSharding], + schema_info: Optional[RuntimeSchemaInfo] = None, + ): + """ + Register a sharding propagation rule for an operator. + """ + self.op_to_rules[op_overload] = rule_func + if schema_info is not None: + self.op_to_schema_info[op_overload] = schema_info + + def register_op_strategy( + self, + op_overload: OpOverload, + strategy_func: Callable[[DeviceMesh, OpSchema], StrategyType], + schema_info: Optional[RuntimeSchemaInfo] = None, + ): + """ + Register a sharding strategy generator for an operator. + """ + self.op_strategy_funcs[op_overload] = strategy_func + if schema_info is not None: + self.op_to_schema_info[op_overload] = schema_info + + @lru_cache + def _propagate_tensor_meta( + self, op_schema: OpSchema + ) -> Union[None, TensorMeta, Sequence[Optional[TensorMeta]]]: + """ + Propagate the tensor metadata, it could either return a TensorMeta + or a list/tuple of TensorMetas + """ + if op_schema.op == aten.equal.default: + # data dependent ops can't be used for fake propagation + return None + + # NOTE: We must call the tracing in fake tensor mode so that it + # avoids materializing memory + with FakeTensorMode(): + fake_args = op_schema.gen_fake_args() + fake_kwargs = op_schema.gen_fake_kwargs() + fake_out = op_schema.op(*fake_args, **fake_kwargs) + + if isinstance(fake_out, torch.Tensor): + return TensorMeta( + shape=fake_out.shape, stride=fake_out.stride(), dtype=fake_out.dtype + ) + + elif isinstance(fake_out, (tuple, list)): + tensor_meta_list: List[Optional[TensorMeta]] = [] + for fake_out_item in fake_out: + if isinstance(fake_out_item, torch.Tensor): + tensor_meta_list.append( + TensorMeta( + shape=fake_out_item.shape, + stride=fake_out_item.stride(), + dtype=fake_out_item.dtype, + ) + ) + else: + tensor_meta_list.append(None) + return ( + tuple(tensor_meta_list) + if isinstance(fake_out, tuple) + else tensor_meta_list + ) + else: + # if fake is not a tensor or tuple of tensor, return as none + return None + + def _wrap_output_spec_tensor_meta( + self, + op: OpOverload, + output_specs: OutputSpecType, + output_tensor_meta: Union[None, TensorMeta, Sequence[Optional[TensorMeta]]], + ) -> None: + """ + Wrap the output_specs with the tensor metadata from the output. 
+ """ + + if isinstance(output_specs, DTensorSpec): + if not isinstance(output_tensor_meta, TensorMeta): + # Either error due to ShardingPropagator or due to incorrect OutputSpec + if not isinstance(output_tensor_meta, (tuple, list)): + raise ValueError( + "ShardingPropagator error: output does not have an associated TensorMeta" + ) + raise ValueError( + f"For the op {op.name()}, `output_specs` has 1 output which does not equal the " + f"number of op outputs: {len(output_tensor_meta)}." + ) + output_specs.tensor_meta = output_tensor_meta + elif isinstance(output_specs, (tuple, list)): + if not isinstance(output_tensor_meta, (tuple, list)) or len( + output_specs + ) != len(output_tensor_meta): + raise ValueError( + f"For the op {op.name()}, `output_specs` has {len(output_specs)} outputs which does not equal the " + f"number of op outputs {_length(output_tensor_meta)}." + ) + for i, spec in enumerate(output_specs): + if isinstance(spec, DTensorSpec): + output_tensor_meta_i = output_tensor_meta[i] + if not isinstance(output_tensor_meta_i, TensorMeta): + raise ValueError( + f"ShardingPropagator error: output {i} does not have an associated TensorMeta" + ) + spec.tensor_meta = output_tensor_meta_i + + def propagate(self, op_info: OpInfo) -> None: + # We cannot use an lru cache if we know that inputs will have dynamic shapes, + # because SymInts are not hashable. + # This is generally ok because this only happens during tracing in torch.compile, + # and tracing does not need to be as fast as eagermode DTensor usages. + if op_info.schema.has_symints: + output_sharding = self.propagate_op_sharding_non_cached(op_info.schema) + else: + output_sharding = self.propagate_op_sharding(op_info.schema) + op_info.output_sharding = output_sharding + + def propagate_op_sharding_non_cached(self, op_schema: OpSchema) -> OutputSharding: + """ + Propagate the sharding for an operator given the op_schema. + """ + # special case op, we don't need to propagate for local + # scalar. TODO: figure out a better way to handle this + if op_schema.op is aten._local_scalar_dense.default: + return OutputSharding(None, [op_schema]) + + out_tensor_meta = self._propagate_tensor_meta(op_schema) + + def spec_to_strategy(spec: object) -> object: + if isinstance(spec, DTensorSpec): + return OpStrategy([PlacementStrategy(spec)]) + elif ( + isinstance(spec, (list, tuple)) + and len(spec) > 0 + and isinstance(spec[0], DTensorSpec) + ): + # tensor list create tuple strategy + tuple_strategy = [spec_to_strategy(s) for s in spec] + tuple_strategy = cast(Sequence[StrategyType], tuple_strategy) + return TupleStrategy( + tuple(tuple_strategy) if isinstance(spec, tuple) else tuple_strategy + ) + else: + return spec + + if op_schema.op in self.op_strategy_funcs: + # generate op strategy for the op. 
+ mesh = try_find_mesh_from_args(op_schema.op, op_schema.args_schema) + # swap the args spec with args strategies + args_op_strategy = [spec_to_strategy(i) for i in op_schema.args_schema] + + kwargs_op_strategy = { + k: spec_to_strategy(v) for k, v in op_schema.kwargs_schema.items() + } + + # construct a new OpSchema on args for strategy based propagation + strategy_schema: OpSchema = OpSchema( + op=op_schema.op, + args_schema=tuple(args_op_strategy), + kwargs_schema=kwargs_op_strategy, + ) + + op_strategy = self.op_strategy_funcs[op_schema.op](mesh, strategy_schema) + + if isinstance(op_strategy, OpStrategy): + # single Op strategy + output_strategy = self._select_strategy(op_strategy) + + # check if we need to redistribute the input + needs_redistribute = False + expected_input_specs = [] + + # in case where the op does not specify input_specs and output_specs + # is a DTensorSpec, we use output_specs as the spec for each DTensor + # input arg. + if output_strategy.input_specs is None: + assert isinstance(output_strategy.output_specs, DTensorSpec) + + for idx, input_spec in enumerate(op_schema.args_spec): + desired_spec = ( + output_strategy.output_spec + if output_strategy.input_specs is None + else output_strategy.input_specs[idx] + ) + expected_input_specs.append(desired_spec) + if input_spec.placements != desired_spec.placements: + needs_redistribute = True + + suggestion_schema = None + if needs_redistribute: + reshard_schema = OpSchema( + op_schema.op, tuple(expected_input_specs), {} + ) + reshard_schema._inplace_rewrap_schema_suggestion(op_schema) + suggestion_schema = [reshard_schema] + + # construct output spec for the op + if op_schema.return_type_tuple_tensor_like(): + # for ops that return multiple tensors and the output_specs is not + # a tuple, we use a tuple of that single output spec as the new + # output_specs + output_specs: OutputSpecType = output_strategy.output_specs + if isinstance(output_specs, DTensorSpec): + output_specs = tuple( + [ + # create a new DTensorSpec with the same placement as the + # output_specs in output_strategy + DTensorSpec( + mesh=output_specs.mesh, + placements=output_specs.placements, + tensor_meta=output_specs.tensor_meta, + ) + for _ in range(len(op_schema.op._schema.returns)) + ] + ) + elif op_schema.return_type_tensor(): + output_specs = output_strategy.output_specs + else: + output_specs = None + + output_sharding = OutputSharding( + output_specs, + suggestion_schema, + needs_redistribute=needs_redistribute, + ) + elif isinstance(op_strategy, TupleStrategy): + # tuple strategy output sharding processing + # runtime selected placement strategy for each TupleStrategy input arg + selected_strategies: List[PlacementStrategy] = [] + out_spec_list: List[DTensorSpec] = [] + for strategy in op_strategy.childs: + assert isinstance(strategy, OpStrategy) + selected_strategy = self._select_strategy(strategy) + selected_strategies.append(selected_strategy) + out_spec_list.append(selected_strategy.output_spec) + + needs_redistribute = False + suggestion_args: List[object] = [] + for arg_idx, arg in enumerate(op_schema.args_schema): + if isinstance(arg, (list, tuple)) and isinstance( + arg[0], DTensorSpec + ): + expected_input_spec_list: List[DTensorSpec] = [] + for idx, arg_spec in enumerate(arg): + expected_input_spec = selected_strategies[idx].input_spec( + arg_idx + ) + expected_input_spec = ( + expected_input_spec.shallow_copy_with_tensor_meta( + arg_spec.tensor_meta + ) + ) + if arg_spec.placements != expected_input_spec.placements: + 
needs_redistribute = True + expected_input_spec_list.append(expected_input_spec) + suggestion_args.append( + tuple(expected_input_spec_list) + if isinstance(arg, tuple) + else expected_input_spec_list + ) + elif isinstance(arg, DTensorSpec): + expected_input_spec = selected_strategies[0].input_spec(arg_idx) + expected_input_spec = ( + expected_input_spec.shallow_copy_with_tensor_meta( + arg.tensor_meta + ) + ) + if arg.placements != expected_input_spec.placements: + needs_redistribute = True + suggestion_args.append(expected_input_spec) + else: + suggestion_args.append(arg) + + suggestion_schema = None + if needs_redistribute: + reshard_schema = OpSchema( + op_schema.op, tuple(suggestion_args), op_schema.kwargs_schema + ) + suggestion_schema = [reshard_schema] + + output_sharding = OutputSharding( + tuple(out_spec_list) if out_tensor_meta is not None else None, + suggestion_schema, + needs_redistribute=needs_redistribute, + ) + else: + raise ValueError("Unsupported op strategy type") + + # associate the output sharding with the output tensor metadata + self._wrap_output_spec_tensor_meta( + op_schema.op, output_sharding.output_spec, out_tensor_meta + ) + return output_sharding + elif op_schema.op in self.op_to_rules: + # propagate the sharding with rule + sharding_prop_func = self.op_to_rules[op_schema.op] + + # step 1. there's sharding propagation rule, run + # sharding propagation to get the output sharding + try: + output_sharding = sharding_prop_func(op_schema) + except NotImplementedError as e: + raise e + except Exception as e: + raise RuntimeError( + f"Sharding propagation failed on op {op_schema}.\n" f"Error: {e}" + ) from e + + # step 2. if can't get output_spec from sharding + # propagation (i.e. no rules apply for input + # placements), we return the output sharding + # with schema suggestions, which can be used to + # decide how to do redistribute on inputs + if output_sharding.output_spec is None: + if output_sharding.schema_suggestions is None: + if output_sharding.failed_reason is not None: + raise RuntimeError( + f"Sharding propagation failed on op {op_schema}!" + f"Failed reason: {output_sharding.failed_reason}" + ) + else: + # we do auto redistribute on inputs if necessary + # to get an eligible input, which we will pick a + # schema suggestion base on the redistribute cost. + # For now we simply pick the first suggestion. + suggested_input_schema = output_sharding.schema_suggestions[0] + # run sharding propagation again with suggested schema + propagation_res = sharding_prop_func(suggested_input_schema) + # we set the output sharding with the new propagation result + # so that dispatching know both output_spec and schema_suggestions + # exist, which indicates a reshard is needed + output_sharding.output_spec = propagation_res.output_spec + output_sharding.needs_redistribute = True + + # associate the output sharding with the output tensor metadata + self._wrap_output_spec_tensor_meta( + op_schema.op, output_sharding.output_spec, out_tensor_meta + ) + + return output_sharding + else: + raise NotImplementedError( + f"Operator {op_schema.op} does not have a sharding strategy registered." + ) + + def _select_strategy(self, strategy: OpStrategy) -> PlacementStrategy: + if len(strategy.strategies) == 1: + # short cut with only one possible strategy + return strategy.strategies[0] + + strategy_costs: List[float] = [] + for strtg in strategy.strategies: + assert ( + strtg.redistribute_cost is not None + ), "must set redistribute cost each strategy!" 
+ redistribute_cost = sum(chain.from_iterable(strtg.redistribute_cost)) + strategy_costs.append(redistribute_cost) + + # for eager execution, we just select the one with the minimal redistribute cost + return strategy.strategies[strategy_costs.index(min(strategy_costs))] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..ebcc981d2c93ac24e5c9fd7414af1038939076ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/tp_conv.py @@ -0,0 +1,277 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +# implement matrix related ops for distributed tensor +from typing import cast, Dict, List, Tuple + +import torch +import torch.distributed as dist +import torch.distributed._tensor.api as dtensor + +aten = torch.ops.aten + + +def _requires_data_exchange(padding): + # TODO: whether there requires data exchange is currently determined by padding + return padding[1] != 0 + + +def _is_supported(input_size, kernel_size, stride, padding, dilation): + if dilation[1] != 1: + raise RuntimeError("Dilation must be 1 for tensor parallel convolution.") + if padding[1] != 0: + if stride[1] != 1: + raise RuntimeError( + "Stride must be 1 when there is padding for tensor parallel convolution." + ) + if kernel_size[3] // 2 > input_size[3]: + raise RuntimeError( + "kernel_size[3] // 2 should be less than or equal to input_size[3] for tensor parallel convolution." + ) + else: + if not (input_size[3] % stride[1] == 0 and stride[1] == kernel_size[3]): + raise RuntimeError( + "It requires that input_size[3] is divisible by stride[1] and stride[1] equals kernel_size[3] " + "when there is padding for tensor parallel convolution." 
+ ) + return True + + +def _ring_send_recv_construct(in_tensor, d1, d2, left, right, rank, size): + # dist comms and reconstruct local input tensor + send_to_right = in_tensor[:, :, :, -d1:].contiguous() + send_to_left = in_tensor[:, :, :, :d2].contiguous() + recv_from_right = torch.zeros_like(send_to_left) + recv_from_left = torch.zeros_like(send_to_right) + + send_op_right = dist.P2POp(dist.isend, send_to_right, right) + send_op_left = dist.P2POp(dist.isend, send_to_left, left) + recv_op_right = dist.P2POp(dist.irecv, recv_from_right, right) + recv_op_left = dist.P2POp(dist.irecv, recv_from_left, left) + + reqs = dist.batch_isend_irecv( + [send_op_right, send_op_left, recv_op_left, recv_op_right] + ) + for req in reqs: + req.wait() + + if rank == 0: + in_tensor = torch.cat([in_tensor, recv_from_right], dim=-1) + elif rank == size - 1: + in_tensor = torch.cat([recv_from_left, in_tensor], dim=-1) + else: + in_tensor = torch.cat([recv_from_left, in_tensor, recv_from_right], dim=-1) + + return in_tensor + + +def _ring_send_recv_aggregate(grad_in_tensor, d1, d2, left, right, rank, size): + # dist comms and aggregate gradients for edge pixels + send_to_right = grad_in_tensor[:, :, :, -d2:].contiguous() + send_to_left = grad_in_tensor[:, :, :, :d1].contiguous() + recv_from_right = torch.zeros_like(send_to_left) + recv_from_left = torch.zeros_like(send_to_right) + + send_op_right = dist.P2POp(dist.isend, send_to_right, right) + send_op_left = dist.P2POp(dist.isend, send_to_left, left) + recv_op_right = dist.P2POp(dist.irecv, recv_from_right, right) + recv_op_left = dist.P2POp(dist.irecv, recv_from_left, left) + + reqs = dist.batch_isend_irecv( + [send_op_right, send_op_left, recv_op_left, recv_op_right] + ) + for req in reqs: + req.wait() + + if rank == 0: + grad_in_tensor = grad_in_tensor[:, :, :, :-d2] + grad_in_tensor[:, :, :, -d1:] = torch.add( + grad_in_tensor[:, :, :, -d1:], recv_from_right + ) + elif rank == size - 1: + grad_in_tensor = grad_in_tensor[:, :, :, d1:] + grad_in_tensor[:, :, :, :d2] = torch.add( + grad_in_tensor[:, :, :, :d2], recv_from_left + ) + else: + grad_in_tensor = grad_in_tensor[:, :, :, d1:-d2] + grad_in_tensor[:, :, :, -d1:] = torch.add( + grad_in_tensor[:, :, :, -d1:], recv_from_right + ) + grad_in_tensor[:, :, :, :d2] = torch.add( + grad_in_tensor[:, :, :, :d2], recv_from_left + ) + + +def tp_convolution( + op_call: torch._ops.OpOverload, + local_tensor_args: Tuple[object, ...], + local_tensor_kwargs: Dict[str, object], +) -> object: + assert op_call == aten.convolution.default + assert len(local_tensor_args) == 9 + + rank = dist.get_rank() + size = dist.get_world_size() + in_tensor = cast(torch.Tensor, local_tensor_args[0]) + weight = cast(torch.Tensor, local_tensor_args[1]) + stride, padding, dilation = local_tensor_args[3:6] + + assert _is_supported(in_tensor.shape, weight.shape, stride, padding, dilation) + assert isinstance(padding, List) + + if not _requires_data_exchange(padding): + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + return local_results + else: + # step 0 compute the overlap pixels of the input tensor + d = weight.shape[3] - 1 + d1 = d // 2 + d2 = d - d1 + assert d1 + d2 == d + right = (rank + 1) % size + left = (rank - 1 + size) % size + + # step1 reconstruct local input tensor + in_tensor = _ring_send_recv_construct( + in_tensor, d1, d2, left, right, rank, size + ) + + # step2 feed local input tensor to op_call + local_tensor_args_list = list(local_tensor_args) + local_tensor_args_list[0] = in_tensor + local_tensor_args 
= cast(Tuple[object, ...], local_tensor_args_list) + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + + # step3 remove extra outputs from the results + padding_w = padding[1] + w = local_results.size(3) + if rank == 0: + local_results = local_results[:, :, :, : w - padding_w] + elif rank == size - 1: + local_results = local_results[:, :, :, padding_w:] + else: + local_results = local_results[:, :, :, padding_w : w - padding_w] + + return local_results + + +def tp_convolution_backward( + op_call: torch._ops.OpOverload, + local_tensor_args: Tuple[object, ...], + local_tensor_kwargs: Dict[str, object], +) -> object: + assert op_call == aten.convolution_backward.default + assert len(local_tensor_args) == 11 + + rank = dist.get_rank() + size = dist.get_world_size() + grad_out_tensor = cast(torch.Tensor, local_tensor_args[0]) + in_tensor = cast(torch.Tensor, local_tensor_args[1]) + weight = cast(torch.Tensor, local_tensor_args[2]) + stride, padding, dilation = local_tensor_args[4:7] + + assert _is_supported(in_tensor.shape, weight.shape, stride, padding, dilation) + assert isinstance(padding, List) + + if not _requires_data_exchange(padding): + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + return local_results + else: + # step 0 compute the overlap pixels of the input tensor + d = weight.shape[3] - 1 + d1 = d // 2 + d2 = d - d1 + assert d1 + d2 == d + right = (rank + 1) % size + left = (rank - 1 + size) % size + + # step1 reconstruct local input tensor + in_tensor = _ring_send_recv_construct( + in_tensor, d1, d2, left, right, rank, size + ) + + # step2 reconstruct local gradient output tensor + N, C_out, H_out, _ = grad_out_tensor.shape + padding_w = padding[1] + if rank == 0: + grad_out_tensor = torch.nn.functional.pad( + grad_out_tensor, (0, padding_w), "constant", 0 + ) + elif rank == size - 1: + grad_out_tensor = torch.nn.functional.pad( + grad_out_tensor, (padding_w, 0), "constant", 0 + ) + else: + grad_out_tensor = torch.nn.functional.pad( + grad_out_tensor, (padding_w, padding_w), "constant", 0 + ) + + # step3 feed local input tensor to op_call + local_tensor_args_list = list(local_tensor_args) + local_tensor_args_list[0] = grad_out_tensor + local_tensor_args_list[1] = in_tensor + local_tensor_args = cast(Tuple[object, ...], local_tensor_args_list) + local_results = op_call(*local_tensor_args, **local_tensor_kwargs) + + # step4 aggregate gradients for edge pixels + grad_in_tensor = local_results[0] + grad_in_tensor = _ring_send_recv_aggregate( + grad_in_tensor, d1, d2, left, right, rank, size + ) + + local_results = list(local_results) + local_results[0] = grad_in_tensor + local_results = cast(Tuple[object, ...], local_results) + + return local_results + + +def convolution_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, ...], + kwargs: Dict[str, object], +) -> object: + # extract local tensor and sharding infos to a OpInfo + op_info = dtensor.DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs) + + # sharding propagation + dtensor.DTensor._op_dispatcher.sharding_propagator.propagate(op_info) + output_sharding = op_info.output_sharding + assert output_sharding is not None, "output sharding should not be None" + + # local propagation + local_results = tp_convolution( + op_call, tuple(op_info.local_args), op_info.local_kwargs + ) + + return dtensor.DTensor._op_dispatcher.wrap( + local_results, output_sharding.output_spec + ) + + +def convolution_backward_handler( + op_call: torch._ops.OpOverload, + args: Tuple[object, 
...], + kwargs: Dict[str, object], +) -> object: + # Redistribute grad_output tensor to the same placement as input tensor + args = list(args) + assert isinstance(args[0], dtensor.DTensor) and isinstance(args[1], dtensor.DTensor) + args[0] = args[0].redistribute(args[1].device_mesh, args[1].placements) + args = tuple(args) + + # extract local tensor and sharding infos to a OpInfo + op_info = dtensor.DTensor._op_dispatcher.unwrap_to_op_info(op_call, args, kwargs) + + # sharding propagation + dtensor.DTensor._op_dispatcher.sharding_propagator.propagate(op_info) + output_sharding = op_info.output_sharding + assert output_sharding is not None, "output sharding should not be None" + + # local propagation + local_results = tp_convolution_backward( + op_call, tuple(op_info.local_args), op_info.local_kwargs + ) + + return dtensor.DTensor._op_dispatcher.wrap( + local_results, output_sharding.output_spec + )
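Note (illustrative, not part of the diff above): the Redistribute autograd function defined in redistribute.py is normally reached through DTensor.redistribute. The minimal sketch below shows that path end to end. It assumes a PyTorch build matching the modules in this diff, that the script is launched with torchrun so a default process group can be created, and that the gloo backend is available; the helper name demo_redistribute is hypothetical.

import torch
import torch.distributed as dist
from torch.distributed._tensor import distribute_tensor, Replicate, Shard
from torch.distributed.device_mesh import init_device_mesh

def demo_redistribute():
    dist.init_process_group("gloo")
    # One-dimensional mesh over all ranks; "cpu" keeps the sketch backend-agnostic.
    mesh = init_device_mesh("cpu", (dist.get_world_size(),))

    # Shard rows across ranks; each rank holds one chunk along dim 0.
    full = torch.randn(8, 4, requires_grad=True)
    dt = distribute_tensor(full, mesh, [Shard(0)])

    # Forward: Redistribute.forward calls redistribute_local_tensor to turn the
    # Shard(0) placement into Replicate on the mesh dimension.
    replicated = dt.redistribute(mesh, [Replicate()])

    # Backward: Redistribute.backward runs redistribute_local_tensor with
    # is_backward=True, converting the replicated gradient back to the
    # sharded layout (and normalizing any Partial placement to Replicate).
    replicated.to_local().sum().backward()

if __name__ == "__main__":
    demo_redistribute()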
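Note (illustrative, not part of the diff above): _select_strategy in sharding_prop.py picks the placement strategy whose redistribute_cost table has the smallest total. The snippet below replays that computation on made-up numbers; the costs variable and its values are hypothetical, and redistribute_cost[i][j] is read here as the cost of resharding input i from its j-th candidate placement.

from itertools import chain

costs = [
    [[0.0, 4.0], [0.0, 0.0]],  # strategy 0: one input would need a reshard
    [[0.0, 0.0], [0.0, 0.0]],  # strategy 1: inputs already match
    [[2.0, 2.0], [1.0, 0.0]],  # strategy 2: two inputs would need resharding
]
totals = [sum(chain.from_iterable(per_strategy)) for per_strategy in costs]
print(totals.index(min(totals)))  # prints 1: the strategy needing no redistribution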
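Note (illustrative, not part of the diff above): tp_convolution shards the width dimension, exchanges a halo of d = kernel_w - 1 columns (d1 received from the left neighbor, d2 from the right) before convolving, and then trims the padding-contaminated output columns per rank. The single-process sketch below mimics that bookkeeping with plain tensors so the d1/d2 split and the trimming can be checked without a process group. The helper names halo_widths and simulate_rank_output are hypothetical, and it assumes stride 1, dilation 1, kernel_w > 1, and shards wide enough to cover the halo.

import torch
import torch.nn.functional as F

def halo_widths(kernel_w: int):
    # tp_convolution exchanges d = kernel_w - 1 overlap columns in total,
    # split as d1 taken from the left neighbor and d2 from the right neighbor.
    d = kernel_w - 1
    d1 = d // 2
    d2 = d - d1
    return d1, d2

def simulate_rank_output(full_input, weight, rank, size, padding):
    # Shard the width dim evenly, graft on the columns the neighbors would send
    # via _ring_send_recv_construct, convolve, then trim the outputs exactly as
    # tp_convolution does for the first, last and middle ranks.
    d1, d2 = halo_widths(weight.shape[3])
    shards = list(full_input.chunk(size, dim=3))
    pieces = []
    if rank > 0:
        pieces.append(shards[rank - 1][:, :, :, -d1:])  # halo from the left neighbor
    pieces.append(shards[rank])
    if rank < size - 1:
        pieces.append(shards[rank + 1][:, :, :, :d2])   # halo from the right neighbor
    local = torch.cat(pieces, dim=3)

    out = F.conv2d(local, weight, padding=padding)
    w, pad_w = out.size(3), padding[1]
    if rank == 0:
        return out[:, :, :, : w - pad_w]
    if rank == size - 1:
        return out[:, :, :, pad_w:]
    return out[:, :, :, pad_w : w - pad_w]

# The per-rank slices should tile the single-device result along the width dim.
x = torch.randn(1, 3, 8, 16)
k = torch.randn(4, 3, 3, 3)
reference = F.conv2d(x, k, padding=(1, 1))
tiled = torch.cat([simulate_rank_output(x, k, r, 4, (1, 1)) for r in range(4)], dim=3)
print(torch.allclose(reference, tiled))  # should print True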